pypowervm-1.1.24/0000775000175000017500000000000013571367172013251 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/0000775000175000017500000000000013571367172015321 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/adapter.py0000664000175000017500000017632213571367171017325 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Low-level communication with the PowerVM REST API.""" import abc import copy import errno import hashlib import os import uuid if os.name == 'posix': import pwd import re import threading import time import xml.sax.saxutils as sax_utils from lxml import etree try: import urlparse except ImportError: import urllib.parse as urlparse from oslo_log import log as logging import requests import requests.exceptions as rqex import six import six.moves.urllib.parse as urllib import weakref from pypowervm import const as c import pypowervm.entities as ent import pypowervm.exceptions as pvmex from pypowervm.i18n import _ from pypowervm import traits as pvm_traits from pypowervm import util from pypowervm.utils import retry # Preserve CDATA on the way in (also ensures it is not altered on the way out) etree.set_default_parser(etree.XMLParser(strip_cdata=False, encoding='utf-8')) # Setup logging LOG = logging.getLogger(__name__) # Register the namespaces we'll use etree.register_namespace('atom', c.ATOM_NS) etree.register_namespace('xsi', c.XSI_NS) etree.register_namespace('web', c.WEB_NS) 
etree.register_namespace('uom', c.UOM_NS)


class Session(object):
    """Responsible for PowerVM API session management."""

    def __init__(self, host='localhost', username=None, password=None,
                 auditmemento=None, protocol=None, port=None, timeout=1200,
                 certpath='/etc/ssl/certs/', certext='.crt', conn_tries=1):
        """Persistent authenticated session with the REST API server.

        Two authentication modes are supported: password- and file-based.

        :param host: IP or resolvable hostname for the REST API server.
        :param username: User ID for authentication.  Optional for file-based
                         authentication.
        :param password: Authentication password.  If specified,
                         password-based authentication is used.  If omitted,
                         file-based authentication is used.
        :param auditmemento: Tag for log entry identification on the REST API
                             server.  If omitted, one will be generated.
        :param protocol: TCP protocol for communication with the REST API
                         server.  Must be either 'http' or 'https'.  If
                         unspecified, will default to 'http' for file-based
                         local authentication and 'https' otherwise.
        :param port: TCP port on which the REST API server communicates.  If
                     unspecified, will default based on the protocol
                     parameter: protocol='http' => port=12080;
                     protocol='https' => port=12443.
        :param timeout: See timeout param on requests.Session.request.
                        Default is 20 minutes.
        :param certpath: Directory in which the certificate file can be found.
                         Certificate file path is constructed as
                         {certpath}{host}{certext}.  For example, given
                         host='localhost', certpath='/etc/ssl/certs/',
                         certext='.crt', the certificate file path will be
                         '/etc/ssl/certs/localhost.crt'.  This is ignored if
                         protocol is http.
        :param certext: Certificate file extension.
        :param conn_tries: Number of times to try connecting to the REST
                           server if a ConnectionError is received.  The
                           default, one, means we only try once.  We sleep for
                           two seconds (subject to change in future versions)
                           between retries.
        :return: A logged-on session suitable for passing to the Adapter
                 constructor.
        """
        # Key off lack of password to indicate file-based authentication.  In
        # this case, 'host' is often 'localhost' (or something that resolves
        # to it), but it's also possible consumers will be using e.g. SSH keys
        # allowing them to grab the file from a remote host.
        self.use_file_auth = password is None
        self.password = password
        if self.use_file_auth and not username:
            # Generate a unique username, used by the file auth mechanism
            username = 'pypowervm_%s' % uuid.uuid4()
        self.username = username

        if protocol is None:
            protocol = 'http' if self.use_file_auth else 'https'
        if protocol not in c.PORT_DEFAULT_BY_PROTO.keys():
            raise ValueError(_('Invalid protocol "%s"') % protocol)
        self.protocol = protocol

        self.host = host

        if host != 'localhost' and protocol == 'http':
            LOG.warning(_('Unencrypted communication with PowerVM! Revert '
                          'configuration to https.'))

        if port is None:
            port = c.PORT_DEFAULT_BY_PROTO[self.protocol]
        self.port = port

        if not auditmemento:
            # Assume 'default' unless we can calculate the proper default
            auditmemento = 'default'
            if os.name == 'posix':
                try:
                    auditmemento = pwd.getpwuid(os.getuid())[0]
                except Exception:
                    LOG.warning(_("Calculating default audit memento failed, "
                                  "using 'default'."))
        self.auditmemento = auditmemento

        # Support IPv6 addresses by bracketing the host in the URI
        if self.host[0] != '[' and ':' in self.host:
            self.dest = '%s://[%s]:%i' % (self.protocol, self.host, self.port)
        else:
            self.dest = '%s://%s:%i' % (self.protocol, self.host, self.port)

        self.timeout = timeout
        self.certpath = certpath
        self.certext = certext

        self._lock = threading.RLock()
        self._logged_in = False
        self._relogin_unsafe = False
        self._eventlistener = None

        # Will be set by _logon()
        self._sessToken = None
        self.mc_type = None
        self.schema_version = None
        self.traits = None

        # Record which object initialized the session.  This is to protect
        # against clones created by deepcopy or other methods.
        self._init_by = id(self)

        # External session config hook: a module path whose session_config()
        # is invoked with this Session before logon.
        cfg_module_path = os.environ.get('PYPOWERVM_SESSION_CONFIG', None)
        if cfg_module_path:
            # TODO(review): 'imp' is deprecated (removed in Python 3.12);
            # migrate to importlib when minimum Python version allows.
            import imp
            imp.load_source('sesscfg', cfg_module_path).session_config(self)

        self._logon(conn_tries=conn_tries)

        # HMC should never use file auth.  This should never happen - if it
        # does, it indicates that we got a bad Logon response, or processed it
        # incorrectly.
        if self.use_file_auth and self.mc_type == 'HMC':
            raise pvmex.Error(_("Local authentication not supported on HMC."))

        # Set the API traits after logon.
        self.traits = pvm_traits.APITraits(self)

    def __del__(self):
        # Refuse to clean up clones.
        if self._init_by != id(self):
            return

        try:
            # deleting the session will shutdown the event listener
            if self.has_event_listener:
                self._eventlistener.shutdown()
        finally:
            self._logoff()

    def get_event_listener(self):
        """Return (lazily creating) the shared _EventListener instance."""
        if not self.has_event_listener:
            LOG.info(_("Setting up event listener for %s"), self.host)
            self._eventlistener = _EventListener(self)
        return self._eventlistener

    @property
    def has_event_listener(self):
        # True once get_event_listener() has created the listener.
        return self._eventlistener is not None

    @staticmethod
    def _chunkreader(filehandle, chunksize):
        """Generator yielding upload content chunks.

        :param filehandle: Either a file-like (anything with a read() method)
                           read in increments of chunksize, or an iterable
                           whose items are yielded as-is.
        :param chunksize: Maximum number of bytes per read() call.  Ignored
                          when filehandle is a plain iterable.
        """
        if hasattr(filehandle, 'read'):
            # file-like: read fixed-size chunks until EOF
            while True:
                d = filehandle.read(chunksize)
                if not d:
                    break
                yield d
        else:
            # already an iterable of chunks; pass them through unchanged
            for d in filehandle:
                yield d

    def request(self, method, path, headers=None, body='', sensitive=False,
                verify=False, timeout=-1, auditmemento=None, relogin=True,
                login=False, filehandle=None, chunksize=65536):
        """Send an HTTP/HTTPS request to a PowerVM interface.

        :param filehandle: For downloads (with method == 'GET'), a writable
            file-like (anything with a write() method) to which the download
            content should be written.  For uploads (with method == 'PUT' or
            'POST'), this may be a readable file-like (anything with a read()
            method) or an iterable from which the upload content should be
            retrieved.  When None (the default), response text goes to the
            body of the returned Response.
        :param chunksize: For downloads, the content is written to filehandle
            in increments of (at most) chunksize bytes.  For uploads when
            filehandle is a file-like, the content is sent through the
            request in increments of (at most) chunksize bytes.  For uploads
            when filehandle is an iterable, this arg is ignored - content
            chunks are sent through the request in whatever size the iterable
            yields them.  For other request types, this arg is ignored.
        """
        # Don't use mutable default args
        if headers is None:
            headers = {}
        session = requests.Session()
        session.verify = verify
        url = self.dest + path

        # If timeout isn't specified, use session default
        if timeout == -1:
            timeout = self.timeout

        if auditmemento:
            headers['X-Audit-Memento'] = auditmemento
        else:
            headers['X-Audit-Memento'] = self.auditmemento

        isupload = False
        isdownload = False
        if filehandle:
            if method in ['PUT', 'POST']:
                isupload = True
            elif method in ['GET']:
                isdownload = True
            else:
                raise ValueError(_('Unexpected filehandle on %s request')
                                 % method)

        # NOTE(review): the redaction placeholders below appear as empty
        # strings in this (sanitized) source; confirm against upstream
        # whether a marker such as a '<sensitive>' tag was intended.
        if isupload:
            LOG.trace('sending %s %s headers=%s body=', method, url,
                      headers if not sensitive else "")
        else:
            LOG.trace('sending %s %s headers=%s body=%s', method, url,
                      headers if not sensitive else "",
                      body if not sensitive else "")

        # Add X-API-Session header after above so it's not printed in log
        sess_token_try = None
        if not login:
            with self._lock:
                assert self._sessToken, "missing session token"
                headers['X-API-Session'] = self._sessToken
                sess_token_try = self._sessToken

        try:
            if isupload:
                response = session.request(
                    method, url,
                    data=self._chunkreader(filehandle, chunksize),
                    headers=headers, timeout=timeout)
            elif isdownload:
                response = session.request(method, url, stream=True,
                                           headers=headers, timeout=timeout)
            else:
                response = session.request(method, url, data=body,
                                           headers=headers, timeout=timeout)
        except rqex.SSLError as e:
            # TODO(IBM) Get better responses here...this isn't good.
            msg = '%s for %s %s: %s' % (e.__class__.__name__, method, url, e)
            LOG.warning(msg)
            raise pvmex.SSLError(msg)
        except rqex.ConnectionError as e:
            msg = '%s for %s %s: %s' % (e.__class__.__name__, method, url, e)
            LOG.warning(msg)
            raise pvmex.ConnectionError(msg)
        except rqex.Timeout as e:
            msg = '%s for %s %s: %s' % (e.__class__.__name__, method, url, e)
            LOG.warning(msg)
            raise pvmex.TimeoutError(msg)
        except Exception as e:
            LOG.exception(_('Unexpected error for %(meth)s %(url)s'),
                          {'meth': method, 'url': url})
            raise pvmex.Error(_('Unexpected error: %(class)s for %(method)s '
                                '%(url)s: %(excp)s') %
                              {'class': e.__class__.__name__,
                               'method': method, 'url': url,
                               'excp': str(e)})
        finally:
            session.close()

        # remove X-API-Session header so it won't get printed
        if not login:
            try:
                del headers['X-API-Session']
            except KeyError:
                # something modifying the submitted request headers??
                # TODO(IBM): why does this happen and what else may result?
                pass

        LOG.trace('result: %s (%s) for %s %s', response.status_code,
                  response.reason, method, url)
        LOG.trace('response headers: %s',
                  response.headers if not sensitive else "")

        if response.status_code in [c.HTTPStatus.OK_NO_CONTENT,
                                    c.HTTPStatus.NO_CHANGE]:
            return Response(method, path, response.status_code,
                            response.reason, response.headers,
                            reqheaders=headers, reqbody=body)
        else:
            LOG.trace('response body:\n%s',
                      response.text if not sensitive else "")

        # re-login processing
        if response.status_code == c.HTTPStatus.UNAUTHORIZED:
            LOG.debug('Processing HTTP Unauthorized')
            with self._lock:
                if not relogin:
                    LOG.debug('Requester specified no re-login')
                elif self._relogin_unsafe:
                    LOG.warning(_('Re-login has been deemed unsafe.  This '
                                  'Session instance should no longer be '
                                  'used.'))
                else:
                    if self._sessToken != sess_token_try:
                        # Someone else already re-logged-in; just retry below
                        LOG.debug('Re-login done elsewhere for %s', self.host)
                    else:
                        self._logged_in = False
                        LOG.info(_('Attempting re-login %s'), self.host)
                        try:
                            self._logon()
                        except pvmex.Error as e:
                            if e.response:
                                if (e.response.status ==
                                        c.HTTPStatus.UNAUTHORIZED):
                                    # can't continue re-login attempts lest we
                                    # lock the account
                                    self._relogin_unsafe = True
                                    LOG.warning(
                                        _('Re-login 401, response body:\n%s'),
                                        e.response.body)
                                else:
                                    # safe to try re-login again in this case
                                    LOG.warning(
                                        _('Re-login failed, resp body:\n%s'),
                                        e.response.body)
                            else:
                                # safe to try re-login again in this case
                                LOG.warning(_('Re-login failed:\n%s'), e)
                            # Attach the original 401 so callers can inspect
                            # what triggered the re-login attempt.
                            e.orig_response = Response(
                                method, path, response.status_code,
                                response.reason, response.headers,
                                reqheaders=headers, reqbody=body,
                                body=response.text)
                            raise

                    # Retry the original request with the fresh token;
                    # relogin=False so a second 401 is not retried again.
                    try:
                        return self.request(method, path, headers, body,
                                            sensitive=sensitive,
                                            verify=verify, timeout=timeout,
                                            relogin=False)
                    except pvmex.HttpUnauth as e:
                        # This is a special case... normally on a 401 we
                        # would retry login, but we won't here because
                        # we just did that... Handle it specially.
                        LOG.warning(
                            _('Re-attempt failed with another 401, response '
                              'body:\n%s'), e.response.body)
                        raise pvmex.Error(
                            _('suspicious HTTP 401 response for %(method)s '
                              '%(path)s: token is brand new') %
                            {'method': method, 'path': path})

        resp = None
        if not isdownload:
            resp = Response(method, path, response.status_code,
                            response.reason, response.headers,
                            reqheaders=headers, reqbody=body,
                            body=response.text)

        if 200 <= response.status_code < 300:
            if isdownload:
                # Stream the download content to the caller's filehandle.
                for chunk in response.iter_content(chunksize):
                    filehandle.write(chunk)
                resp = Response(method, path, response.status_code,
                                response.reason, response.headers,
                                reqheaders=headers, reqbody=body)
            return resp
        else:
            if isdownload:
                # Error body arrives via the stream; collect it for the
                # exception's Response.
                errtext = ''
                for chunk in response.iter_content(chunksize):
                    errtext += chunk
                resp = Response(method, path, response.status_code,
                                response.reason, response.headers,
                                reqheaders=headers, reqbody=body,
                                body=errtext)
            raise self._get_httperror(resp)

    @staticmethod
    def _get_httperror(resp):
        """Return (don't raise) an HttpError subclass appropriate to resp."""
        status = resp.status
        if status == c.HTTPStatus.NOT_FOUND:
            return pvmex.HttpNotFound(resp)
        if status == c.HTTPStatus.UNAUTHORIZED:
            return pvmex.HttpUnauth(resp)
        # Default general HttpError
        return pvmex.HttpError(resp)

    def _logon(self, conn_tries=1):
        """Create an authentication token on the REST server for this Session.

        :param conn_tries: Number of times to try connecting to the REST
                           server if a ConnectionError is received.  The
                           default, one, means we only try once.  We sleep for
                           two seconds (subject to change in future versions)
                           between retries.
        """
        def delay_func(try_num, max_tries, *args, **kwargs):
            # Invoked by the retry decorator between connection attempts.
            delay = 2
            LOG.warning(_("Failed to connect to REST server - is the "
                          "pvm-rest service started?  Retrying %(try_num)d "
                          "of %(max_tries)d after %(delay)d seconds."),
                        dict(try_num=try_num, max_tries=max_tries - 1,
                             delay=delay, args=args, kwargs=kwargs))
            time.sleep(delay)

        LOG.info(_("Session logging on %s"), self.host)
        headers = {
            'Accept': c.TYPE_TEMPLATE % ('web', 'LogonResponse'),
            'Content-Type': c.TYPE_TEMPLATE % ('web', 'LogonRequest')
        }
        if self.use_file_auth:
            body = c.LOGONREQUEST_TEMPLATE_FILE % {'userid': self.username}
        else:
            # XML-escape the password so special chars survive the payload.
            passwd = sax_utils.escape(self.password)
            body = c.LOGONREQUEST_TEMPLATE_PASS % {'userid': self.username,
                                                   'passwd': passwd}

        # Convert it to a string-type from unicode-type encoded with UTF-8
        # Without the socket code will implicitly convert the type with ASCII
        body = body.encode('utf-8')

        if self.protocol == 'http' or not self.certpath:
            # certificate validation is disabled
            verify = False
        elif util.validate_certificate(self.host, self.port, self.certpath,
                                       self.certext):
            # Attempt to validate based on certificates stored in
            # self.certpath
            verify = False
        else:
            # Have the requests module validate the certificate
            verify = True

        try:
            # relogin=False to prevent multiple attempts with same credentials
            resp = retry.retry(
                tries=conn_tries, delay_func=delay_func, http_codes=[404],
                retry_except=pvmex.ConnectionError)(self.request)(
                'PUT', c.LOGON_PATH, headers=headers, body=body,
                sensitive=True, verify=verify, relogin=False, login=True)
        except pvmex.Error as e:
            if e.response:
                # strip out sensitive data
                e.response.reqbody = ""
            raise

        # parse out X-API-Session value
        root = etree.fromstring(resp.body.encode('utf-8'))

        with self._lock:
            tok = (self._get_auth_tok_from_file(root, resp)
                   if self.use_file_auth
                   else self._get_auth_tok(root, resp))
            self._sessToken = tok
            self._logged_in = True
            self.mc_type = resp.headers.get('X-MC-Type', 'HMC')
            self.schema_version = root.get('schemaVersion')
            self.traits = pvm_traits.APITraits(self)

    @staticmethod
    def _get_auth_tok(root, resp):
        """Extract session token from password-based Logon response.

        :param root: etree.fromstring-parsed root of the LogonResponse.
        :param resp: The entire response payload from the LogonRequest.
        :return: X-API-Session token for use with subsequent requests.
        :raise pvmex.Error: If no token could be parsed from the response.
        """
        tok = root.findtext('{%s}X-API-Session' % c.WEB_NS)
        if not tok:
            resp.reqbody = ""
            msg = _("Failed to parse a session token from the PowerVM "
                    "response.")
            # Fixed: was msg + (_(' Body= %s'), resp.body) - concatenating a
            # str with a tuple raises TypeError.  Use lazy %-args instead.
            LOG.error(msg + _(' Body= %s'), resp.body)
            raise pvmex.Error(msg, response=resp)
        return tok

    @staticmethod
    def _get_auth_tok_from_file(root, resp):
        """Extract session token from file-based Logon response.

        :param root: etree.fromstring-parsed root of the LogonResponse.
        :param resp: The entire response payload from the LogonRequest.
        :return: X-API-Session token for use with subsequent requests.
        :raise pvmex.AuthFileReadError: If the token file is not readable.
        :raise pvmex.AuthFileAccessError: On other token file I/O errors.
        :raise pvmex.Error: If no file path or token could be extracted.
        """
        tokfile_path = root.findtext('{%s}X-API-SessionFile' % c.WEB_NS)
        if not tokfile_path:
            msg = _("Failed to parse a session file path from the PowerVM "
                    "response.")
            # Fixed: was msg + (_(' Body= %s'), resp.body) - concatenating a
            # str with a tuple raises TypeError.  Use lazy %-args instead.
            LOG.error(msg + _(' Body= %s'), resp.body)
            raise pvmex.Error(msg, response=resp)
        try:
            with open(tokfile_path, 'r') as tokfile:
                tok = tokfile.read().strip(' \n')
        except IOError as ioe:
            if ioe.errno == errno.EACCES:
                raise pvmex.AuthFileReadError(access_file=str(tokfile_path))
            else:
                raise pvmex.AuthFileAccessError(
                    access_file=str(tokfile_path),
                    error=os.strerror(ioe.errno))
        if not tok:
            msg = _("Token file %s didn't contain a readable session "
                    "token.") % tokfile_path
            LOG.error(msg)
            raise pvmex.Error(msg, response=resp)
        return tok

    def _logoff(self):
        """Delete this Session's token on the REST server (best effort)."""
        with self._lock:
            if not self._logged_in:
                return
            LOG.info(_("Session logging off %s"), self.host)
            try:
                # relogin=False to prevent multiple attempts
                self.request('DELETE', c.LOGON_PATH, relogin=False)
            except Exception:
                # Best effort - the session is going away regardless.
                LOG.exception(_('Problem logging off.  Ignoring.'))

            self._logged_in = False
            # this should only ever be called when Session has gone out of
            # scope, but just in case someone calls it directly while requests
            # are in flight, set _relogin_unsafe so that those requests won't
            # enter relogin processing when they get an HTTP 401.
            self._relogin_unsafe = True
# The base will always be the session.request method func = self.session.request # Stack the helpers by reversing the order list if helpers is not None: for helper in helpers[::-1]: func = helper(func) # Now just call the function resp = func(method, path, **kwds) # Assuming the response is a Response, attach this adapter to it. if isinstance(resp, Response): resp.adapter = self return resp def create(self, element, root_type, root_id=None, child_type=None, child_id=None, suffix_type=None, suffix_parm=None, detail=None, service='uom', content_service=None, timeout=-1, auditmemento=None, sensitive=False, helpers=None): """Create a new resource. Will build the URI path using the provided arguments. """ self._validate('create', root_type, root_id, child_type, child_id, suffix_type, suffix_parm, detail) path = self.build_path(service, root_type, root_id, child_type, child_id, suffix_type, suffix_parm, detail, xag=[]) return self.create_by_path( element, path, content_service=content_service, timeout=timeout, auditmemento=auditmemento, sensitive=sensitive, helpers=helpers) def create_job(self, job, root_type, root_id=None, child_type=None, child_id=None, timeout=-1, auditmemento=None, sensitive=False, helpers=None): if not job.tag == 'JobRequest': raise ValueError(_('job must be a JobRequest element')) op = job.findtext('RequestedOperation/OperationName') if not op: raise ValueError(_('JobRequest is missing OperationName')) return self.create(job, root_type, root_id, child_type, child_id, suffix_type='do', suffix_parm=op, content_service='web', timeout=timeout, auditmemento=auditmemento, sensitive=sensitive, helpers=helpers) def create_by_path(self, element, path, content_service=None, timeout=-1, auditmemento=None, sensitive=False, helpers=None): """Create a new resource where the URI path is already known.""" path = util.dice_href(path) m = re.search(r'%s(\w+)/(\w+)' % c.API_BASE_PATH, path) if not m: raise ValueError(_('path=%s is not a PowerVM API reference') % 
path) if not content_service: content_service = m.group(1) headers = {'Accept': 'application/atom+xml; charset=UTF-8'} if re.search('/do/', path): headers['Content-Type'] = c.TYPE_TEMPLATE % (content_service, 'JobRequest') else: # strip off details, if present p = urlparse.urlparse(path).path headers['Content-Type'] = c.TYPE_TEMPLATE % ( content_service, p.rsplit('/', 1)[1]) resp = self._request('PUT', path, helpers=helpers, headers=headers, body=element.toxmlstring(), timeout=timeout, auditmemento=auditmemento, sensitive=sensitive) resp._unmarshal_atom() return resp def read(self, root_type, root_id=None, child_type=None, child_id=None, suffix_type=None, suffix_parm=None, detail=None, service='uom', etag=None, timeout=-1, auditmemento=None, age=-1, xag=None, sensitive=False, helpers=None, add_qp=None): """Retrieve an existing resource. Will build the URI path using the provided arguments. :param root_type: String ROOT REST element type. :param root_id: String ROOT REST element UUID. If unspecified, the feed of root_type is fetched. Required if child_type is specified. :param child_type: String CHILD REST element type. :param child_id: String CHILD REST element UUID. If unspecified, the feed of child_type is fetched. :param suffix_type: Suffix type added to the path (with '/'). For special URIs, like Job requests (e.g. 'do' in .../do/Something). :param suffix_parm: Suffix parameter added to the path (with '/'). For special URIs, like Job requests (e.g. 'Something' in .../do/Something). :param detail: Requested detail level of the response. Obsolete. :param service: REST service type, one of pypowervm.const.SERVICE_BY_NS :param etag: Not used (caching disabled). :param timeout: Timeout in seconds for the HTTP request. :param auditmemento: X-Audit-Memento header registered in the REST server logs for debug purposes, allowing this request to be identified therein. :param age: Not used (caching disabled). :param xag: List of extended attribute group enum values. 
If unspecified or None, 'None' will be appended. If the empty list (xag=[]), no extended attribute query parameter will be added, resulting in the server's default extended attribute group behavior. :param sensitive: If True, headers and payloads will be hidden in log entries. :param helpers: A list of decorator methods in which to wrap the HTTP request call. See the pypowervm.helpers package for examples. :param add_qp: Optional list of (key, value) tuples to add to the query string of the request. :return: Response object representing the result of the query. """ self._validate('read', root_type, root_id, child_type, child_id, suffix_type, suffix_parm, detail) path = self.build_path(service, root_type, root_id, child_type, child_id, suffix_type, suffix_parm, detail, xag=xag, add_qp=add_qp) return self.read_by_path(path, etag, timeout=timeout, auditmemento=auditmemento, age=age, sensitive=sensitive, helpers=helpers) def read_job(self, job_id, etag=None, timeout=-1, auditmemento=None, sensitive=False, helpers=None): return self.read('jobs', job_id, etag=etag, timeout=timeout, auditmemento=auditmemento, sensitive=sensitive, helpers=helpers) def read_jobs(self, root_type=None, root_id=None, child_type=None, child_id=None, detail=None, etag=None, timeout=-1, auditmemento=None, sensitive=False, helpers=None): return self.read(root_type, root_id, child_type, child_id, suffix_type='jobs', detail=detail, etag=etag, timeout=timeout, auditmemento=auditmemento, sensitive=sensitive, helpers=helpers) def read_by_href(self, href, suffix_type=None, suffix_parm=None, detail=None, etag=None, timeout=-1, auditmemento=None, age=-1, sensitive=False, helpers=None, xag=None): """Retrieve an existing resource based on a link's href.""" o = urlparse.urlparse(href) hostname_mismatch = (o.hostname.lower() != self.session.host.lower()) if hostname_mismatch or o.port != self.session.port: LOG.debug('href=%s will be modified to use %s:%s', href, self.session.host, self.session.port) path = 
self.extend_path(util.dice_href(href), suffix_type, suffix_parm, detail, xag=xag) return self.read_by_path(path, etag=etag, timeout=timeout, auditmemento=auditmemento, age=age, sensitive=sensitive, helpers=helpers) def read_by_path(self, path, etag=None, timeout=-1, auditmemento=None, age=-1, sensitive=False, helpers=None): """Retrieve an existing resource where URI path is already known.""" path = util.dice_href(path) resp = self._read_by_path(path, etag, timeout, auditmemento, sensitive, helpers=helpers) if 'atom' in resp.reqheaders['Accept']: resp._unmarshal_atom() return resp def _read_by_path(self, path, etag, timeout, auditmemento, sensitive, helpers=None): m = re.search(r'%s(\w+)/(\w+)' % c.API_BASE_PATH, path) if not m: raise ValueError(_('path=%s not a PowerVM API reference') % path) headers = {} json_search_str = (c.UUID_REGEX + '/quick$' + '|/quick/' + r'|\.json$') if re.search(json_search_str, util.dice_href(path, include_query=False, include_fragment=False)): # Successful request will return application/json; errors (like 400 # or 404) will return application/atom+xml. headers['Accept'] = '*/*' else: headers['Accept'] = 'application/atom+xml' if etag: headers['If-None-Match'] = etag resp = self._request('GET', path, helpers=helpers, headers=headers, timeout=timeout, auditmemento=auditmemento, sensitive=sensitive) return resp def update(self, data, etag, root_type, root_id=None, child_type=None, child_id=None, suffix_type=None, service='uom', timeout=-1, auditmemento=None, xag=None, sensitive=False, helpers=None): """Update an existing resource. Will build the URI path using the provided arguments. 
""" self._validate('update', root_type, root_id, child_type, child_id, suffix_type) path = self.build_path(service, root_type, root_id, child_type, child_id, suffix_type, xag=xag) return self.update_by_path(data, etag, path, timeout=timeout, auditmemento=auditmemento, sensitive=sensitive, helpers=helpers) def update_by_path(self, data, etag, path, timeout=-1, auditmemento=None, sensitive=False, helpers=None): """Update an existing resource where the URI path is already known.""" path = util.dice_href(path) m = re.match(r'%s(\w+)/(\w+)' % c.API_BASE_PATH, path) if not m: raise ValueError(_('path=%s is not a PowerVM API reference') % path) headers = {'Accept': 'application/atom+xml; charset=UTF-8'} if m.group(1) == 'pcm': headers['Content-Type'] = 'application/xml' else: t = path.rsplit('/', 2)[1] headers['Content-Type'] = c.TYPE_TEMPLATE % (m.group(1), t) if etag: headers['If-Match'] = etag if hasattr(data, 'toxmlstring'): body = data.toxmlstring() else: body = data resp = self._request( 'POST', path, helpers=helpers, headers=headers, body=body, timeout=timeout, auditmemento=auditmemento, sensitive=sensitive) resp._unmarshal_atom() return resp def delete(self, root_type, root_id=None, child_type=None, child_id=None, suffix_type=None, suffix_parm=None, service='uom', etag=None, timeout=-1, auditmemento=None, helpers=None): """Delete an existing resource. Will build the URI path using the provided arguments. 
""" self._validate('delete', root_type, root_id, child_type, child_id, suffix_type, suffix_parm) path = self.build_path(service, root_type, root_id, child_type, child_id, suffix_type, suffix_parm) return self.delete_by_path(path, etag, timeout=timeout, auditmemento=auditmemento, helpers=helpers) def delete_by_href(self, href, etag=None, timeout=-1, auditmemento=None, helpers=None): """Delete an existing resource based on a link's href.""" o = urlparse.urlparse(href) hostname_mismatch = (o.hostname.lower() != self.session.host.lower()) if hostname_mismatch or o.port != self.session.port: LOG.debug('href=%s will be modified to use %s:%s', href, self.session.host, self.session.port) return self.delete_by_path(o.path, etag=etag, timeout=timeout, auditmemento=auditmemento, helpers=helpers) def delete_by_path(self, path, etag=None, timeout=-1, auditmemento=None, helpers=None): """Delete an existing resource where the URI path is already known.""" path = util.dice_href(path, include_query=False, include_fragment=False) m = re.search(r'%s(\w+)/(\w+)' % c.API_BASE_PATH, path) if not m: raise ValueError(_('path=%s is not a PowerVM API reference') % path) headers = {} if etag: headers['If-Match'] = etag return self._request('DELETE', path, helpers=helpers, headers=headers, timeout=timeout, auditmemento=auditmemento) def upload_file(self, filedescr, filehandle, chunksize=65536, timeout=-1, auditmemento=None, replacing=False, helpers=None): try: fileid = filedescr.findtext('FileUUID') mediatype = filedescr.findtext('InternetMediaType') except Exception: raise ValueError(_('Invalid file descriptor')) path = c.API_BASE_PATH + 'web/File/contents/' + fileid headers = {'Accept': 'application/vnd.ibm.powervm.web+xml', 'Content-Type': mediatype} return self._request('POST' if replacing else 'PUT', path, helpers=helpers, headers=headers, timeout=timeout, auditmemento=auditmemento, filehandle=filehandle, chunksize=chunksize) def download_file(self, filedescr, filehandle, 
chunksize=65536, timeout=-1, auditmemento=None, helpers=None): try: fileid = filedescr.findtext('FileUUID') mediatype = filedescr.findtext('InternetMediaType') except Exception: raise ValueError(_('Invalid file descriptor')) path = c.API_BASE_PATH + 'web/File/contents/' + fileid headers = {'Accept': mediatype} return self._request('GET', path, helpers=helpers, headers=headers, timeout=timeout, auditmemento=auditmemento, filehandle=filehandle, chunksize=chunksize) def build_href( self, root_type, root_id=None, child_type=None, child_id=None, suffix_type=None, suffix_parm=None, detail=None, xag=None, service='uom'): p = self.build_path( service, root_type, root_id, child_type, child_id, suffix_type, suffix_parm, detail, xag=xag) return self.session.dest + p @classmethod def build_path(cls, service, root_type, root_id=None, child_type=None, child_id=None, suffix_type=None, suffix_parm=None, detail=None, xag=None, add_qp=None): path = c.API_BASE_PATH + service + '/' + root_type if root_id: path += '/' + root_id if child_type: path += '/' + child_type if child_id: path += '/' + child_id return cls.extend_path(path, suffix_type=suffix_type, suffix_parm=suffix_parm, detail=detail, xag=xag, add_qp=add_qp) @staticmethod def extend_path(basepath, suffix_type=None, suffix_parm=None, detail=None, xag=None, add_qp=None): """Extend a base path with zero or more of suffix, detail, and xag. :param basepath: The path string to be extended. :param suffix_type: Suffix key (string) to be appended. :param suffix_parm: Suffix parameter value to be appended. Ignored if suffix_type is not specified. :param detail: Value for the 'detail' query parameter. :param xag: List of extended attribute group enum values. If unspecified or None, 'None' will be appended. If the empty list (xag=[]), no extended attribute query parameter will be added, resulting in the server's default extended attribute group behavior. 
:param add_qp: Optional list of (key, value) tuples to add to the
                       query string of the request.
        :return: String base path (without protocol://server:port part).
        """
        path = basepath
        if suffix_type:
            # operations, do, jobs, cancel, quick, search, ${search-string}
            path = util.extend_basepath(path, '/' + suffix_type)
            if suffix_parm:
                path = util.extend_basepath(path, '/' + suffix_parm)
        if detail:
            path += ('&' if '?' in path else '?') + 'detail=' + detail

        # Explicit xag is always honored as-is.  If unspecified, we usually
        # want to include group=None.  However, there are certain classes of
        # URI from which we want to omit ?group entirely.
        if xag is None:
            xagless_suffixes = ('quick', 'do')
            if suffix_type in xagless_suffixes:
                xag = []
        path = util.check_and_apply_xag(path, xag)

        if add_qp:
            # Merge the extra query parameters with any already on the path,
            # then reassemble the URL.
            parsed = urlparse.urlsplit(path)
            qparms = urlparse.parse_qsl(parsed.query) if parsed.query else []
            qparms.extend(add_qp)
            qstr = urllib.urlencode(qparms)
            path = urlparse.urlunsplit((parsed.scheme, parsed.netloc,
                                        parsed.path, qstr, parsed.fragment))

        return path

    @staticmethod
    def _validate(req_method, root_type, root_id=None, child_type=None,
                  child_id=None, suffix_type=None, suffix_parm=None,
                  detail=None):
        """Validate REST-verb-specific combinations of URI path components.

        Raises ValueError on any inconsistent combination (e.g. a child_type
        without a root_id, or a create against an existing child_id).
        """
        # 'detail' param currently unused
        if child_type and not root_id:
            raise ValueError(_('Expected root_id'))
        if child_id and not child_type:
            raise ValueError(_('Expected child_type'))
        if req_method == 'create':
            if suffix_type:
                # Only job invocations ('do') may be created with a suffix.
                if suffix_type != 'do':
                    raise ValueError(_('Unexpected suffix_type=%s') %
                                     suffix_type)
                if not suffix_parm:
                    raise ValueError(_('Expected suffix_parm'))
                if child_type and not child_id:
                    raise ValueError(_('Expected child_id'))
            else:
                # Plain create targets a type, never an existing instance.
                if child_id:
                    raise ValueError(_('Unexpected child_id'))
                if root_id and not child_type:
                    raise ValueError(_('Unexpected root_id'))
        elif req_method == 'read':
            # no read-specific validation at this time
            pass
        elif req_method == 'update':
            if 'preferences' in [root_type, child_type]:
                # Preferences are singletons - no instance ids allowed.
                if child_id:
                    raise ValueError(_('Unexpected child_id'))
                if root_id and not child_type:
                    raise ValueError(_('Unexpected root_id'))
            else:
                if not root_id:
                    raise ValueError(_('Expected root_id'))
                if child_type and not child_id:
                    raise ValueError(_('Expected child_id'))
            if suffix_type is not None and suffix_type != 'cancel':
                raise ValueError(_('Unexpected suffix_type=%s') % suffix_type)
        elif req_method == 'delete':
            if suffix_type:
                # Only job deletions may carry a suffix.
                if suffix_type != 'jobs':
                    raise ValueError(_('Unexpected suffix_type=%s') %
                                     suffix_type)
                if not suffix_parm:
                    raise ValueError(_('Expected suffix_parm'))
            else:
                if not root_id:
                    raise ValueError(_('Expected root_id'))
                if child_type and not child_id:
                    raise ValueError(_('Expected child_id'))
        else:
            raise ValueError(_('Unexpected req_method=%s') % req_method)


class Response(object):
    """Response to PowerVM API Adapter method invocation."""

    def __init__(self, reqmethod, reqpath, status, reason, headers,
                 reqheaders=None, reqbody='', body='', orig_reqpath=''):
        """Represents an HTTP request/response from Adapter.request().

        :param reqmethod: The HTTP method of the request (e.g. 'GET', 'PUT')
        :param reqpath: The path (not URI) of the request.  Construct the URI
                        by prepending Response.adapter.session.dest.
        :param status: Integer HTTP status code (e.g. 200)
        :param reason: String HTTP Reason code (e.g. 'No Content')
        :param headers: Dict of headers from the HTTP response.
        :param reqheaders: Dict of headers from the HTTP request.
        :param reqbody: String payload of the HTTP request.
        :param body: String payload of the HTTP response.
        :param orig_reqpath: Not used.
""" self.reqmethod = reqmethod self.reqpath = reqpath self.reqheaders = reqheaders if reqheaders else {} self.reqbody = reqbody self.status = status self.reason = reason self.headers = headers self.body = body self.feed = None self.entry = None # Set by _request() self.adapter = None def __deepcopy__(self, memo=None): """Produce a deep (except for adapter) copy of this Response.""" ret = self.__class__( self.reqmethod, self.reqpath, self.status, self.reason, copy.deepcopy(self.headers, memo=memo), reqheaders=copy.deepcopy(self.reqheaders, memo=memo), reqbody=self.reqbody, body=self.body) if self.feed is not None: ret.feed = copy.deepcopy(self.feed, memo=memo) if self.entry is not None: ret.entry = copy.deepcopy(self.entry, memo=memo) # Adapter is the one thing not deep-copied ret.adapter = self.adapter return ret @property def etag(self): return self.headers.get('etag', None) @property def atom(self): return self.feed if self.feed else self.entry def _extract_atom(self): """Unmarshal my body and set my feed or entry accordingly. :return: A message indicating the reason for the error, or None if no error occurred. """ err_reason = None root = None try: root = etree.fromstring(self.body) except Exception as e: err_reason = (_('Error parsing XML response from PowerVM: ' '%s') % str(e)) if root is not None and root.tag == str( etree.QName(c.ATOM_NS, 'feed')): self.feed = ent.Feed.unmarshal_atom_feed(root, self) elif root is not None and root.tag == str( etree.QName(c.ATOM_NS, 'entry')): self.entry = ent.Entry.unmarshal_atom_entry(root, self) elif root is not None and '/Debug/' in self.reqpath: # Special case for Debug URIs - caller is expected to make use # of self.body only, and understand how it's formatted. 
            pass
        elif err_reason is None:
            err_reason = _('Response is not an Atom feed/entry')
        return err_reason

    def _unmarshal_atom(self):
        """Parse self.body into feed/entry; raise AtomError on failure.

        Logs the full request/response on error for diagnosis.
        """
        err_reason = None
        if self.body:
            err_reason = self._extract_atom()
        elif self.reqmethod == 'GET':
            if self.status == c.HTTPStatus.OK_NO_CONTENT:
                if util.is_instance_path(self.reqpath):
                    # An instance GET should never legitimately be empty.
                    err_reason = _('Unexpected HTTP 204 for request')
                else:
                    # PowerVM returns HTTP 204 (No Content) when you
                    # ask for a feed that has no entries.
                    self.feed = ent.Feed({}, [])
            elif self.status == c.HTTPStatus.NO_CHANGE:
                # 304 Not Modified - etag matched; no body expected.
                pass
            else:
                err_reason = _('Unexpectedly empty response body')
        if err_reason is not None:
            LOG.error(_('%(err_reason)s:\n'
                        'request headers: %(reqheaders)s\n\n'
                        'request body: %(reqbody)s\n\n'
                        'response headers: %(respheaders)s\n\n'
                        'response body: %(respbody)s'),
                      {'err_reason': err_reason,
                       'reqheaders': self.reqheaders, 'reqbody': self.reqbody,
                       'respheaders': self.headers, 'respbody': self.body})
            raise pvmex.AtomError(_('Atom error for %(method)s %(path)s: '
                                    '%(reason)s') %
                                  {'method': self.reqmethod,
                                   'path': self.reqpath,
                                   'reason': err_reason}, self)


@six.add_metaclass(abc.ABCMeta)
class EventListener(object):
    """Abstract interface for the PowerVM event listener."""

    @abc.abstractmethod
    def subscribe(self, handler):
        """Subscribe an EventHandler to receive events.

        :param handler: EventHandler
        """

    @abc.abstractmethod
    def unsubscribe(self, handler):
        """Unsubscribe an EventHandler from receiving events.

        :param handler: EventHandler
        """

    @abc.abstractmethod
    def shutdown(self):
        """Shutdown this EventListener."""


class _EventListener(EventListener):
    def __init__(self, session, timeout=-1):
        """The event listener associated with a Session.

        This class should not be instantiated directly.  Instead construct
        a Session and use get_event_listener() to create it.

        :param session: The Session this listener is to use.
        :param timeout: How long to wait for any events to be returned.
                        -1 = wait indefinitely.
""" if session is None: raise ValueError(_('Session must not be None')) if session.has_event_listener: raise ValueError(_('An event listener is already active on the ' 'session.')) self.appid = hashlib.md5(session._sessToken).hexdigest() self.timeout = timeout if timeout != -1 else session.timeout self._lock = threading.RLock() self.handlers = [] self._pthread = None self.host = session.host self.adp = None self._prime(session) def _prime(self, session): try: # Establish a weak reference proxy to the session. This is needed # because we don't want a circular reference to the session. self.adp = Adapter(weakref.proxy(session)) # initialize events, raw_events, evtwraps = self._get_events() except pvmex.Error as e: raise pvmex.Error(_('Failed to initialize event feed listener: ' '%s') % e) if not events.get('general') == 'init': # Something else is sharing this feed! raise ValueError(_('Application id "%s" not unique') % self.appid) # No errors initializing, so dispatch what we recieved. self._dispatch_events(events, raw_events, evtwraps) def subscribe(self, handler): if not isinstance(handler, _EventHandler): raise ValueError('Handler must be an EventHandler') if self.adp is None: raise Exception(_('Shutting down')) with self._lock: if handler in self.handlers: raise ValueError(_('This handler is already subscribed')) self.handlers.append(handler) if not self._pthread: self._pthread = _EventPollThread(self) self._pthread.start() def unsubscribe(self, handler): if not isinstance(handler, _EventHandler): raise ValueError(_('Handler must be an EventHandler')) with self._lock: if handler not in self.handlers: raise ValueError(_('Handler not found in subscriber list')) self.handlers.remove(handler) if not self.handlers: self._pthread.stop() self._pthread = None def shutdown(self): LOG.info(_('Shutting down EventListener for %s'), self.host) with self._lock: for handler in self.handlers: self.unsubscribe(handler) LOG.info(_('EventListener shutdown complete for %s'), 
self.host)

    def getevents(self):
        all_events = self._get_events()
        # Legacy method returned just the events.
        return all_events[0]

    def _get_events(self):
        """Gets the events and formats them into 'events' and 'raw_events'."""
        events = {}
        raw_events = []
        event_wraps = []
        resp = None

        # Read event feed
        try:
            resp = self.adp.read('Event?QUEUE_CLIENTKEY_METHOD='
                                 'USE_APPLICATIONID&QUEUE_APPLICATIONID=%s'
                                 % self.appid, timeout=self.timeout)
        except Exception as e:
            LOG.warning(_('Error while getting PowerVM events: %s.  (Is the '
                          'pvm-rest service down?)'), e)
            # Don't die. The handler will retry. But sleep so we don't thrash
            time.sleep(5)

        if resp:
            # Parse event feed
            for entry in resp.feed.entries:
                self._format_events(entry, events, raw_events)
            # Do this here to avoid circular imports
            import pypowervm.wrappers.event as event_wrap
            event_wraps = event_wrap.Event.wrap(resp)

        return events, raw_events, event_wraps

    def _format_events(self, entry, events, raw_events):
        """Formats an event Entry into events and raw events.

        This method operates on the events and raw_events lists themselves.
        It does not pass back the results.

        :param entry: The event entry to format for the list of events.
        :param events: A dictionary of events to add, remove, or update.
        :param raw_events: A list of raw events to add to.
""" etype = entry.element.findtext('EventType') href = entry.element.findtext('EventData') if etype == 'NEW_CLIENT': events['general'] = 'init' elif etype in ['CACHE_CLEARED', 'MISSING_EVENTS']: # Clears all prior events keys = [k for k in events] for k in keys: del events[k] events['general'] = 'invalidate' elif etype == 'ADD_URI': events[href] = 'add' elif etype == 'DELETE_URI': events[href] = 'delete' elif etype in ['MODIFY_URI', 'INVALID_URI', 'HIDDEN_URI']: if href not in events: events[href] = 'invalidate' elif etype not in ['VISIBLE_URI', 'CUSTOM_CLIENT_EVENT']: LOG.error(_('Unexpected EventType=%s'), etype) # Now format the event for the raw handlers eid = entry.element.findtext('EventID') edetail = entry.element.findtext('EventDetail') raw_events.append({'EventType': etype, 'EventData': href, 'EventID': eid, 'EventDetail': edetail}) def _dispatch_events(self, events, raw_events, wrap_events): """Invoke appropriate EventHandler 'process' callback. :param events: Events dict of the format {: } - see docstring for EventHandler.process. :param raw_events: List of event dicts of the format {'EventType': , 'EventData': , 'EventID': , 'EventDetail': } :param wrap_events: List of pypowervm.wrappers.event.Event wrappers. """ def call_handler(handler): try: if isinstance(handler, WrapperEventHandler): handler.process(wrap_events) elif isinstance(handler, RawEventHandler): handler.process(raw_events) else: handler.process(events) except Exception: LOG.exception(_('Error while processing PowerVM events')) # Notify subscribers with self._lock: for hndlr in self.handlers: call_handler(hndlr) @six.add_metaclass(abc.ABCMeta) class _EventHandler(object): """Common class for all Event handlers. Event handlers are called to process events from the EventListener. """ @abc.abstractmethod def process(self, events): """Process the event that comes back from the API. :param events: Events from the API. 
""" pass @six.add_metaclass(abc.ABCMeta) class EventHandler(_EventHandler): """Used to handle events from the API. The session can poll for events back from the API. An event will give a small indication of something that has occurred within the system. An example may be a ClientNetworkAdapter being created against an LPAR. Implement this class and add it to the Session's event listener to process events back from the API. """ @abc.abstractmethod def process(self, events): """Process the event that comes back from the API. :param events: A dictionary of events that has come back from the system. Format: - Key -> URI of event - Value -> Action of event. May be one of the following: add, delete or invalidate A special key of 'general' may exist. The value for this is init or invalidate. init indicates that the whole system is being initialized. An invalidate indicates that the API event system has been refreshed and the user should do a clean get of the data it needs. """ pass @six.add_metaclass(abc.ABCMeta) class RawEventHandler(_EventHandler): """Used to handle raw events from the API. With this handler, no processing is done on the events. The events will be passed as a sequence of dicts. Implement this class and add it to the Session's event listener to process events back from the API. """ @abc.abstractmethod def process(self, events): """Process the event that comes back from the API. :param events: A sequence of event dicts that has come back from the system. Format: [ { 'EventType': , 'EventID': , 'EventData': , 'EventDetail': }, ] """ pass @six.add_metaclass(abc.ABCMeta) class WrapperEventHandler(_EventHandler): """Used to handle wrapped events from the API. With this handler, no processing is done on the events. The events will be passed as a list of pypowervm.wrappers.event.Event. Implement this class and add it to the Session's event listener to process events back from the API. 
""" @abc.abstractmethod def process(self, events): """Process the event that comes back from the API. :param events: A list of pypowervm.wrappers.event.Event that has come back from the system. See that wrapper class for details. """ pass class _EventPollThread(threading.Thread): def __init__(self, eventlistener): threading.Thread.__init__(self) self.eventlistener = eventlistener self.done = False def run(self): while not self.done: events, raw_events, evtwraps = self.eventlistener._get_events() self.eventlistener._dispatch_events(events, raw_events, evtwraps) def stop(self): self.done = True pypowervm-1.1.24/pypowervm/locale/0000775000175000017500000000000013571367172016560 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/ru/0000775000175000017500000000000013571367172017206 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/ru/pypowervm.po0000664000175000017500000022527213571367171021627 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "ÐедопуÑтимый протокол \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "Ðезашифрованный обмен данными Ñ PowerVM! Верните HTTPS в конфигурации." 
#: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "Ðе удалоÑÑŒ вычиÑлить напоминание Ð´Ð»Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ Ð¿Ð¾ умолчанию, будет иÑпользоватьÑÑ 'default'." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "Ð›Ð¾ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ñ Ð½Ðµ поддерживаетÑÑ Ð² HMC." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "ÐаÑтройка обработчика Ñобытий Ð´Ð»Ñ %s" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "ÐÐµÐ¿Ñ€ÐµÐ´Ð²Ð¸Ð´ÐµÐ½Ð½Ð°Ñ ÑÑылка на файл в запроÑе %s" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "ÐÐµÐ¿Ñ€ÐµÐ´Ð²Ð¸Ð´ÐµÐ½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° Ð´Ð»Ñ %(meth)s %(url)s" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "ÐÐµÐ¿Ñ€ÐµÐ´Ð²Ð¸Ð´ÐµÐ½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°: %(class)s Ð´Ð»Ñ %(method)s %(url)s: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "Повторный вход раÑценен как небезопаÑный. Этот ÑкземплÑÑ€ ÑеанÑа больше не должен " "иÑпользоватьÑÑ." 
#: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "Попытка повторного входа %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "Повторный вход 401, тело ответа:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "Сбой повторного входа, тело ответа: \n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "Сбой повторного входа:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "Сбой повторного входа Ñ ÐµÑ‰Ðµ одним кодом 401, тело ответа:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "подозрительный ответ HTTP 401 Ð´Ð»Ñ %(method)s %(path)s: маркер новый" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "Ðе удалоÑÑŒ подключитьÑÑ Ðº Ñерверу REST. Служба pvm-rest запущена? " "Повтор %(try_num)d из %(max_tries)d через %(delay)d Ñ." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "СеанÑовый вход в %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "Ошибка анализа маркера ÑеанÑа из ответа PowerVM." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " Тело= %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "Ошибка анализа пути к файлам ÑеанÑа из ответа PowerVM." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "Ð’ файле маркеров %s не оказалоÑÑŒ маркера ÑеанÑа, который можно прочитать." 
#: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "Выход из ÑеанÑа %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "Ðеполадка при выходе из ÑиÑтемы. Она проигнорирована." #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "задание должно быть Ñлементом JobRequest" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "Ð’ JobRequest отÑутÑтвует OperationName" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "путь=%s не ÑвлÑетÑÑ ÑÑылкой на API PowerVM" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "path=%s не ÑвлÑетÑÑ ÑÑылкой на API PowerVM" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "ÐедопуÑтимый деÑкриптор файла" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "ОжидалÑÑ root_id" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "ОжидалÑÑ child_type" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "Ðеожиданный suffix_type=%s" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "ОжидалÑÑ suffix_parm" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "ОжидалÑÑ child_id" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "Ðеожиданный child_id" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "Ðеожиданный root_id" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "Ðеожиданный req_method=%s" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from 
PowerVM: %s" msgstr "Ошибка анализа ответа XML из PowerVM: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "Ответ не ÑвлÑетÑÑ Ð»ÐµÐ½Ñ‚Ð¾Ð¹/запиÑью Atom" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "Ðеожиданный код HTTP 204 Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "Ðеожиданно пуÑтое тело ответа" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "заголовки запроÑа: %(reqheaders)s\n" "\n" "тело запроÑа: %(reqbody)s\n" "\n" "заголовки ответа: %(respheaders)s\n" "\n" "тело ответа: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "Ошибка Atom Ð´Ð»Ñ %(method)s %(path)s: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "Ð¡ÐµÐ°Ð½Ñ Ð½Ðµ должен быть None" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "Обработчик Ñобытий уже активен в ÑеанÑе." 
#: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "Ðе удалоÑÑŒ инициализировать получатель ленты Ñобытий: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "ИД Ð¿Ñ€Ð¸Ð»Ð¾Ð¶ÐµÐ½Ð¸Ñ \"%s\" неуникальный" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "ВыключаетÑÑ" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "Этот обработчик уже имеет подпиÑку" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "Обработчик должен быть ÑкземплÑром EventHandler" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "Обработчик не найден в ÑпиÑке подпиÑчиков" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "Завершение работы EventListener Ð´Ð»Ñ %s" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "Завершение работы EventListener выполнено Ð´Ð»Ñ %s" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "Ошибка при получении Ñобытий PowerVM: %s. (Работает ли Ñлужба pvm-rest?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "Ðеожиданное значение EventType=%s" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "Ошибка обработки Ñобытий PowerVM" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "Ðе удалоÑÑŒ получить подходÑщий физичеÑкий порт FC Ð´Ð»Ñ WWPN %(wwpn)s. Возможно," " недоÑтаточно раÑширенных групп атрибутов VIOS. URI VIOS" " Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа: %(vio_uri)s." 
#: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "Ðе найден Ñлемент: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "Ðе найден LPAR: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "Ðдаптер не найден" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "Сбой операции %(operation_name)s. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ñ '%(operation_name)s' не выполнена. Ðе удалоÑÑŒ выполнить задачу за" " %(seconds)d Ñек." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "Ðевозможно завершить работу ОС в виртуальной машине %(lpar_nm)s, так как " "Ñоединение RMC Ñ Ñтой машиной не активно." #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Ðе удалоÑÑŒ выключить виртуальную машину %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "При выключении виртуальной машины %(lpar_nm)s возник тайм-аут по иÑтечении %(timeout)d " "Ñек." #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Ðе удалоÑÑŒ включить виртуальную машину %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "При включении виртуальной машины %(lpar_nm)s возник тайм-аут по иÑтечении %(timeout)d " "Ñек." 
#: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "Ðе удалоÑÑŒ удалить VLAN %(vlan_id)d, поÑкольку Ñто идентификатор оÑновной VLAN" " в другом Ñетевом моÑте." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "Ðе удалоÑÑŒ предоÑтавить VLAN %(vlan_id)d. По вÑей видимоÑти она ÑодержитÑÑ Ð½Ð° " "уÑтройÑтве '%(dev_name)s' на виртуальном Ñервере ввода-вывода %(vios)s. Это уÑтройÑтво не" " подключено ни к какому Ñетевому моÑту (общий адаптер Ethernet). " "Удалите уÑтройÑтво вручную или добавьте его к Ñетевому моÑту Ð´Ð»Ñ " "продолжениÑ." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "ЛогичеÑкий накопитель Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(lu_name)s уже ÑущеÑтвует в общем пуле " "памÑти %(ssp_name)s." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "Ðе удалоÑÑŒ найти физичеÑкий порт Ð´Ð»Ñ ÑвÑÐ·Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð¾Ð³Ð¾ порта Fibre Channel. " "Возможные причины: нет доÑтупа к VIOS или неправильнаÑ" " ÑÐ¿ÐµÑ†Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ñ Ñ„Ð¸Ð·Ð¸Ñ‡ÐµÑких портов Fibre Channel." #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." 
msgstr "" "Ðе удалоÑÑŒ запуÑтить конÑоль на виртуальной машине. API pypowervm работает" " не в локальном режиме. Развернуть конÑоль можно только еÑли " "pypowervm работает вмеÑте Ñ API PowerVM." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s не имеет подзадач!" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask не может Ñодержать пуÑтую ленту." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема отказала в доÑтупе к файлу %(access_file)s." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема обнаружила ошибку ввода-вывода при чтении файла %(access_file)s: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "Сбой задачи миграции. %(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "Ðе найден иÑточник загрузки Ð´Ð»Ñ Ð’Ðœ %(vm_name)s" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "Ðе удалоÑÑŒ получить кодировку pg83 Ð´Ð»Ñ Ð¶ÐµÑткого диÑка %(dev_name)s. Возможно, " "Ðтрибут parent_entry не задан. Причиной может быть иÑпользование PV, " "полученного через неподдерживаемую цепочку ÑвойÑтв. PV должен быть доÑтупен " "через VIOS.phys_vols, VG.phys_vols или " "VIOS.scsi_mappings[n].backing_storage." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. 
Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "Ðе удалоÑÑŒ изменить ÑвÑзь Ñлемента ÑиÑтемы Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð² ÑвÑзÑÑ… vSCSI. ОжидалоÑÑŒ найти " "только одну ÑоответÑтвующую ÑвÑзь; найдено %(num_mappings)d." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "Ðе удалоÑÑŒ изменить ÑвÑзь Ñлемента ÑиÑтемы Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð² ÑвÑзÑÑ… vSCSI. СвÑзь Ð´Ð»Ñ " "Ñлемента ÑиÑтемы Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ %(stg_name)s уже ÑущеÑтвует Ð´Ð»Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ñкого LPAR %(lpar_uuid)s." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "УÑтройÑтво %(devname)s обнаружено %(count)d раз. ОжидалоÑÑŒ его найти только " "один раз." #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "Ð’ FeedTask %(ft_name)s возникло неÑколько иÑключительных Ñитуаций:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "ОжидалÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ один раздел управлениÑ; обнаружено %(count)d." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "ОжидалоÑÑŒ найти ровно один раздел Ñ Ð˜Ð” %(lpar_id)d. Ðайдено " "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "Ðе найден уровень по умолчанию в общем пуле Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ %(ssp_name)s." #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "УÑтройÑтво Ñ UDID %(udid)s не найдено ни на одном из " "VIOS." 
#: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "ÐедоÑтаточно VIOS Ð´Ð»Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶ÐºÐ¸ в виртуальном машине" " уÑтройÑтва Ñ UDID %(udid)s." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "Ожидаемые фабрики (%(fabrics)s) не найдены ни на одном из Ñерверов " "VIOS." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "Ðе удалоÑÑŒ перекомпоновать виртуальную машину. Она иÑпользует тип ввода-вывода " "%(io_type)s, который не поддерживаетÑÑ Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐºÐ¾Ð¼Ð¿Ð¾Ð½Ð¾Ð²ÐºÐ¸ виртуальной машины." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "ЧиÑло разъемов виртуального Fibre Channel в целевой ÑиÑтеме (%(rebuild_slots)d) не" " ÑоответÑтвует чиÑлу разъемов в клиентÑкой ÑиÑтеме (%(original_slots)d). " "Ðе удалоÑÑŒ перекомпоновать Ñту виртуальную машину в данной ÑиÑтеме." #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "Ð”Ð»Ñ Ñ€ÐµÐ³Ð¸Ñтрации информации о разъемах Ñетевого уÑтройÑтва необходим " "адаптер CNA или VNIC. ВмеÑто Ñтого предоÑтавлено Ñледующее: %(wrapper)s." #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "ÐедоÑтаточно активных Ñерверов VIOS. ОжидалоÑÑŒ " "%(exp)d; обнаружено %(act)d." 
#: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "Ðе доÑтупен ни один Ñервер VIOS. Ожидание, когда VIOS " "Ñтанет активным, в течение %(wait_time)d Ñ. Проверьте ÑвÑзь RMC " "между PowerVM NovaLink и Ñерверами VIOS." #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Ðе найдены адаптеры SR-IOV в режиме SRIOV и в ÑоÑтоÑнии выполнениÑ.\n" "РаÑположение | Режим | СоÑтоÑние\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "Ðевозможно выполнить Ñ‚Ñ€ÐµÐ±Ð¾Ð²Ð°Ð½Ð¸Ñ Ðº избыточноÑти Ð´Ð»Ñ %(red)d. Ðайдено %(found_vfs)d" " пригодных базовых уÑтройÑтв." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "УправлÑÐµÐ¼Ð°Ñ ÑиÑтема не поддерживает vNIC." #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "Ðет активных VIOS, поддерживающих vNIC." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "Задан уровень избыточноÑти, равный %(red)d, однако управлÑÐµÐ¼Ð°Ñ ÑиÑтема не" " поддерживает аварийное переключение vNIC." #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "Уровень избыточноÑти равен %(red)d, однако нет ни одного активного " "VIOS, поддерживающего аварийное переключение vNIC." 
#: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "Ðе удалоÑÑŒ найти группу томов %(vol_grp)s Ð´Ð»Ñ Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð¾Ð³Ð¾ " "оптичеÑкого ноÑителÑ. Ðе удалоÑÑŒ Ñоздать хранилище ноÑителей." #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "Обновление управлÑемой ÑиÑтемы не выполнено, так как запрошено изменение" " одного или неÑкольких физичеÑких портов SR-IOV, которые иÑпользуютÑÑ vNIC.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "Ðе удалоÑÑŒ Ñоздать виртуальный терминал на оÑнове VNC: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "КÑш адаптера не поддерживаетÑÑ." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "Значение '%(value)s' недопуÑтимо Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð° '%(enum)s'. ДопуÑтимые значениÑ: " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "Ðе найден VIOS Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(vios_name)s." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "Ðе найдена группа томов Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(vg_name)s." #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "Раздел Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(part_name)s не ÑвлÑетÑÑ Ñ€Ð°Ð·Ð´ÐµÐ»Ð¾Ð¼ IBMi." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "ПуÑтой параметр раздела в функции PanelJob." 
#: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ñ„ÑƒÐ½ÐºÑ†Ð¸Ð¸ панели %(op_name)s. Одна из %(valid_ops)s " "ожидалаÑÑŒ." #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Ðе удалоÑÑŒ выполнить поиÑк iSCSI Ð´Ð»Ñ VIOS %(vios_uuid)s. Код возврата: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Ðе удалоÑÑŒ выполнить выход из iSCSI Ð´Ð»Ñ VIOS %(vios_uuid)s. Код возврата: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Ðе удалоÑÑŒ удалить iSCSI Ð´Ð»Ñ VIOS %(vios_uuid)s. Код возврата: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "Vstor %(stor_udid)s не найден Ð´Ð»Ñ VIOS %(vios_uuid)s." #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "ÐŸÑ€ÐµÐ´Ð»Ð¾Ð¶ÐµÐ½Ð½Ð°Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ð° раÑширенных атрибутов '%(arg_xag)s' не Ñовпадает Ñ ÑущеÑтвующей " "группой раÑширенных атрибутов '%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "Сертификат проÑрочен." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "ÐžÐ±Ñ‰Ð°Ñ Ð´Ð»Ð¸Ð½Ð° префикÑа и ÑуффикÑа не может превышать %d Ñимволов." #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "ÐžÐ±Ñ‰Ð°Ñ Ð´Ð»Ð¸Ð½Ð° должна быть не меньше 1 Ñимвола." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "Длина параметра имени должна быть не меньше одного Ñимвола." 
#: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "Длина параметра имени не должна превышать %d Ñимволов, еÑли Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð° trunk_ok указано значение False." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "Ошибка разработчика: чаÑÑ‚Ð¸Ñ‡Ð½Ð°Ñ ÑÐ¿ÐµÑ†Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ñ Ñ€Ð¾Ð´Ð¸Ñ‚ÐµÐ»ÑŒÑкого объекта." #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "Ошибка разработчика: parent_type должен быть типом Ñхемы Ñтроки или " "производным клаÑÑом Wrapper." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "ÐедопуÑтимое значение '%(bad_val)s'. ОжидалоÑÑŒ одно из Ñледующих значений: %(good_vals)s - или ÑпиÑок." #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "ЗÐПРОС: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "ОТВЕТ: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "Ожидание Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÑющихÑÑ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¹ передачи. LU маркера: %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "Отказ в пользу выполнÑющейÑÑ Ð¿ÐµÑ€ÐµÐ´Ð°Ñ‡Ð¸." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "Отказ от передачи в пользу маркера %s." #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "ИÑпользование уже переданного LU образа %s." #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "Создание LU маркера %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." 
msgstr "Передача в LU %(lu)s образа (маркер %(mkr)s)." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "Удаление Ñбойного LU %s." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "Ðе удалоÑÑŒ найти виртуальный коммутатор %s в ÑиÑтеме." #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "Ðе найдена допуÑÑ‚Ð¸Ð¼Ð°Ñ VLAN Ð´Ð»Ñ Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð¾Ð³Ð¾ коммутатора %s." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "Ошибка Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð¹ нагрузки Ð´Ð»Ñ Ð¿Ð°Ð¼Ñти хоÑта Ñ UUID '%(host)s': " "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "ИÑтек тайм-аут ожиданиÑ, когда ÑоÑтоÑние RMC вÑех включенных VIOS " "изменитÑÑ Ð½Ð° активное. Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ ÑоÑтавило %(time)d Ñ. Серверы VIOS, " "которые не Ñтали активными: %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "ПредполагаетÑÑ, что разъем без опиÑÐ°Ð½Ð¸Ñ Ð¿Ñ€ÐµÐ´Ð½Ð°Ð·Ð½Ð°Ñ‡ÐµÐ½ Ð´Ð»Ñ Ñ„Ð¸Ð·Ð¸Ñ‡ÐµÑкого ввода-вывода: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "Раздел %s уже включен." #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "Раздел %s уже выключен." #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "ÐаÑтройка add_parms как dict больше не поддерживаетÑÑ. Укажите ÑкземплÑÑ€ %s " "в качеÑтве замены." 
#: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "Ðе удалоÑÑŒ завершить работу ОС IBMi в обычном режиме. Будет Ñделана попытка немедленно завершить работу ОС. " "Раздел: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Ðе удалоÑÑŒ немедленно завершить работу ОС IBMi. Будет Ñделана попытка завершить работу VSP в обычном режиме. " "Раздел: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "Ð’ процеÑÑе немедленного Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ñ€Ð°Ð±Ð¾Ñ‚Ñ‹ ОС, отличной от IBMi, возник тайм-аут. Будет Ñделана попытка завершить работу VSP аппаратным ÑпоÑобом. " "Раздел: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Ðе удалоÑÑŒ немедленно завершить работу ОС, отличной от IBMi. Будет Ñделана попытка завершить работу VSP в обычном режиме. " "Раздел: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "ВыполнÑетÑÑ Ð°Ð¿Ð¿Ð°Ñ€Ð°Ñ‚Ð½Ð¾Ðµ завершение работы VSP Ñ Ñ‚Ð°Ð¹Ð¼-аутом по умолчанию. Раздел: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "ÐŸÐ¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ° Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ ÑвÑзей SCSI." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Обнаружена ÑущеÑÑ‚Ð²ÑƒÑŽÑ‰Ð°Ñ Ð¿Ñ€Ð¸Ð²Ñзка Ñлемента ÑиÑтемы Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ %(stg_type)s Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(stg_name)s из " "VIOS %(vios_name)s Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ñким LPAR %(lpar_uuid)s." 
#: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Создание привÑзки Ñлемента памÑти %(stg_type)s Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(stg_name)s из " "VIOS %(vios_name)s Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ñким LPAR %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "Одновременно Ð½ÐµÐ»ÑŒÐ·Ñ ÑƒÐºÐ°Ð·Ñ‹Ð²Ð°Ñ‚ÑŒ match_func и stg_elem." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "Метод register_cna уÑтарел. ИÑпользуйте метод register_vnet. " "" #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "Метод drop_cna уÑтарел! ИÑпользуйте метод drop_vnet." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "ФизичеÑкий порт SR-IOV в раÑположении %(loc_code)s ÑвлÑетÑÑ Ð±Ð°Ð·Ð¾Ð²Ñ‹Ð¼ Ð´Ð»Ñ vNIC," " принадлежащего LPAR %(lpar_name)s (UUID LPAR: %(lpar_uuid)s; UUID vNIC: " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "Изменение меток Ñледующих физичеÑких портов SR-IOV, неÑÐ¼Ð¾Ñ‚Ñ€Ñ Ð½Ð° то, что " "они иÑпользуютÑÑ Ð´Ð»Ñ vNIC:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "Ðевозможно удалить vio_file Ñ UUID %s. Его необходимо удалить вручную." #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "При передаче возникла неполадка. Будет Ñделан повтор." 
#: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "Метод crt_lu_linked_clone уÑтарел. ИÑпользуйте метод crt_lu " "(clone=src_lu, size=lu_size_gb)." #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "У диÑкового логичеÑкого Ð½Ð°ÐºÐ¾Ð¿Ð¸Ñ‚ÐµÐ»Ñ %(luname)s нет базового LU образа. (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "Ðе удалоÑÑŒ найти новый vDisk при передаче файла." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "УÑтройÑтво проигнорировано, поÑкольку у него нет UDID:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "УÑтройÑтво %s не найдено в ÑпиÑке." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "Удаление виртуального диÑка %(vdisk)s из группы томов %(vg)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "Удаление виртуального оптичеÑкого Ð½Ð°ÐºÐ¾Ð¿Ð¸Ñ‚ÐµÐ»Ñ %(vopt)s из группы томов %(vg)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "Удаление LU %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "LU %(lu_name)s не найден, возможно, он был удален вне обычного потока. " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. 
(UDID: " "%(lu_udid)s)" msgstr "" "LU образа %(lu_name)s удалÑетÑÑ, так как он больше не иÑпользуетÑÑ. (UDID: " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "Базовый LU %(lu_name)s не найден. (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "Ошибка разработчика: необходимо указать уровень или lufeed." #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "Ошибка разработчика: параметр lufeed должен Ñодержать LUEnt EntryWrappers." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "Удаление LU %(lu_name)s (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "Игнорирование HttpError из-за LU %(lu_name)s, который мог быть удален отдельно." " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "Удаление %(num_maps)d изолированных ÑвÑзей %(stg_type)s из VIOS " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "Удаление %(num_maps)d ÑвÑзей виртуального Fibre Channel без портов из VIOS %(vios_name)s." #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "Удаление %(num_maps)d %(stg_type)s ÑвÑзей, ÑвÑзанных Ñ Ð˜Ð” LPAR " "%(lpar_id)d, из VIOS %(vios_name)s." 
#: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "ПамÑть %(stg_name)s типа %(stg_type)s не удалена, поÑкольку не удалоÑÑŒ " "определить, иÑпользуетÑÑ Ð»Ð¸ она. Проверка вручную и очиÑтка " "могут потребоватьÑÑ." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" "Процедура очиÑтки реÑурÑов Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð¾Ð¸Ð³Ð½Ð¾Ñ€Ð¸Ñ€Ð¾Ð²Ð°Ð»Ð° Ñлемент ÑиÑтемы Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ %(stg_name)s, поÑкольку он " "непредвиденного типа %(stg_type)s." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "Удаление Ñледующих %(vdcount)d виртуальных диÑков из VIOS %(vios)s: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "Удаление Ñледующих %(vocount)d виртуальных оптичеÑких накопителей из VIOS %(vios)s: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "ПропуÑк очиÑтки ÑвÑзей %(stg_type)s из VIOS %(vios_name)s Ð´Ð»Ñ " "Ñледующих ИД LPAR, так как Ñти LPAR ÑущеÑтвуют: %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "Ðе удалоÑÑŒ найти подходÑщий VIOS. Возможно, Ð¿Ð¾Ð»ÐµÐ·Ð½Ð°Ñ Ð½Ð°Ð³Ñ€ÑƒÐ·ÐºÐ° оказалаÑÑŒ " "недоÑтаточной. 
Данные полезной нагрузки:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "Ð”Ð»Ñ ÑоответÑтвующей ÑвÑзи порта VFC не задан базовый порт. Добавление %(port)s в " "ÑвÑзь Ð´Ð»Ñ Ñледующих WWPN клиента: %(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "Возникла ошибка при запроÑе хранилища виртуальных оптичеÑких ноÑителей. " "Попытка повторной уÑтановки ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ Ñ…Ñ€Ð°Ð½Ð¸Ð»Ð¸Ñ‰ÐµÐ¼ виртуальных оптичеÑких " "ноÑителей." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "Ðе удалоÑÑŒ закрыть vterm." #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "ÐедопуÑтимый вывод при открытии vterm. Попытка ÑброÑа vterm. Ошибка: %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "Обработчик запроÑов VNCSocket принимает запроÑÑ‹ через ip=%(ip)s, порт=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "Ошибка ÑоглаÑÐ¾Ð²Ð°Ð½Ð¸Ñ SSL Ð´Ð»Ñ VNCRepeater: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "Ошибка поиÑка жеÑтких диÑков; будет очищена ÑÑ‚Ð°Ñ€Ð°Ñ Ð¿Ð°Ð¼Ñть Ð´Ð»Ñ Ð˜Ð” LPAR %s и " "выполнена Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€Ð½Ð°Ñ Ð¿Ð¾Ð¿Ñ‹Ñ‚ÐºÐ°." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "ВоÑÑтановление LUA уÑпешно завершено. 
Ðайденное уÑтройÑтво: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "Ошибка ITL: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "УÑтройÑтво %s в данный момент занÑто." #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "Обнаружено уÑтройÑтво %s Ñ Ð½ÐµÐ¸Ð·Ð²ÐµÑтным UDID." #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "Ðе удалоÑÑŒ найти уÑтройÑтво: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "Ошибка CLIRunner: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "Задание LUARecovery QUERY_INVENTORY уÑпешно выполнено, но результат не Ñодержит ни " "OutputXML, ни StdOut." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY Ñоздал недопуÑтимый фрагмент XML (%(chunk)s). Ошибка: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "Ðе найден деÑкриптор pg83 в выводе XML:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "Команда ISCSI уÑпешно завершена" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "Ð¡ÐµÐ°Ð½Ñ ISCSI уже ÑущеÑтвует и активен" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "Команда ISCSI выполнена Ð´Ð»Ñ Ð½ÐµÐ¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÐ¼Ð¾Ð³Ð¾ хоÑта VIOS. " #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "Ð’ ходе поиÑка ISCSI в базе данных ODM обнаружены уÑтаревшие запиÑи." 
#: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "Ðе удалоÑÑŒ найти ÑÐµÐ°Ð½Ñ ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "Ðе найдены запиÑи/целевые объекты/ÑеанÑÑ‹/порталы Ð´Ð»Ñ Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "Ð’ ходе Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ ÐºÐ¾Ð¼Ð°Ð½Ð´Ñ‹ ISCSI возникла внутреннÑÑ Ð¾ÑˆÐ¸Ð±ÐºÐ° = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "Общий код ошибки ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "Сбой входа в ÑиÑтему в ÑеанÑе ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "ÐедопуÑтимые аргументы команды ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "Тайм-аут Ð¿Ð¾Ð´ÐºÐ»ÑŽÑ‡ÐµÐ½Ð¸Ñ ISCSI." #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "Команде ISCSI не удалоÑÑŒ найти хоÑÑ‚ " #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "Команда ISCSI возвратила непредвиденное ÑоÑтоÑние = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "Команда ISCSI выполнена в неподдерживаемом VIOS " #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "ОÑвободите уÑтаревшие уÑтройÑтва Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð² LPAR Ñ Ð˜Ð” %s и повторите поиÑк iSCSI. " #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." msgstr "" "Ð˜Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ показателÑÑ… недоÑтупна. Возможно, показатели были " "недавно инициализированы." 
#: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "Это теÑÑ‚" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "Это Ñообщение, Ð´Ð»Ñ ÐºÐ¾Ñ‚Ð¾Ñ€Ð¾Ð³Ð¾ не ÑущеÑтвует перевод" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "КоÑффициент логичеÑких процеÑÑоров должен быть между 0,05 и 1,0. Значение: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð´Ð»Ð¸Ð½Ð° имени логичеÑкого раздела. ИмÑ: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "Ð’ поле '%(field)s' указано недопуÑтимое значение: '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "Значение None недопуÑтимо." #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "Значение '%(value)s' недопуÑтимо Ð´Ð»Ñ Ð¿Ð¾Ð»Ñ '%(field)s'. ДопуÑтимые " "варианты: %(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "Значение Ð¿Ð¾Ð»Ñ '%(field)s' меньше минимального. Значение: %(value)s; " "Минимальное значение: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "Значение Ð¿Ð¾Ð»Ñ '%(field)s' больше макÑимального. Значение: %(value)s; " "МакÑимальное значение: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. 
" "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "Ð’ '%(desired_field)s' указано значение больше '%(max_field)s'. " "Требуемое значение: %(desired)s МакÑимальное значение: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "Ð’ '%(desired_field)s' указано значение меньше '%(min_field)s'. " "Требуемое значение: %(desired)s Минимальное значение: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "Значение объема памÑти не кратно размеру логичеÑкого блока памÑти " "(%(lmb_size)s) хоÑта. Значение: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "УправлÑÐµÐ¼Ð°Ñ ÑиÑтема не поддерживает раÑширение активной памÑти. Возможно, " "Значение коÑффициента раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ '%(value)s' недопуÑтимо." #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "Значение раÑÑˆÐ¸Ñ€ÐµÐ½Ð¸Ñ Ð°ÐºÑ‚Ð¸Ð²Ð½Ð¾Ð¹ памÑти должно быть не меньше 1.0 и " "не больше 10.0. Значение 0 тоже допуÑтимо; оно означает, что" " поддержка AME выключена. Значение '%(value)s' недопуÑтимо." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "Попытка %(retry)d из %(total)d Ð´Ð»Ñ URI %(uri)s. 
Ошибка: " "извеÑтный код ответа повторной попытки: %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "Попытка %(retry)d из %(total)d завершилаÑÑŒ неудачно. Будет Ñделан повтор. ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑитуациÑ:\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "Следует указать EntryWrapper или EntryWrapperGetter." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "Ðеобходимо указать допуÑтимую подзадачу." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "ПовторÑющееÑÑ Ð¸Ð¼Ñ 'provides' %s." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s не имеет подзадач; выполнение пуÑтой операции." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "Ðеобходимо указать ÑпиÑок EntryWrappers или FeedGetter." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s не имеет подзадач; выполнение пуÑтой операции." #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "Ð’ FeedTask %s возникло неÑколько иÑключительных Ñитуаций. Их опиÑÐ°Ð½Ð¸Ñ Ð¿Ð¾-отдельноÑти приведены" " ниже." 
#: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "ÐедоÑтаточно %(res_name)s на хоÑте Ð´Ð»Ñ Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð¾Ð¹ машины " "'%(instance_name)s' (запрошено %(requested)s, доÑтупно %(avail)s)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "памÑть" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "Ð’Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð° должна быть выключена перед изменением минимального или " "макÑимального объема памÑти. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "Ð’Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð° должна быть выключена перед изменением " "коÑффициента раÑширениÑ. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "ПроцеÑÑоры" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "логичеÑкие процеÑÑоры" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "Ð’Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð° должна быть выключена перед изменением минимального или " "макÑимального чиÑла процеÑÑоров. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." 
msgstr "" "Ð’Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð°Ñ Ð¼Ð°ÑˆÐ¸Ð½Ð° должна быть выключена перед изменением минимального или " "макÑимального чиÑла логичеÑких процеÑÑоров. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "Прежде чем изменÑть режим ÑовмеÑтимоÑти процеÑÑора, виртуальную машину " "необходимо выключить. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "Прежде чем изменÑть режим процеÑÑора, виртуальную машину " "обÑлуживаниÑ. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Предпочитаемое количеÑтво процеÑÑоров (%(vcpus)d) не должно превышать макÑимально допуÑтимое " "количеÑтво процеÑÑоров на раздел (%(max_allowed)d) Ð´Ð»Ñ Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð¾Ð¹ машины " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "МакÑимальное количеÑтво процеÑÑоров (%(vcpus)d) не должно превышать " "ограничение на количеÑтво процеÑÑоров (%(max_allowed)d) Ð´Ð»Ñ Ð²Ð¸Ñ€Ñ‚ÑƒÐ°Ð»ÑŒÐ½Ð¾Ð¹ машины " "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." 
msgstr "" "Прежде чем изменÑть функцию упрощенного удаленного перезапуÑка, " "необходимо выключить виртуальную машину. Выключите виртуальную машину %s и повторите попытку." #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "Ð’ разделе нет активных Ñоединений RMC." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "Ð’ разделе отÑутÑтвует Ð°ÐºÑ‚Ð¸Ð²Ð½Ð°Ñ Ñ„ÑƒÐ½ÐºÑ†Ð¸Ñ DLPAR Ð´Ð»Ñ %s." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "РеÑурÑÑ‹ ввода-вывода" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "ПамÑть" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "ПроцеÑÑоры" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "ÐедопуÑтимый KeylockPos '%s'." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "ÐедопуÑтимое значение BootMode '%s'." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapter уÑтарел! Следует иÑпользовать IOSlot.io_adapter." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "Ðе удалоÑÑŒ определить главную конÑоль ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ MTMS (тип ÑиÑтемы, модель, " "Ñерийный номер) по ИД %(identifier)s, так как нет %(param)s, помеченных как" " Ð³Ð»Ð°Ð²Ð½Ð°Ñ ÐºÐ¾Ð½Ñоль Ð´Ð»Ñ Ð¿ÑƒÐ»Ð°." #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "Ðе удалоÑÑŒ указать uuid." 
#: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "Ðе удалоÑÑŒ преобразовать %(property_name)s='%(value)s' в объект %(pvmobject)s" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "ÐÐµÐ»ÑŒÐ·Ñ ÑƒÐºÐ°Ð·Ð°Ñ‚ÑŒ href Ð´Ð»Ñ Ð½ÐµÑкольких ÑÑылок. \n" "Путь: %{path}s\n" "КоличеÑтво найденных ÑÑылок: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "ÐÐµÐ»ÑŒÐ·Ñ Ñоздать и добавить в оболочку Ñлемент без тега." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "Ð’ ответе отÑутÑтвует ÑвойÑтво 'entry'." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "Ðеобходимо указать ответ или ÑущноÑтный объект Ð´Ð»Ñ Ñ€Ð°Ð·Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ. Получено %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "Ошибка разработчика: укажите 'parent' или ('parent_type' и 'parent_uuid') длÑ" " Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð´Ð¾Ñ‡ÐµÑ€Ð½ÐµÐ³Ð¾ объекта." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "Укажите 'uuid' или 'root_id' при запроÑе корневого объекта." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "Должны быть указаны Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ parent_type и parent_uuid при получении дочерней " "ленты или запиÑи." #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "Укажите родительÑкий UUID в параметре parent_uuid." 
#: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "Укажите 'uuid' или 'child_id' при запроÑе дочернего объекта." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "РодительÑкий UUID указан без родительÑкого типа." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "Ð”Ð»Ñ Ð¼ÐµÑ‚Ð¾Ð´Ð° search() требуетÑÑ Ð¾Ð´Ð¸Ð½ аргумент ключ=значение." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "КлаÑÑ Ð¾Ð±Ð¾Ð»Ð¾Ñ‡ÐºÐ¸ %(class)s не поддерживает ключ поиÑка '%(key)s'." #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "Параметр 'xag' в EntryWrapper.update уÑтарел. Ð’ лучшем Ñлучае он" " не будет иметь Ñффекта. Ð’ худшем - могут возникнуть неуÑтранимые " "ошибки неÑоответÑÑ‚Ð²Ð¸Ñ etag." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "Ðет такого дочернего Ñлемента." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "Ðевозможно указать UUID Ð´Ð»Ñ Ð¾Ð±Ð¾Ð»Ð¾Ñ‡ÐºÐ¸ без метаданных." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "ÐедопуÑтимое значение uuid: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "Ðеобходимо указать подклаÑÑ Wrapper." #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "Ðеобходимо указать и родительÑкий клаÑÑ, и родительÑкий UUID, или не указывать ни одного из Ñтих значений." 
#: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "Мониторинг Ð·Ð°Ð´Ð°Ð½Ð¸Ñ %(job_id)s в течение %(time)i Ñ." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "Отправлен Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð½Ð° отмену Ð·Ð°Ð´Ð°Ð½Ð¸Ñ %(job_id)s. Задание будет опрашиватьÑÑ Ð½ÐµÐ¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð½Ð¾Ðµ времÑ" " на предмет Ð·Ð°Ð²ÐµÑ€ÑˆÐµÐ½Ð¸Ñ Ñ€Ð°Ð±Ð¾Ñ‚Ñ‹." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "Задание %s не удалено. Задание находитÑÑ Ð² ÑоÑтоÑнии выполнениÑ." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPAR не находитÑÑ Ð² активном ÑоÑтоÑнии." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "Ð¦ÐµÐ»ÐµÐ²Ð°Ñ ÑиÑтема не поддерживает мобильные LPAR IBM i." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "Ð’ LPAR IBM i нет ограниченных реÑурÑов ввода-вывода." #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "ИÑÑ…Ð¾Ð´Ð½Ð°Ñ ÑиÑтема не поддерживает мобильные LPAR IBM i." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "Ð’ LPAR нет активных Ñоединений RMC." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR ÑвлÑетÑÑ Ñ€Ð°Ð·Ð´ÐµÐ»Ð¾Ð¼ управлениÑ" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "LPAR недоÑтупен Ð´Ð»Ñ LPM, поÑкольку он не поддерживает DLPAR." 
#: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "Это не то ÑвойÑтво, которое вам нужно. ИÑпользуйте srr_enabled в " "Ñреде NovaLink." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "ÐедопуÑтимое значение IPLSrc '%s'." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "Это ÑвойÑтво уÑтарело. ИÑпользуйте pci_subsys_dev_id." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "Это ÑвойÑтво уÑтарело. ИÑпользуйте pci_rev_id." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "Это ÑвойÑтво уÑтарело. ИÑпользуйте pci_subsys_vendor_id." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "Это ÑвойÑтво уÑтарело. ИÑпользуйте drc_index." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "Это ÑвойÑтво уÑтарело. ИÑпользуйте drc_name." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ ÑÐ¿ÐµÑ†Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ñ Ñ€Ð¾Ð´Ð¸Ñ‚ÐµÐ»ÑŒÑкого объекта Ð´Ð»Ñ CNA.create." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV Ñодержал зашифрованный деÑкриптор pg83 \"%(pg83_raw)s\", но его не удалоÑÑŒ раÑшифровать " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." 
msgstr "" "СвойÑтво 'xags' клаÑÑа EntryWrapper VIOS уÑтарело. " " Следует иÑпользовать Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¸Ð· pypowervm.const.XAG." #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "Раздел типа VIOS не поддерживает LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "ÐÐµÐ»ÑŒÐ·Ñ ÑƒÐºÐ°Ð·Ñ‹Ð²Ð°Ñ‚ÑŒ LUA целевого уÑтройÑтва без базового уÑтройÑтва хранениÑ!" # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/pt-BR/0000775000175000017500000000000013571367172017504 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/pt-BR/pypowervm.po0000664000175000017500000017523413571367171022127 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=2; plural=n>1;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "Protocolo inválido \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "Comunicação não criptografada com o PowerVM! Reverta a configuração para HTTPS." #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "Falha ao calcular o memento de auditoria padrão, usando 'padrão'." 
#: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "Autenticação local não suportada no HMC." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "Configurando listener de eventos para %s" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "Manipulação de arquivos inesperada na solicitação %s" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "Erro inesperado para %(meth)s %(url)s" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "Erro inesperado: %(class)s para %(method)s %(url)s: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "O novo login foi considerado não seguro. Esta instância de sessão não deve mais " "ser usada." #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "Tentando novo login %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "Novo login 401, corpo de resposta:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "Falha no novo login, corpo de resposta:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "Novo login com falha:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "Falha na nova tentativa com outro 401, corpo de resposta:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "resposta HTTP 401 suspeita para %(method)s %(path)s: o token é completamente novo" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the 
pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "Falha ao conectar-se ao servidor REST - o serviço pvm-rest foi iniciado? " "Tentando %(try_num)d de %(max_tries)d após %(delay)d segundos." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "Criação de log de sessão ativada %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "Falha ao analisar um token de sessão a partir da resposta do PowerVM." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " Corpo = %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "Falha ao analisar um caminho de arquivo de sessão a partir da resposta do PowerVM." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "O arquivo de token %s não continha um token de sessão legível." #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "Criação de log de sessão desativada %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "Problema ao efetuar logoff. Ignorando." 
#: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "a tarefa deve ser um elemento JobRequest" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "OperationName está ausente de JobRequest" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "path=%s não é uma referência da API do PowerVM" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "path=%s não é uma referência de API do PowerVM" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "Descritor de arquivo inválido" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "root_id esperado" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "child_type esperado" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "suffix_type=%s inesperado" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "suffix_parm esperado" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "child_id esperado" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "child_id inesperado" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "root_id inesperado" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "req_method=%s inesperado" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "Erro ao analisar a resposta XML do PowerVM: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "A resposta não é uma entrada ou um feed Atom" #: pypowervm/adapter.py:1157 msgid 
"Unexpected HTTP 204 for request" msgstr "HTTP 204 inesperado para solicitação" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "Corpo de resposta vazio inesperado" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "cabeçalhos de solicitação: %(reqheaders)s\n" "\n" "corpo da solicitação: %(reqbody)s\n" "\n" "cabeçalhos de resposta: %(respheaders)s\n" "\n" "corpo de resposta: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "Erro do Atom para %(method)s %(path)s: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "A sessão não deve ser Nenhuma" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "Um listener de eventos já está ativo na sessão." 
#: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "Falha ao inicializar o listener de feed de eventos: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "ID do aplicativo \"%s\" não exclusivo" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "Encerrando" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "Este manipulador já está inscrito" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "O manipulador deve ser EventHandler" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "Manipulador não localizado na lista de assinantes" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "Encerrando o EventListener para %s" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "Encerramento de EventListener concluído para %s" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "Erro ao obter eventos do PowerVM: %s. (O serviço pvm-rest está inativo?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "EventType=%s inesperado" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "Erro ao processar eventos do PowerVM" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "Não é possível derivar uma porta FC física adequada para WWPN %(wwpn)s. Os" " Grupos de Atributos Estendidos VIOS podem ter sido insuficientes. O URI VIOS" " para a consulta era %(vio_uri)s." 
#: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "Elemento não localizado: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "LPAR não localizado: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "Adaptador não localizado" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "A operação '%(operation_name)s' falhou. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "A operação '%(operation_name)s' falhou. Falha ao concluir a tarefa em" " %(seconds)d segundos." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "Não é possível executar o encerramento do S.O. na máquina virtual %(lpar_nm)s porque sua " "conexão do RMC não está ativa." #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Falha ao desligar Máquina Virtual %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "Desligue a máquina virtual %(lpar_nm)s, que atingiu o tempo limite após %(timeout)d " "serviço." #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Falha ao ligar a Máquina Virtual %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "Ligue a máquina virtual %(lpar_nm)s, que atingiu o tempo limite após %(timeout)d " "serviço." 
#: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "Não é possível remover %(vlan_id)d, pois ele é o Identificador Primário da VLAN em" " uma ponte de rede diferente." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "Não é possível provisionar a VLAN %(vlan_id)d. Ela parece estar contida no " "dispositivo '%(dev_name)s' no Virtual I/O Server %(vios)s. Esse dispositivo não está" " conectado a nenhuma ponte de rede (adaptador Ethernet compartilhado). " "remova manualmente o dispositivo ou inclua-o na Ponte de rede antes " "de continuar." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "Uma Unidade Lógica com o nome %(lu_name)s já existe no conjunto de armazenamento compartilhado " "%(ssp_name)s." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "Não é possível localizar uma porta física para a qual mapear uma porta Fibre Channel virtual. " "Isto ocorre devido a um Virtual I/O Server estar indisponível ou a uma" " especificação das portas incorreta para as portas do fibre channel físicas." #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. 
The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "Não é possível iniciar o console na Máquina Virtual. A API pypowervm está" " em execução em um modo não local. O console pode ser implementada apenas quando " "pypowervm estiver colocalizado com a API do PowerVM." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s não tem subtarefas!" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask não pode ter um feed vazio." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "O OS (sistema operacional) negou acesso ao arquivo %(access_file)s." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "O sistema operacional encontrou um erro de E/S ao tentar ler o arquivo %(access_file)s: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "A tarefa de migração falhou. %(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "Nenhuma origem de carregamento foi localizada para a VM %(vm_name)s" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "Não é possível derivar a codificação pg83 para o %(dev_name)s. Os " "atributo parent_entry não está configurado. O motivo pode ser o uso de uma PV (entrada-pai) " "obtida por meio de uma cadeia de propriedade não suportada. 
A PV deve ser acessada " "por meio de VIOS.phys_vols, VG.phys_vols ou " "VIOS.scsi_mappings[n].backing_storage." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "Não é possível remapear o elemento de armazenamento do mapeamento vSCSI. Esperava-se localizar " "exatamente um mapeamento de correspondência, mas foram localizados %(num_mappings)d." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "Não é possível remapear o elemento de armazenamento do mapeamento vSCSI. Um mapeamento para o elemento de armazenamento " "%(stg_name)s já existe para a LPAR do cliente %(lpar_uuid)s." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "Localizado dispositivo %(devname)s %(count)d vezes. Era esperado localizá-lo no máximo " "uma vez." #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)s experimentou múltiplas exceções:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "Esperava localizar exatamente uma partição de gerenciamento; localizada %(count)d." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "Era esperado localizar exatamente uma partição com o ID %(lpar_id)d. Localizou-se " "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." 
msgstr "Não foi possível localizar a Camada padrão no Conjunto de Armazenamentos Compartilhados %(ssp_name)s." #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "O dispositivo com UDID %(udid)s não foi localizado em nenhum Virtual I/O " "Server." #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "Não há Virtual I/O Servers suficientes para suportar o dispositivo da máquina virtual" " com UDID %(udid)s." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "A malha esperada (%(fabrics)s) não foi localizada em nenhum dos Virtual " "I/O Servers." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "Não é possível reconstruir a máquina virtual. Ela está usando um tipo de E/S de " "%(io_type)s que não é suportado para reconstrução de VM." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "O número de slots de VFC no sistema de destino (%(rebuild_slots)d) não" " corresponde ao número de slots no sistema do cliente (%(original_slots)d). " "Não é possível reconstruir esta máquina virtual neste sistema." #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." 
msgstr "" "Para registrar as informações do slot do dispositivo de rede, um adaptador CNA ou VNIC " "é necessário. Em vez disso, o seguinte foi fornecido: %(wrapper)s." #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "Não há Virtual I/O Servers ativos disponíveis suficientes. Esperados " "%(exp)d; localizados %(act)d." #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "Nenhum Virtual I/O Server está disponível. Tentou-se aguardar um VIOS " "tornar-se ativo por %(wait_time)d segundos. Verifique a conectividade RMC " "entre o PowerVM NovaLink e os Virtual I/O Servers." #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Não foi possível localizar nenhum adaptador SR-IOV no modo Sriov e no estado Executando.\n" "Localização | Modo | Estado\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "Não é possível cumprir o requisito de redundância de %(red)d. Localizados %(found_vfs)d" " dispositivos auxiliares viáveis." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "O sistema gerenciado não é compatível com vNIC." #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "Não há VIOSes ativos com capacidade de vNIC." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." 
msgstr "" "A redundância de %(red)d foi especificada, mas o sistema gerenciado não é" " capaz de failover de vNIC." #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "A redundância de %(red)d foi especificada, mas não há VIOSes ativos " "aptos para failover de vNIC." #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "Não é possível localizar o grupo de volumes %(vol_grp)s para armazenar a mídia " "ótica virtual. Impossível criar o repositório de mídia." #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "A atualização ManagedSystem não foi tentada porque foram solicitadas mudanças" " para uma ou mais portas físicas SR-IOV que estão em uso pelos vNICs.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "Não é possível criar terminal virtual baseado em VNC: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "O cache do adaptador não é suportado." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "Valor inválido '%(value)s' para '%(enum)s'. Valores válidos são: " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "Nenhum VIOS localizado com o nome %(vios_name)s." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "Nenhum grupo de volumes localizado com o nome %(vg_name)s." 
#: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "A partição com o nome %(part_name)s não é uma partição do IBMi." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "O argumento de partição da função PanelJob está vazio." #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "A operação de função do painel %(op_name)s é inválida. Um de %(valid_ops)s " "é esperado." #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "A descoberta ISCSI falhou para o VIOS %(vios_uuid)s. Código de retorno: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "O logout ISCSI falhou para o VIOS %(vios_uuid)s. Código de retorno: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "A remoção do ISCSI falhou para o VIOS %(vios_uuid)s. Código de retorno: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "Vstor %(stor_udid)s não localizado para o VIOS %(vios_uuid)s." #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "O grupo de atributos estendido proposto '%(arg_xag)s' não corresponde ao " "grupo de atributos estendidos existente '%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "O certificado expirou." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "Prefixo e sufixo juntos não pode ter mais de %d caracteres." 
#: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "O comprimento total deve ser de pelo menos 1 caractere." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "O parâmetro de nome deve ter pelo menos um caractere de comprimento." #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "O parâmetro de nome não deve exceder %d caracteres quando o parâmetro trunk_ok for False." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "Erro do desenvolvedor: especificação-pai parcial." #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "Erro do desenvolvedor: parent_type deve ser um tipo de esquema de sequência ou uma " "subclasse de Wrapper." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "Valor inválido '%(bad_val)s'. Esperado um de %(good_vals)s ou uma lista." #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "SOLICITAÇÃO: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "RESPOSTA: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "Esperando a conclusão de upload(s) em andamento. LU(s) (unidade lógica) marcadora(s): %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "Abdicando em favor do upload em andamento." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "Abdicando do upload em favor do marcador %s." #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "Usando LU da imagem já transferida por upload %s." 
#: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "Criando LU marcadora %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "Fazendo upload para a LU da imagem %(lu)s (marcadora %(mkr)s)." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "Removendo LU (unidade lógica) com falha %s." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "Impossível localizar o Comutador Virtual %s no sistema." #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "Não é possível localizar uma VLAN válida para o comutador virtual %s." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "Erro ao obter a sobrecarga da memória do host para o host com UUID '%(host)s': " "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "Tempo limite atingido ao aguardar que o estado RMC de todos os Virtual I/O " "Servidores que devem estar ativos. O tempo de espera foi de: %(time)d segundos. VIOSs que " "não se tornaram ativos foram: %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "Assumindo que o slot com menos descrição seja a E/S física: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "Partição %s já ligada." #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "A partição %s já está desligada." 
#: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "Especificar add_parms como um dic foi descontinuado. Especifique uma instância %s " "." #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "Encerramento normal do SO IBMi com falha. Tentando encerramento imediato do SO. " "Partição: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Encerramento imediato do SO IBMi com falha. Tentando encerramento normal VSP. " "Partição: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "O encerramento imediato do SO não IBMi atingiu o tempo limite. Tentando encerramento forçado VSP. " "Partição: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Encerramento imediato de SO não IBMi com falha. Tentando encerramento normal VSP. " "Partição: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "Encerramento forçado VSP com tempo limite padrão. Partição: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "Tentando a modificação do Mapeamento SCSI novamente." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Localizado o mapeamento existente do elemento de armazenamento %(stg_type)s %(stg_name)s do " "Virtual I/O Server %(vios_name)s para clientes da LPAR %(lpar_uuid)s." 
#: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Criando Mapeamento do elemento de armazenamento %(stg_type)s %(stg_name)s do " "Virtual I/O Server %(vios_name)s para clientes da LPAR %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "Não deve especificar match_func e stg_elem." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "O método register_cna foi descontinuado! Use o register_vnet " "correspondente." #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "O método drop_cna foi descontinuado! Use o método drop_vnet." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "A porta física SR-IOV no local %(loc_code)s está auxiliando um vNIC pertencente à" " LPAR (partição lógica) %(lpar_name)s (UUID da LPAR: %(lpar_uuid)s; UUID do vNIC: " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "Fazendo mudanças nos rótulos das seguintes portas físicas SR-IOV, embora " "estejam em uso pelo vNICs:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "Falha ao excluir vio_file com UUID %s. Ele deve ser excluído manualmente." #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "Foi encontrado um problema ao fazer upload. Tentará novamente." 
#: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "O método crt_lu_linked_clone foi descontinuado! Use o método crt_lu " "(clone=src_lu, size=lu_size_gb)." #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "A unidade lógica de disco %(luname)s não tem nenhuma LU (unidade lógica) de imagem auxiliar. (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "Não é possível localizar novo vDisk no upload do arquivo." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "Ignorando dispositivo porque um UDID está ausente:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "Dispositivo %s não localizado na lista." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "Excluindo disco virtual %(vdisk)s do grupo de volume %(vg)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "Excluindo dispositivo ótico virtual %(vopt)s do grupo de volume %(vg)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "Removendo unidade lógica %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "A unidade lógica %(lu_name)s não foi localizada - ela pode ter sido excluída fora da banda. " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. 
(UDID: " "%(lu_udid)s)" msgstr "" "Removendo LU da imagem %(lu_name)s porque ela não está mais em uso. (UDID: " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "A LU (unidade lógica) auxiliar %(lu_name)s não foi localizada. (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "Erro do desenvolvedor: camada ou lufeed é necessária." #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "Erro do desenvolvedor: o parâmetro lufeed deve incluir LUEnt EntryWrappers." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "Excluindo a unidade lógica %(lu_name)s (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "Ignorando HttpError para a unidade lógica %(lu_name)s pode ter sido excluído fora da banda." " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "Removendo %(num_maps)d mapeamentos %(stg_type)s órfãos do VIOS " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "Removendo %(num_maps)d mapeamentos de VFC sem porta do VIOS %(vios_name)s." #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "Removendo mapeamentos %(num_maps)d %(stg_type)s associados ao ID da LPAR " "%(lpar_id)d do VIOS %(vios_name)s." 
#: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "Não removendo armazenamento %(stg_name)s do tipo %(stg_type)s porque não é possível " "Determinar se ele ainda está em uso. Verificação e " "limpeza manuais podem ser necessárias." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" "Limpeza de armazenamento ignorando elemento de armazenamento %(stg_name)s porque ele é do " "tipo inesperado %(stg_type)s." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "Limpando os seguintes %(vdcount)d discos virtuais do VIOS%(vios)s: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "Limpando as seguintes %(vocount)d unidades óticas virtuais do VIOS%(vios)s: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "Ignorando a limpeza dos mapeamentos %(stg_type)s do VIOS %(vios_name)s para os " "seguintes IDs da LPAR, pois essas LPARs existem: %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "Não é possível localizar VIOS apropriado. A carga útil fornecido provavelmente era " "insuficiente. 
Os dados de carga útil são:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "O mapa da porta VFC correspondente não possui porta auxiliar configurada. Incluindo %(port)s no " "mapeamento para wwpns do cliente: %(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "Ocorreu um erro ao consultar o repositório de mídia ótica virtual. " "Tentando restabelecer a conexão com um repositório de mídia ótica " "virtual." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "Impossível fechar vterm." #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "Saída inválida no vterm aberto. Tentando reconfigurar o vterm. O erro foi %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "Listener VNCSocket atendendo no IP=%(ip)s, porta=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "Erro na negociação do SSL para o repetidor VNC: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "descoberta de hdisk falhou; irá limpar o armazenamento antigo para IDs de LPAR %s e " "tentar novamente." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "Recuperação do LUA com sucesso. 
Dispositivo localizado: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "Erro de ITL encontrado: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "O dispositivo %s está atualmente em uso." #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "Dispositivo %s descoberto com UDID desconhecido." #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "Falha ao descobrir o dispositivo: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "Erro no CLIRunner: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "A tarefa LUARecovery de QUERY_INVENTORY foi bem-sucedida, mas o resultado não contém " "OutputXML nem StdOut (saída padrão)." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY produziu um chunk inválido de XML (%(chunk)s). Erro: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "Falha ao localizar o descritor pg83 na saída XML:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "Comando da ISCSI concluído com êxito" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "A sessão da ISCSI já existe e efetuou login" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "O comando da ISCSI foi executado em um VIOS não suportado, host." #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." 
msgstr "A descoberta da ISCSI localizou entradas antigas no banco de dados do ODM." #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "Não foi possível localizar a sessão da ISCSI " #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "Nenhum registro/destino/sessão/portal foi localizado no qual executar a operação" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "O comando da ISCSI falhou com status de erro interno = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "Código de erro genérico da ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "Falha de login de sessão da ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "Argumentos inválidos do comando da ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "O cronômetro de conexão da ISCSI expirou ao tentar se conectar." #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "Não foi possível para o comando da ISCSI consultar o host" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "O comando da ISCSI retornou um status inesperado = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "O comando da ISCSI foi executado em um VIOS não suportado " #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "Limpe o armazenamento antigo para IDs de LPAR %s e tente novamente a descoberta da iSCSI." #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." 
msgstr "" "Os dados de métrica não estão disponíveis. Isso pode ocorrer devido às métricas que estão sendo " "inicializadas recentemente." #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "Isso é um teste" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "Esta é uma mensagem para a qual uma tradução não existe" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "O fator de unidades do processador deve estar entre 0,05 e 1,0. Valor: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "O nome da partição lógica possui um comprimento inválido. Nome: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "O campo '%(field)s' tem o valor inválido: '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "O valor None não é válido." #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "O valor '%(value)s' não é válido para o campo '%(field)s' com opções " "aceitáveis: %(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "O campo '%(field)s' tem um valor abaixo do mínimo. Valor: %(value)s; " "Mínimo: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "O campo '%(field)s' tem um valor acima do máximo. Valor: %(value)s; " "Máximo: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. 
" "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "O '%(desired_field)s' tem um valor acima do valor '%(max_field)s'. " "Desejado: %(desired)s Máximo: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "O '%(desired_field)s' tem um valor abaixo do valor '%(min_field)s'. " "Desejado: %(desired)s Mínimo: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "O valor de memória não é um múltiplo do tamanho de bloco da memória lógico " "(%(lmb_size)s) do host. Valor: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "O sistema gerenciado não suporta a expansão de memória ativa. Os " "valor do fator de expansão '%(value)s' não é válido." #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "O valor de expansão de memória ativa deve ser superior ou igual a 1,0 e " "inferior ou igual a 10,0. O valor 0 também é válido e indica que" " O AME está desativado. '%(value)s' não é válido." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "Tentativa %(retry)d do total de %(total)d para URI %(uri)s. O erro foi um " "um código de resposta de nova tentativa conhecido: %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. 
The exception was:\n" " %(except)s." msgstr "" "A tentativa %(retry)d de %(total)d falhou. Tentará novamente. A exceção foi:\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "Deve fornecer EntryWrapper ou EntryWrapperGetter." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "Deve fornecer um subtarefa válida." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "Duplicar 'fornece' nome %s." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s não tem subtarefas; execução sem operação." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "Deve fornecer uma lista de EntryWrappers ou FeedGetter." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s não tem subtarefas; execução sem operação." #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "FeedTask %s experimentou múltiplas exceções. Elas são registradas individualmente" " abaixo." #: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "%(res_name)s disponíveis insuficientes no host para a máquina virtual " "'%(instance_name)s' (%(requested)s solicitado, %(avail)s disponível)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "memória" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." 
msgstr "" "A máquina virtual deverá ser desligada antes de mudar a memória mínima ou " "máxima. Desligue a máquina virtual %s e tente novamente." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "A máquina virtual deverá ser desligada antes de mudar o fator de " "expansão. Desligue a máquina virtual %s e tente novamente." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPUs" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "unidades de processamento" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "A máquina virtual deverá ser desligada antes de mudar o número mínimo ou " "máximo de processadores. Desligue a máquina virtual %s e tente novamente." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" "A máquina virtual deverá ser desligada antes de mudar o número mínimo ou " "máximo de unidades de processador. Desligue a máquina virtual %s e tente novamente." #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "A máquina virtual deverá ser desligada antes de mudar o modo " "de compatibilidade do processador. Desligue a máquina virtual %s e tente novamente." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." 
msgstr "" "A máquina virtual deverá ser desligada antes de mudar o modo de " "processamento. Desligue a máquina virtual %s e tente novamente." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Os processadores desejados (%(vcpus)d) não podem estar acima do máximo permitido " "de processadores por partição (%(max_allowed)d) para a máquina virtual " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "O máximo de processadores (%(vcpus)d) não pode estar acima do " "limite de processadores da capacidade máxima do sistema (%(max_allowed)d) para a máquina virtual " "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "A máquina virtual deverá ser desligada antes de mudar o recurso " "de reinicialização remota simplificada. Desligue a máquina virtual %s e tente novamente." #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "A partição não tem uma conexão RMC ativa." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "A partição não tem uma capacidade DLPAR ativa para %s." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "E/S" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "Memória" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "Processadores" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." 
msgstr "KeylockPos '%s' inválido." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "BootMode '%s' inválido." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "O IOSlot.adapter foi descontinuado! Use o IOSlot.io_adapter." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "Não é possível determinar o MTMS (tipo de máquina, modelo, número de série) do console de gerenciamento principal " "a partir de %(identifier)s porque nenhum %(param)s foi marcado como o" " console principal para o conjunto." #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "Não é possível configurar uuid." #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "Não é possível converter %(property_name)s='%(value)s' no objeto %(pvmobject)s" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "Recusando definição de href sobre vários links.\n" "Caminho: %{path}s\n" "Número de links localizados: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "Recusando-se a construir e agrupar um Elemento sem uma identificação." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "A resposta está sem a propriedade 'entrada'." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "Deve fornecer um Resposta ou Entrada para agrupar. 
Obtido %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "Erro do desenvolvedor: especifique 'parent' ou ('parent_type' e 'parent_uuid') para" " recuperar um objeto CHILD." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "Especifique 'uuid' ou 'root_id' ao solicitar um objeto ROOT." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "O parent_type e o parent_uuid são necessários ao recuperar um " "feed ou entrada CHILD." #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "Especifique o UUID do pai por meio do parâmetro parent_uuid." #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "Especifique 'uuid' ou 'child_id' ao solicitar um objeto CHILD." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "UUID pai especificado sem tipo pai." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "O método de pesquisa () requer exatamente um argumento de chave=valor." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "A classe do wrapper %(class)s não suporta a chave de procura '%(key)s'." #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "O parâmetro 'xag' para EntryWrapper.update foi descontinuado! Na melhor das hipóteses, o uso" " resultará em um no-op. 
Na pior, fornecerá erros de incompatibilidade de " "etag irreparáveis." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "Não existe esse elemento-filho." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "Não é possível configurar o UUID no Wrapper sem Metadados." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "valor de uuid inválido: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "Deve-se especificar uma subclasse de Wrapper." #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "Deve-se especificar a classe-pai e o UUID pai ou nenhum." #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "Tarefa %(job_id)s monitorando por %(time)i segundos." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "Emitindo solicitação de cancelamento para a tarefa %(job_id)s. Pesquisará a tarefa indefinidamente" " para finalização." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "Tarefa %s não excluída. Tarefa em estado de execução." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPAR não está em um estado ativo." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "O sistema de destino não tem a capacidade de mobilidade da LPAR do IBM i." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "IBM i LPAR não tem E/S restrita." 
#: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "Sistema de origem não tem Capacidade de Mobilidade do IBM i LPAR." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "LPAR não tem uma conexão RMC ativa." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR é a partição de gerenciamento" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "LPAR não está disponível para LPM devido a recursos DLPAR ausente." #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "Esta não é a propriedade que você está procurando. Use srr_enabled em um " "ambiente NovaLink." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "IPLSrc '%s' inválido." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "Esta propriedade foi descontinuada! Use pci_subsys_dev_id no lugar." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "Esta propriedade foi descontinuada! Use pci_rev_id no lugar." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "Esta propriedade foi descontinuada! Use pci_subsys_vendor_id no lugar." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "Esta propriedade foi descontinuada! Use drc_index no lugar." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." 
msgstr "Esta propriedade foi descontinuada! Use drc_name no lugar." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "Especificação inválida de pai para CNA.create." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV codificou o descritor pg83 \"%(pg83_raw)s\", mas falhou ao decodificar " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" "A propriedade 'xags' da classe EntryWrapper do VIOS foi descontinuada! " " use os valores do pypowervm.const.XAG." #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "A partição do tipo VIOS não é compatível com LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "Não é possível especificar o dispositivo LUA de destino sem um dispositivo de armazenamento auxiliar." # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/pypowervm.pot0000664000175000017500000011721613571367171021363 0ustar neoneo00000000000000# Translations template for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "" #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "" #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "" #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." 
msgstr "" #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "" #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr "" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "" #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "" #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." 
msgstr "" #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s 
%(path)s: %(reason)s" msgstr "" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "" #: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" #: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. 
Failed to complete the task in" " %(seconds)d seconds." msgstr "" #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." 
msgstr "" #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "" #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "" #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "" #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." 
msgstr "" #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "" #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." 
msgstr "" #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "" #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "" #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "" #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "" #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "" #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." 
msgstr "" #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "" #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "" #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "" #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "" #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "" #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "" #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "" #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "" #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." 
msgstr "" #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "" #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "" #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "" #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "" #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "" #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "" #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "" #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "" #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." 
msgstr "" #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "" #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "" #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "" #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." 
msgstr "" #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "" #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "" #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "" #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "" #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "" #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." 
msgstr "" #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "" #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "" #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. 
The payload data is:\n" " %s)" msgstr "" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "" #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "" #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "" #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). 
Error: %(err)s" msgstr "" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "" #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "" #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." 
msgstr "" #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "" #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. " "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." 
msgstr "" #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "" #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "" #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "" #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "" #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "" #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "" #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" #: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." 
msgstr "" #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." 
msgstr "" #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "" #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "" #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "" #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "" #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." 
msgstr "" #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "" #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "" #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "" #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." 
msgstr "" #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "" #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "" #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "" #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "" #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "" #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "" #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "" #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "" #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "" #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "" #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "" #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "" #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." 
msgstr "" #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "" pypowervm-1.1.24/pypowervm/locale/ko/0000775000175000017500000000000013571367172017171 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/ko/pypowervm.po0000664000175000017500000020221113571367171021576 0ustar neoneo00000000000000# Korean translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: ko\n" "Language-Team: ko \n" "Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "올바르지 ì•Šì€ í”„ë¡œí† ì½œ \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "PowerVM! Revert êµ¬ì„±ì„ ì‚¬ìš©í•œ https í†µì‹ ì´ ì•”í˜¸í™”ë˜ì§€ 않았습니다." #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "기본 auditmemento ê³„ì‚°ì´ ì‹¤íŒ¨í•¨, 'default' 사용 중." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC."
msgstr "HMCì—서 로컬 ì¸ì¦ì´ ì§€ì›ë˜ì§€ 않습니다." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "%sì— ëŒ€í•œ ì´ë²¤íЏ 리스너 설정 중" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "%s ìš”ì²­ì— ì˜ˆê¸°ì¹˜ ì•Šì€ filehandle" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "%(meth)s %(url)sì— ëŒ€í•œ 예기치 ì•Šì€ ì˜¤ë¥˜" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "예기치 ì•Šì€ ì˜¤ë¥˜: %(method)s %(url)sì— ëŒ€í•œ %(class)s: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "재로그ì¸ì´ 안전하지 ì•Šì€ ê²ƒ 같습니다. ì´ ì„¸ì…˜ ì¸ìŠ¤í„´ìŠ¤ë¥¼ ë” ì´ìƒ 사용하지 " "마십시오." #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "%s ìž¬ë¡œê·¸ì¸ ì‹œë„ ì¤‘" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "401 재로그ì¸, ì‘답 본문:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "ìž¬ë¡œê·¸ì¸ ì‹¤íŒ¨, ì‘답 본문:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "ìž¬ë¡œê·¸ì¸ ì‹¤íŒ¨í•¨:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "다른 401ì—서 재시ë„ê°€ 실패함, ì‘답 본문:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "%(method)s %(path)sì— ëŒ€í•´ ì˜ì‹¬ìŠ¤ëŸ¬ìš´ HTTP 401 ì‘ë‹µì´ ìžˆìŒ: 토í°ì€ 새 항목임" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "REST ì„œë²„ì— ì—°ê²°í•˜ëŠ” ë° ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. 
pvm-rest 서비스가 시작ë˜ì—ˆìŠµë‹ˆê¹Œ? " "%(delay)dì´ˆ í›„ì— ìµœëŒ€ %(max_tries)d번 중 %(try_num)d번 재시ë„합니다." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "%sì—서 세션 로그온 중" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "PowerVM ì‘ë‹µì˜ ì„¸ì…˜ 토í°ì„ 구문 ë¶„ì„하는 ë° ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " 본문= %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "PowerVM ì‘답ì—서 세션 íŒŒì¼ ê²½ë¡œë¥¼ 구문 ë¶„ì„하는 ë° ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "í† í° íŒŒì¼ %sì— ì½ì„ 수 있는 세션 토í°ì´ í¬í•¨ë˜ì–´ 있지 않습니다." #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "%sì—서 세션 로그오프 중" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "로그오프 ì¤‘ì— ë¬¸ì œê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤. ì´ë¥¼ 무시합니다." #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "ìž‘ì—…ì€ JobRequest 요소여야 합니다." #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "JobRequestì— OperationNameì´ ëˆ„ë½ë˜ì—ˆìŠµë‹ˆë‹¤." 
#: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "path=%sì´(ê°€) PowerVM API 참조가 아님" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "path=%sì´(ê°€) PowerVM API 참조가 아님" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "올바르지 ì•Šì€ íŒŒì¼ ë””ìŠ¤í¬ë¦½í„°" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "예ìƒí•œ root_id" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "예ìƒí•œ child_type" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "예ìƒí•˜ì§€ 못한 suffix_type=%s" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "예ìƒí•œ suffix_parm" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "예ìƒí•œ child_id" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "예ìƒí•˜ì§€ 못한 child_id" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "예ìƒí•˜ì§€ 못한 root_id" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "예ìƒí•˜ì§€ 못한 req_method=%s" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "PowerVMì—서 XML ì‘ë‹µì„ êµ¬ë¬¸ ë¶„ì„하는 중 오류 ë°œìƒ: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "ì‘ë‹µì´ Atom 피드/í•­ëª©ì´ ì•„ë‹˜" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "ìš”ì²­ì— ëŒ€í•´ 예ìƒí•˜ì§€ 못한 HTTP 204" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "예ìƒí•˜ì§€ 못한 빈 ì‘답 본문" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request 
headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "요청 í—¤ë”: %(reqheaders)s\n" "\n" "요청 본문: %(reqbody)s\n" "\n" "ì‘답 í—¤ë”: %(respheaders)s\n" "\n" "ì‘답 본문: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "%(method)s %(path)sì— ëŒ€í•œ Atom 오류: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "ì„¸ì…˜ì´ ì—†ìŒì´ì–´ì•¼ 합니다." #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "세션ì—서 ì´ë²¤íЏ 리스너가 ì´ë¯¸ 활성화ë˜ì–´ 있습니다." #: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "ì´ë²¤íЏ 피드 리스너를 초기화하지 못함: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "애플리케ì´ì…˜ ID \"%s\"ì´(ê°€) 고유하지 않습니다." #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "종료 중" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "ì´ í•¸ë“¤ëŸ¬ëŠ” ì´ë¯¸ 가입ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "핸들러가 EventHandler여야 합니다." #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "핸들러가 ê°€ìž…ìž ëª©ë¡ì— 없습니다." #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "%sì— ëŒ€í•œ EventListener 종료 중" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "%sì— ëŒ€í•œ EventListener 종료가 완료ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "PowerVM ì´ë²¤íŠ¸ë¥¼ 가져오는 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ: %s. 
(pvm-rest 서비스가 중지ë˜ì—ˆìŠµë‹ˆê¹Œ?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "예ìƒí•˜ì§€ 못한 EventType=%s" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "PowerVM ì´ë²¤íŠ¸ë¥¼ 처리하는 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "WWPN %(wwpn)sì— ëŒ€í•´ ì í•©í•œ ë¬¼ë¦¬ì  FC í¬íŠ¸ë¥¼ 유ë„í•  수 없습니다. " " VIOS 확장 ì†ì„± ê·¸ë£¹ì´ ì¶©ë¶„í•˜ì§€ ì•Šì•˜ì„ ìˆ˜ 있습니다. ì¡°íšŒì˜ VIOS URI는" " %(vio_uri)s입니다." #: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "요소를 ì°¾ì„ ìˆ˜ ì—†ìŒ: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "LPARì„ ì°¾ì„ ìˆ˜ ì—†ìŒ: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "어댑터를 ì°¾ì„ ìˆ˜ ì—†ìŒ" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "'%(operation_name)s' ì¡°ìž‘ì´ ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "'%(operation_name)s' ì¡°ìž‘ì´ ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. %(seconds)dì´ˆì—" " 태스í¬ë¥¼ 완료하는 ë° ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "해당 RMC ì—°ê²°ì´ í™œì„±ì´ ì•„ë‹ˆë¯€ë¡œ ê°€ìƒ ë¨¸ì‹  %(lpar_nm)sì—서 OS 시스템 종료를 수행할 수 " "없습니다." 
#: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "ê°€ìƒ ë¨¸ì‹  %(lpar_nm)sì˜ ì „ì›ì„ ë„는 ë° ì‹¤íŒ¨í•¨: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "ê°€ìƒ ë¨¸ì‹  %(lpar_nm)sì˜ ì „ì› ì°¨ë‹¨ì´ %(timeout)dì´ˆ 후 ì œí•œì‹œê°„ì´ " "초과ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "ê°€ìƒ ë¨¸ì‹  %(lpar_nm)sì˜ ì „ì›ì„ 켜는 ë° ì‹¤íŒ¨í•¨: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "ê°€ìƒ ë¨¸ì‹  %(lpar_nm)sì˜ ì „ì› ê³µê¸‰ì´ %(timeout)dì´ˆ 후 ì œí•œì‹œê°„ì´ " "초과ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "다른 ë„¤íŠ¸ì›Œí¬ ë¸Œë¦¿ì§€ì˜ 1ì°¨ VLAN IDì´ë¯€ë¡œ VLAN %(vlan_id)dì„(를)" " 제거할 수 없습니다." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "VLAN %(vlan_id)dì„(를) 프로비저ë‹í•  수 없습니다. Virtual I/O Server " "%(vios)sì˜ '%(dev_name)s' ìž¥ì¹˜ì— í¬í•¨ëœ 것으로 보입니다. 장치가" " ë„¤íŠ¸ì›Œí¬ ë¸Œë¦¿ì§€(공유 ì´ë”ë„· 어댑터)ì— ì—°ê²°ë˜ì§€ 않았습니다. 계ì†í•˜ê¸° " "ì „ì— ìˆ˜ë™ìœ¼ë¡œ 장치를 제거하거나 ë„¤íŠ¸ì›Œí¬ " "ë¸Œë¦¿ì§€ì— ì¶”ê°€í•˜ì‹­ì‹œì˜¤." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." 
msgstr "" "ì´ë¦„ì´ %(lu_name)sì¸ ë…¼ë¦¬ 장치가 ì´ë¯¸ 공유 스토리지 " "í’€ %(ssp_name)sì— ìžˆìŠµë‹ˆë‹¤." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "ê°€ìƒ íŒŒì´ë²„ ì±„ë„ í¬íŠ¸ë¥¼ 맵핑할 ë¬¼ë¦¬ì  í¬íŠ¸ë¥¼ ì°¾ì„ ìˆ˜ 없습니다. " "ì´ëŠ” Virtual I/O Serverê°€ 사용 불가능하거나 ë¬¼ë¦¬ì  íŒŒì´ë²„ ì±„ë„ í¬íŠ¸ì— ëŒ€í•œ í¬íЏ 스펙ì´" " ì ì ˆí•˜ì§€ 않기 때문입니다." #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "ê°€ìƒ ë¨¸ì‹ ì— ëŒ€í•œ ì½˜ì†”ì„ ì‹œìž‘í•  수 없습니다. pypowervm APIê°€" " ë¡œì»¬ì´ ì•„ë‹Œ 모드ì—서 실행 중입니다. ì½˜ì†”ì€ pypowervmì´ PowerVM API와 " "공존하는 경우ì—ë§Œ ë°°ì¹˜ë  ìˆ˜ 있습니다." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)sì— í•˜ìœ„ 태스í¬ê°€ 없습니다!" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTaskì—서는 피드가 비어 ìžˆì„ ìˆ˜ 없습니다." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "OSê°€ %(access_file)s 파ì¼ì— 대한 액세스를 거부했습니다." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "%(access_file)s 파ì¼ì„ ì½ëŠ” ì¤‘ì— OSì—서 I/O 오류가 ë°œìƒí•¨: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "마ì´ê·¸ë ˆì´ì…˜ 태스í¬ì— 실패했습니다. 
%(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "VM %(vm_name)sì— ëŒ€í•œ 로드 소스를 ì°¾ì„ ìˆ˜ ì—†ìŒ" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "hdisk %(dev_name)sì— ëŒ€í•œ pg83 ì¸ì½”ë”©ì„ ìœ ë„í•  수 없습니다. " "parent_entry ì†ì„±ì´ 설정ë˜ì§€ 않았습니다. ì§€ì›ë˜ì§€ 않는 " "특성 ì²´ì¸ì„ 통해 ì–»ì€ PV를 사용하기 ë•Œë¬¸ì¼ ìˆ˜ 있습니다. " "VIOS.phys_vols, VG.phys_vols ë˜ëŠ” " "VIOS.scsi_mappings[n].backing_storage를 통해 PV를 액세스해야 합니다." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "vSCSI ë§µí•‘ì˜ ìŠ¤í† ë¦¬ì§€ 요소를 다시 맵핑할 수 없습니다. 정확히 " "í•˜ë‚˜ì˜ ì¼ì¹˜í•˜ëŠ” ë§µí•‘ì„ ì˜ˆìƒí–ˆì§€ë§Œ %(num_mappings)d개를 찾았습니다." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "vSCSI ë§µí•‘ì˜ ìŠ¤í† ë¦¬ì§€ 요소를 다시 맵핑할 수 없습니다. 스토리지 " "요소 %(stg_name)sì— ëŒ€í•œ ë§µí•‘ì´ í´ë¼ì´ì–¸íЏ LPAR %(lpar_uuid)sì— ì´ë¯¸ 있습니다." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "%(devname)s 장치를 %(count)d번 ì°¾ìŒ. ì´ ìž¥ì¹˜ë¥¼ 최대 한 번 ì°¾ì„ ê²ƒìœ¼ë¡œ " "예ìƒí–ˆìŠµë‹ˆë‹¤." #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)sì—서 여러 예외가 ë°œìƒí–ˆìŠµë‹ˆë‹¤. 
\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "정확히 í•˜ë‚˜ì˜ ê´€ë¦¬ íŒŒí‹°ì…˜ì„ ì°¾ì„ ê²ƒì„ ì˜ˆìƒí–ˆì§€ë§Œ %(count)d개를 찾았습니다." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "%(lpar_id)d ID를 가진 íŒŒí‹°ì…˜ì„ ì •í™•ížˆ 1ê°œ ì°¾ì„ ê²ƒìœ¼ë¡œ 예ìƒí•¨. ì°¾ì€ ê°œìˆ˜: " "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "공유 스토리지 í’€ %(ssp_name)sì—서 기본 티어를 ì°¾ì„ ìˆ˜ 없었습니다." #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "Virtual I/O Serverì—서 UDIDê°€ %(udid)sì¸ ìž¥ì¹˜ë¥¼ ì°¾ì„ ìˆ˜ " "없었습니다." #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "UDID %(udid)sì˜ ê°€ìƒ ë¨¸ì‹  장치를 ì§€ì›í•˜ê¸°ì—는 Virtual I/O Serverê°€" " 충분하지 않습니다." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "Virtual I/O Serverì—서 ì˜ˆìƒ íŒ¨ë¸Œë¦­(%(fabrics)s)ì„ ì°¾ì„ ìˆ˜ " "없습니다." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "ê°€ìƒ ë¨¸ì‹ ì„ ìž¬ë¹Œë“œí•  수 없습니다. %(io_type)sì˜ I/O ìœ í˜•ì„ ì‚¬ìš© 중ì´ë©° " "ì´ëŠ” VM ìž¬ë¹Œë“œì— ì§€ì›ë˜ì§€ 않습니다." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." 
msgstr "" "ëŒ€ìƒ ì‹œìŠ¤í…œì˜ VFC 슬롯 수(%(rebuild_slots)d)ê°€" " í´ë¼ì´ì–¸íЏ 시스템(%(original_slots)d)ì˜ ìŠ¬ë¡¯ 수와 ì¼ì¹˜í•˜ì§€ 않습니다. " "ì´ ì‹œìŠ¤í…œì—서 ì´ ê°€ìƒ ë¨¸ì‹ ì„ ìž¬ë¹Œë“œí•  수 없습니다." #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "ë„¤íŠ¸ì›Œí¬ ìž¥ì¹˜ì˜ ìŠ¬ë¡¯ 정보를 등ë¡í•˜ë ¤ë©´ CNA ë˜ëŠ” VNIC 어댑터가 " "필요합니다. 대신 다ìŒì´ 제공ë¨: %(wrapper)s." #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "사용 가능한 활성 Virtual I/O Serverê°€ 충분하지 않습니다. 예ìƒì€ " "%(exp)dê°œì´ë‚˜ %(act)d개를 찾았습니다." #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "Virtual I/O Server를 사용할 수 없습니다. %(wait_time)dì´ˆ ë™ì•ˆ VIOSê°€ " "í™œì„±ì´ ë˜ê¸°ë¥¼ 기다렸습니다. PowerVM NovaLink ë° " "Virtual I/O Server ê°„ì˜ RMC ì—°ê²°ì„ í™•ì¸í•˜ì‹­ì‹œì˜¤." #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Sriov ëª¨ë“œì— ìžˆê³  실행 중 ìƒíƒœì¸ SR-IOV 어댑터를 ì°¾ì„ ìˆ˜ 없었습니다.\n" "위치 | 모드 | ìƒíƒœ\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "%(red)dì˜ ì¤‘ë³µì„± ìš”êµ¬ì‚¬í•­ì„ ìˆ˜í–‰í•  수 없습니다. %(found_vfs)d" " 실행 가능한 백업 장치를 찾았습니다." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "관리 ì‹œìŠ¤í…œì´ vNIC 가능하지 않습니다." #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." 
msgstr "활성 vNIC 가능 VIOSê°€ 없습니다." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "%(red)dì˜ ì¤‘ë³µì„±ì´ ì§€ì •ë˜ì—ˆì§€ë§Œ 관리 ëŒ€ìƒ ì‹œìŠ¤í…œì´ vNIC" " 장애 복구를 사용할 수 없습니다." #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "%(red)dì˜ ì¤‘ë³µì„±ì´ ì§€ì •ë˜ì—ˆì§€ë§Œ 활성 vNIC 장애 복구 " "가능 VIOSê°€ 없습니다." #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "ê°€ìƒ ê´‘í•™ 매체가 ì €ìž¥ë  ë³¼ë¥¨ 그룹 %(vol_grp)sì„(를) " "ì°¾ì„ ìˆ˜ 없습니다. 매체 저장소를 작성할 수 없습니다." #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "vNICì—서 사용 ì¤‘ì¸ í•˜ë‚˜ ì´ìƒì˜ SR-IOV ë¬¼ë¦¬ì  í¬íŠ¸ì— ëŒ€í•œ ë³€ê²½ì´ ìš”ì²­ë˜ì—ˆìœ¼ë¯€ë¡œ" " 관리 시스템 ì—…ë°ì´íŠ¸ê°€ 시ë„ë˜ì§€ 않았습니다.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "VNC 기반 ê°€ìƒ í„°ë¯¸ë„ì„ ìž‘ì„±í•  수 ì—†ìŒ: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "어댑터 ìºì‹œê°€ ì§€ì›ë˜ì§€ 않습니다." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "'%(enum)s'ì— ì˜¬ë°”ë¥´ì§€ ì•Šì€ ê°’ '%(value)s'입니다. 올바른 ê°’ì€ ë‹¤ìŒê³¼ 같습니다. " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "ì´ë¦„ì´ %(vios_name)sì¸ VIOS를 ì°¾ì„ ìˆ˜ 없습니다." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." 
msgstr "ì´ë¦„ì´ %(vg_name)sì¸ ë³¼ë¥¨ ê·¸ë£¹ì„ ì°¾ì„ ìˆ˜ 없습니다." #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "ì´ë¦„ì´ %(part_name)sì¸ íŒŒí‹°ì…˜ì€ IBMi íŒŒí‹°ì…˜ì´ ì•„ë‹™ë‹ˆë‹¤." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "PanelJob 기능 파티션 ì¸ìˆ˜ê°€ 비어 있습니다." #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "íŒ¨ë„ ê¸°ëŠ¥ ì¡°ìž‘ %(op_name)sì´(ê°€) 올바르지 않습니다. %(valid_ops)s 중 하나가 " "예ìƒë©ë‹ˆë‹¤." #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "VIOS %(vios_uuid)sì— ëŒ€í•œ ISCSI를 검색할 수 없습니다. 리턴 코드: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "VIOS %(vios_uuid)sì— ëŒ€í•œ ISCSI를 로그아웃할 수 없습니다. 리턴 코드: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "VIOS %(vios_uuid)sì— ëŒ€í•œ ISCSI 제거가 실패했습니다. 리턴 코드: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "Vstor %(stor_udid)sì´(ê°€) VIOS %(vios_uuid)sì— ëŒ€í•´ 발견ë˜ì§€ 않았습니다." #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "ì œì•ˆëœ í™•ìž¥ ì†ì„± 그룹 '%(arg_xag)s'ì´(ê°€) 기존 " "확장 ì†ì„± 그룹 '%(path_xag)s'ê³¼(와) ì¼ì¹˜í•˜ì§€ 않습니다." #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "ì¸ì¦ì„œê°€ 만료ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "ì ‘ë‘ë¶€ ë° ì ‘ë¯¸ë¶€ëŠ” ëª¨ë‘ %dìžë¥¼ 초과하지 않아야 합니다." 
#: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "ì´ ê¸¸ì´ëŠ” 한 ë¬¸ìž ì´ìƒì´ì–´ì•¼ 합니다." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "ì´ë¦„ 매개변수 길ì´ëŠ” 한 ë¬¸ìž ì´ìƒì´ì–´ì•¼ 합니다." #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "trunk_okê°€ Falseì¸ ê²½ìš° ì´ë¦„ 매개변수는 %dìžë¥¼ 초과해서는 안ë©ë‹ˆë‹¤." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "ê°œë°œìž ì˜¤ë¥˜: 부분 ìƒìœ„ 스펙." #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "ê°œë°œìž ì˜¤ë¥˜: parent_typeì€ ë¬¸ìžì—´ 스키마 ë˜ëŠ” ëž©í¼ " "서브í´ëž˜ìŠ¤ì—¬ì•¼ 합니다." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "'%(bad_val)s' ê°’ì´ ì˜¬ë°”ë¥´ì§€ 않습니다. %(good_vals)s 중 하나 ë˜ëŠ” 목ë¡ì„ 예ìƒí–ˆìŠµë‹ˆë‹¤." #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "요청: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "ì‘답: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "ì§„í–‰ ì¤‘ì¸ ì—…ë¡œë“œê°€ 완료ë˜ê¸°ë¥¼ 기다리는 중입니다. 마커 LU: %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "ì§„í–‰ ì¤‘ì¸ ì—…ë¡œë“œë¥¼ 위해 í¬ê¸°í•˜ëŠ” 중입니다." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "마커 %sì„(를) 위해 업로드를 í¬ê¸°í•˜ëŠ” 중입니다." #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "ì´ë¯¸ ì—…ë¡œë“œëœ ì´ë¯¸ì§€ LU %sì„(를) 사용 중입니다." 
#: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "마커 LU %sì„(를) 작성하는 중" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "ì´ë¯¸ì§€ LU %(lu)s(마커 %(mkr)s)ì— ì—…ë¡œë“œí•˜ëŠ” 중입니다." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "실패한 LU %sì„(를) 제거하는 중입니다." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "시스템ì—서 ê°€ìƒ ìŠ¤ìœ„ì¹˜ %sì„(를) ì°¾ì„ ìˆ˜ 없습니다." #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "ê°€ìƒ ìŠ¤ìœ„ì¹˜ %sì— ëŒ€í•œ 올바른 VLANì„ ì°¾ì„ ìˆ˜ 없습니다." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "UUIDê°€ '%(host)s'ì¸ í˜¸ìŠ¤íŠ¸ì— ëŒ€í•œ 호스트 메모리 오버헤드를 가져오는 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ: " "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "Virtual I/O Serverì—서 ì „ì›ì´ 켜져 있는 모든 RMC ìƒíƒœê°€ í™œì„±ì´ ë˜ê¸°ë¥¼ 기다리는 ë™ì•ˆ " "ì œí•œì‹œê°„ì´ ì´ˆê³¼ë˜ì—ˆìŠµë‹ˆë‹¤. 대기한 시간: %(time)dì´ˆ. í™œì„±ì´ ë˜ì§€ ì•Šì€ " "VIOS: %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "ì„¤ëª…ì´ ì—†ëŠ” ìŠ¬ë¡¯ì´ ë¬¼ë¦¬ì  I/Oë¼ê³  간주함: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "파티션 %sì´(ê°€) ì´ë¯¸ ì „ì›ì´ 공급ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "파티션 %sì´(ê°€) ì´ë¯¸ ì „ì›ì´ 차단ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. 
Please specify a %s " "instance instead." msgstr "" "사전으로 add_parmsì„ ì§€ì •í•˜ëŠ” ë°©ë²•ì€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다. 대신 %s ì¸ìŠ¤í„´ìŠ¤ë¥¼ " "지정하십시오." #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "IBMi OS ì •ìƒ ì‹œìŠ¤í…œ ì¢…ë£Œì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. OS 즉시 시스템 종료를 ì‹œë„ ì¤‘ìž…ë‹ˆë‹¤. " "파티션: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "IBMi OS 즉시 시스템 ì¢…ë£Œì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. VSP ì •ìƒ ì‹œìŠ¤í…œ 종료를 ì‹œë„ ì¤‘ìž…ë‹ˆë‹¤. " "파티션: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "비IBMi OS 즉시 시스템 종료가 ì œí•œì‹œê°„ì„ ì´ˆê³¼í–ˆìŠµë‹ˆë‹¤. VSP 하드 시스템 종료를 ì‹œë„ ì¤‘ìž…ë‹ˆë‹¤. " "파티션: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "비IBMi OS 즉시 시스템 ì¢…ë£Œì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. VSP ì •ìƒ ì‹œìŠ¤í…œ 종료를 ì‹œë„ ì¤‘ìž…ë‹ˆë‹¤. " "파티션: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "기본 제한시간으로 VSP 하드 시스템 종료합니다. 파티션: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "SCSI 맵핑 ìˆ˜ì •ì„ ìž¬ì‹œë„ ì¤‘ìž…ë‹ˆë‹¤." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Virtual I/O Server %(vios_name)sì—서 í´ë¼ì´ì–¸íЏ LPAR %(lpar_uuid)s(으)로 %(stg_type)s 스토리지 요소 %(stg_name)sì˜ ê¸°ì¡´ ë§µí•‘ì„ " "찾았습니다." #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." 
msgstr "" "Virtual I/O Server %(vios_name)sì—서 í´ë¼ì´ì–¸íЏ LPAR %(lpar_uuid)s(으)로 %(stg_type)s 스토리지 요소 %(stg_name)sì˜ ë§µí•‘ì„ " "작성 중입니다." #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "match_func ë° stg_elemì„ ëª¨ë‘ ì§€ì •í•´ì„œëŠ” 안ë©ë‹ˆë‹¤." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "register_cna 메소드는 ë” ì´ìƒ 사용ë˜ì§€ 않습니다! register_vnet 메소드를 " "사용하십시오." #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "drop_cna 메소드는 ë” ì´ìƒ 사용ë˜ì§€ 않습니다. drop_vnet 메소드를 사용하십시오." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "위치 %(loc_code)sì˜ SR-IOV ë¬¼ë¦¬ì  í¬íŠ¸ê°€ LPAR %(lpar_name)sì— ì†í•˜ëŠ” vNICì„" " 백업합니다(LPAR UUID: %(lpar_uuid)s, vNIC UUID: " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "vNICì—서 사용 중ì´ì§€ë§Œ ë‹¤ìŒ SR-IOV ë¬¼ë¦¬ì  í¬íЏ ë ˆì´ë¸”ì„ " "변경하는 중입니다." #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "UUID %sì¸ vio_fileì„ ì‚­ì œí•˜ëŠ” ë° ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. 수ë™ìœ¼ë¡œ 삭제해야 합니다." #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "업로드 ì¤‘ì— ë¬¸ì œê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤. 재시ë„합니다." #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "crt_lu_linked_clone 메소드는 ë” ì´ìƒ 사용ë˜ì§€ 않습니다! crt_lu " "메소드(clone=src_lu, size=lu_size_gb)를 사용하십시오." 
#: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "ë””ìŠ¤í¬ ë…¼ë¦¬ 장치 %(luname)sì— ë°±ì—… ì´ë¯¸ì§€ LUê°€ 없습니다. (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "íŒŒì¼ ì—…ë¡œë“œì—서 새 vDisk를 ì°¾ì„ ìˆ˜ 없습니다." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "UDIDê°€ 없기 ë•Œë¬¸ì— ìž¥ì¹˜ë¥¼ 무시하는 중:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "%s 장치가 목ë¡ì— 없습니다." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "볼륨 그룹 %(vg)sì—서 ê°€ìƒ ë””ìŠ¤í¬ %(vdisk)s ì‚­ì œ 중" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "볼륨 그룹 %(vg)sì—서 ê°€ìƒ ê´‘í•™ 장치 %(vopt)s ì‚­ì œ 중" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "LU %(lu_name)s(UDID %(lu_udid)s) 제거 중" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "LU %(lu_name)sì„(를) ì°¾ì„ ìˆ˜ 없습니다. 밴드ì—서 ì‚­ì œë˜ì—ˆì„ 수 있습니다. " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "ì´ë¯¸ì§€ LU %(lu_name)sì´(ê°€) ë” ì´ìƒ 사용ë˜ì§€ 않으므로 ì´ë¥¼ 제거하는 중입니다. (UDID: " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "백업 LU %(lu_name)sì„(를) ì°¾ì„ ìˆ˜ 없습니다. (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "ê°œë°œìž ì˜¤ë¥˜: tier ë˜ëŠ” lufeedê°€ 필요합니다." 
#: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "ê°œë°œìž ì˜¤ë¥˜: lufeed 매개변수가 LUEnt EntryWrapper를 구성해야 합니다." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "LU %(lu_name)s(UDID: %(lu_udid)s) ì‚­ì œ 중" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "LU %(lu_name)sì— ëŒ€í•œ HttpError를 무시합니다. 밴드ì—서 ì‚­ì œë˜ì—ˆì„ 수 있습니다." " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "%(num_maps)dê°œì˜ orphan %(stg_type)s ë§µí•‘ì„ VIOS %(vios_name)sì—서 " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "VIOS %(vios_name)sì—서 í¬íЏ 없는 %(num_maps)d VFC ë§µí•‘ì„ ì œê±°í•˜ëŠ” 중입니다." #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "LPAR ID %(lpar_id)dê³¼(와) ì—°ê´€ëœ %(num_maps)dê°œì˜ %(stg_type)s ë§µí•‘ì„ " "VIOS %(vios_name)sì—서 제거 중입니다." #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "사용 중ì¸ì§€ íŒë³„í•  수 없기 ë•Œë¬¸ì— %(stg_type)s ìœ í˜•ì˜ %(stg_name)s " "스토리지를 제거하지 않습니다. 수ë™ìœ¼ë¡œ ìœ íš¨ì„±ì„ í™•ì¸í•˜ê³  " "정리해야 í•  ìˆ˜ë„ ìžˆìŠµë‹ˆë‹¤." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." 
msgstr "" "스토리지 제거ì—서 %(stg_name)s 스토리지 요소가 예ìƒí•˜ì§€ 못한 " "%(stg_type)s 유형ì´ê¸° ë•Œë¬¸ì— ë¬´ì‹œí•©ë‹ˆë‹¤." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "VIOS %(vios)sì—서 ë‹¤ìŒ %(vdcount)d ê°€ìƒ ë””ìŠ¤í¬ ì œê±° 중: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "VIOS %(vios)sì—서 ë‹¤ìŒ %(vocount)d ê°€ìƒ ê´‘í•™ 제거 중: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "해당 LPARê°€ 있으므로 LPAR ID %(lpar_ids)sì— ëŒ€í•œ VIOS %(vios_name)sì—서 " " %(stg_type)s ë§µí•‘ì˜ ì œê±°ë¥¼ 건너뛰는 중입니다." #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "ì ì ˆí•œ VIOS를 ì°¾ì„ ìˆ˜ 없습니다. ì œê³µëœ íŽ˜ì´ë¡œë“œê°€ 충분하지 " "ì•Šì•˜ì„ ìˆ˜ 있습니다. 페ì´ë¡œë“œ ë°ì´í„°ëŠ” 다ìŒê³¼ 같습니다.\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "ì¼ì¹˜í•˜ëŠ” VFC í¬íЏ ë§µì— ë°±ì—… í¬íЏ 세트가 없습니다. í´ë¼ì´ì–¸íЏ wwpns: %(wwpns)sì— ëŒ€í•œ " "ë§µí•‘ì— %(port)s 추가" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "ê°€ìƒ ê´‘í•™ 매체 저장소를 조회하는 ì¤‘ì— ì˜¤ë¥˜ê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤. " "ê°€ìƒ ê´‘í•™ 매체 ì €ìž¥ì†Œì™€ì˜ ì—°ê²°ì„ ë‹¤ì‹œ 설정하려고 시ë„하는 " "중입니다." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "vtermì„ ë‹«ì„ ìˆ˜ 없습니다." 
#: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "vterm ì—´ê¸°ì˜ ì¶œë ¥ì´ ì˜¬ë°”ë¥´ì§€ 않습니다. vtermì„ ë‹¤ì‹œ 설정하십시오. 오류는 %s입니다." #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "VNCSocket 리스너가 다ìŒì„ ì²­ì·¨ 중임: ip=%(ip)s í¬íЏ=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "VNC ë¦¬í”¼í„°ì˜ SSL ì¡°ì • ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "hdisk ê²€ìƒ‰ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. LPAR ID %sì˜ ì‹œê°„ì´ ê²½ê³¼ëœ(stale) 스토리지를 제거하고 " "재시ë„합니다." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "LUA를 복구했습니다. ë°œê²¬ëœ ìž¥ì¹˜: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "ITL 오류 ë°œìƒ: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "%s 장치가 현재 사용 중입니다." #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "%s 장치가 알 수 없는 UDID로 검색ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "장치 검색 실패: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "CLIRunner 오류: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "QUERY_INVENTORY LUARecovery ìž‘ì—…ì´ ì„±ê³µí–ˆì§€ë§Œ, ê²°ê³¼ì— " "OutputXML ë˜ëŠ” StdOutì´ í¬í•¨ë˜ì§€ 않습니다." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). 
Error: %(err)s" msgstr "QUERY_INVENTORYì—서 XMLì˜ ì˜¬ë°”ë¥´ì§€ ì•Šì€ ì²­í¬(%(chunk)s)를 ìƒì„±í–ˆìŠµë‹ˆë‹¤. 오류: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "XML 출력ì—서 pg83 디스í¬ë¦½í„°ë¥¼ 찾지 못함:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "ISCSI ëª…ë ¹ì´ ì™„ë£Œë¨" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "ISCSI ì„¸ì…˜ì´ ì´ë¯¸ 있으며 로그ì¸ë¨" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "ì§€ì›ë˜ì§€ 않는 VIOS, 호스트ì—서 ISCSI ëª…ë ¹ì„ ìˆ˜í–‰í–ˆìŠµë‹ˆë‹¤." #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "ISCSI ê²€ìƒ‰ì„ í†µí•´ ODM ë°ì´í„°ë² ì´ìФì—서 ì‹œê°„ì´ ê²½ê³¼ëœ(stale) í•­ëª©ì„ ì°¾ì•˜ìŠµë‹ˆë‹¤." #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "ISCSI ì„¸ì…˜ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ " #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "오í¼ë ˆì´ì…˜ì„ 실행할 레코드/대ìƒ/세션/í¬í„¸ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "ISCSI ëª…ë ¹ì´ ë‚´ë¶€ 오류 ìƒíƒœ = %s와 함께 실패함" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "ISCSI ì¼ë°˜ 오류 코드" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "ISCSI 세션 ë¡œê·¸ì¸ ì‹¤íŒ¨" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "ISCSI ëª…ë ¹ì— ì˜¬ë°”ë¥´ì§€ ì•Šì€ ì¸ìˆ˜ê°€ 있ìŒ" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "ì—°ê²°ì„ ì‹œë„하는 ì¤‘ì— ISCSI ì—°ê²° 타ì´ë¨¸ê°€ 만료ë˜ì—ˆìŠµë‹ˆë‹¤." 
#: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "ISCSI ëª…ë ¹ì´ í˜¸ìŠ¤íŠ¸ë¥¼ 검색할 수 ì—†ìŒ" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "ISCSI ëª…ë ¹ì´ ì˜ˆìƒì¹˜ 못한 ìƒíƒœ = %sì„(를) 리턴함" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "ISCSI ëª…ë ¹ì´ ì§€ì›ë˜ì§€ 않는 VIOSì—서 ìˆ˜í–‰ë¨ " #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "LPAR IDs %sì— ëŒ€í•œ ì‹œê°„ì´ ê²½ê³¼ëœ(stale) 스토리지를 제거하고 iSCSI ê²€ìƒ‰ì„ ìž¬ì‹œë„하십시오." #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." msgstr "" "메트릭 ë°ì´í„°ë¥¼ 사용할 수 없습니다. ì´ëŠ” ë©”íŠ¸ë¦­ì´ ìµœê·¼ì— ì´ˆê¸°í™”ë˜ì—ˆê¸° ë•Œë¬¸ì¼ ìˆ˜ " "있습니다." #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "테스트입니다." #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "다ìŒì€ 변환ë˜ì§€ ì•Šì€ ë©”ì‹œì§€ìž…ë‹ˆë‹¤." #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "프로세서 단위 ì¸ìˆ˜ëŠ” 0.05 - 1.0 사ì´ì—¬ì•¼ 합니다. ê°’: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "논리 파티션 ì´ë¦„ì˜ ê¸¸ì´ê°€ 올바르지 않습니다. ì´ë¦„: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "'%(field)s' í•„ë“œì˜ ê°’ì´ ì˜¬ë°”ë¥´ì§€ 않ìŒ: '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "ì—†ìŒ ê°’ì€ ì˜¬ë°”ë¥´ì§€ 않습니다." 
#: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "ê°’ '%(value)s'ì´(ê°€) 허용 가능한 ì„ íƒí•­ëª© %(choices)sì´(ê°€) 있는 " "'%(field)s' í•„ë“œì— ëŒ€í•´ 올바르지 않습니다." #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "필드 '%(field)s'ì— ìµœì†Œê°’ ë¯¸ë§Œì˜ ê°’ì´ ìžˆìŠµë‹ˆë‹¤. ê°’: %(value)s, " "최소값: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "필드 '%(field)s'ì— ìµœëŒ€ê°’ì„ ì´ˆê³¼í•˜ëŠ” ê°’ì´ ìžˆìŠµë‹ˆë‹¤. ê°’: %(value)s, " "최대값: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. " "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "%(desired_field)s'ì˜ ê°’ì´ '%(max_field)s' 값보다 í½ë‹ˆë‹¤. " "ì›í•˜ëŠ” ê°’: %(desired)s 최대값: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "%(desired_field)s'ì˜ ê°’ì´ '%(min_field)s' 값보다 작습니다. " "ì›í•˜ëŠ” ê°’: %(desired)s 최소값: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "메모리 ê°’ì´ í˜¸ìŠ¤íŠ¸ì˜ ë…¼ë¦¬ 메모리 ë¸”ë¡ í¬ê¸°(%(lmb_size)s)ì˜ ë°°ìˆ˜ê°€ " "아닙니다. ê°’: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "관리 ëŒ€ìƒ ì‹œìŠ¤í…œì´ í™œì„± 메모리 í™•ìž¥ì„ ì§€ì›í•˜ì§€ 않습니다. " "확장 계수 ê°’ '%(value)s'ì´(ê°€) 올바르지 않습니다." 
#: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "활성 메모리 확장 ê°’ì€ 1.0 ì´ìƒ ë° 10.0 ì´í•˜ì—¬ì•¼ 합니다. " "0 ê°’ì€ ìœ íš¨í•˜ê³  AMEê°€ í•´ì œë˜ì–´ 있ìŒì„ 나타냅니다." " AMEê°€ 오프 ìƒíƒœìž…니다. '%(value)s'ì´(ê°€) 올바르지 않습니다." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "URI %(uri)sì— ëŒ€í•´ ì´ %(total)d회 중 %(retry)d회를 시ë„합니다. 오류는 알려진 " "ìž¬ì‹œë„ ì‘답 코드입니다. %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "%(total)d번 중 %(retry)d번 시ë„ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. 재시ë„합니다. 예외:\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "EntryWrapper ë˜ëŠ” EntryWrapperGetter를 제공해야 합니다." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "올바른 하위 태스í¬ë¥¼ 제공해야 합니다." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "'provides' ì´ë¦„ %sì´(ê°€) 중복ë©ë‹ˆë‹¤." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %sì— í•˜ìœ„ 태스í¬ê°€ 없습니다. ì¡°ìž‘ì´ ì‹¤í–‰ë˜ì§€ 않습니다." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "EntryWrappers ë˜ëŠ” FeedGetterì˜ ëª©ë¡ì„ 제공해야 합니다." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %sì— í•˜ìœ„ 태스í¬ê°€ 없습니다. ì¡°ìž‘ì´ ì‹¤í–‰ë˜ì§€ 않습니다." 
#: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "FeedTask %sì—서 여러 예외가 ë°œìƒí–ˆìŠµë‹ˆë‹¤. ì´ëŠ” ì•„ëž˜ì— ê°œë³„ì ìœ¼ë¡œ" " 로깅ë©ë‹ˆë‹¤." #: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "ê°€ìƒ ë¨¸ì‹  '%(instance_name)s'ì— ëŒ€í•œ 호스트ì—서 사용 가능한 %(res_name)sì´(ê°€) 충분하지 " "않ìŒ(%(requested)s 요청ë¨, %(avail)s 사용 가능)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "메모리" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "최소 ë˜ëŠ” 최대 프로세서를 변경하기 ì „ì— ë¨¼ì € ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ " "꺼야 합니다. ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "확장 ìš”ì¸ì„ 변경하기 ì „ì— ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ 꺼야 합니다. " " ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPU" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "처리 장치" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "최소 ë˜ëŠ” 최대 프로세서를 변경하기 ì „ì— ë¨¼ì € ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ " "꺼야 합니다. ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." 
msgstr "" "최소 ë˜ëŠ” 최대 프로세서를 변경하기 ì „ì— ë¨¼ì € ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ " "꺼야 합니다. ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "프로세서 호환 모드를 변경하기 ì „ì— ë¨¼ì € ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ " "꺼야 합니다. ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "처리 모드를 변경하기 ì „ì— ë¨¼ì € ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ 꺼야 합니다. " "들어가지 못하게 했습니다. ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "ì›í•˜ëŠ” 프로세서(%(vcpus)d)ê°€ ê°€ìƒ ë¨¸ì‹  '%(instance_name)s'ì— " "대해 파티션당 허용ë˜ëŠ” 최대 프로세서 수(%(max_allowed)d)를 " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "최대 프로세서(%(vcpus)d)ê°€ ê°€ìƒ ë¨¸ì‹  '%(instance_name)s'ì— ëŒ€í•œ " "최대 시스템 용량 프로세서 한계(%(max_allowed)d)를 " "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "단순 ì›ê²© 다시 시작 ê¸°ëŠ¥ì„ ë³€ê²½í•˜ê¸° ì „ì— ë¨¼ì € ê°€ìƒ ë¨¸ì‹ ì˜ ì „ì›ì„ " "꺼야 합니다. ê°€ìƒ ë¨¸ì‹  %sì˜ ì „ì›ì„ ë„ê³  다시 시ë„하십시오." #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." 
msgstr "íŒŒí‹°ì…˜ì— í™œì„± RMC ì—°ê²°ì´ ì—†ìŠµë‹ˆë‹¤." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "íŒŒí‹°ì…˜ì— %sì— ëŒ€í•œ 활성 DLPAR ê¸°ëŠ¥ì´ ì—†ìŠµë‹ˆë‹¤." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "I/O" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "메모리" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "프로세서" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "올바르지 ì•Šì€ KeylockPos %s입니다." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "올바르지 ì•Šì€ BootMode %s입니다." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapterê°€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다. 대신 IOSlot.io_adapter를 사용하십시오." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "마스터 관리 콘솔 MTMS(머신 유형, 모ë¸, ì¼ë ¨ 번호)를 " "%(identifier)sì—서 확ì¸í•  수 없습니다. í’€ì— ëŒ€í•œ 마스터 콘솔로 í‘œì‹œëœ %(param)sì´(ê°€)" " 없기 때문입니다." #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "UUID를 설정할 수 없습니다." #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "오브ì íЏ %(pvmobject)sì˜ %(property_name)s='%(value)s'ì„(를) 변환할 수 ì—†ìŒ" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "다중 ë§í¬ì—서 href 설정 ê±°ë¶€ 중입니다.\n" "경로: %{path}s\n" "ì°¾ì€ ë§í¬ 수: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." 
msgstr "태그가 없는 ìš”ì†Œì˜ êµ¬ì„± ë° ëž©í•‘ì„ ê±°ë¶€ 중입니다." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "ì‘답ì—서 'entry' íŠ¹ì„±ì´ ëˆ„ë½ë˜ì—ˆìŠµë‹ˆë‹¤." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "랩핑할 ì‘답 ë˜ëŠ” í•­ëª©ì„ ì œê³µí•´ì•¼ 합니다. %sì„(를) 가져왔습니다." #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "ê°œë°œìž ì˜¤ë¥˜: CHILD 오브ì íŠ¸ë¥¼ 검색하려면 'parent' ë˜ëŠ”" " ('parent_type' ë° 'parent_uuid')를 지정하십시오." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "ROOT 오브ì íŠ¸ë¥¼ 요청할 경우 'uuid' ë˜ëŠ” 'root_id'를 지정하십시오." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "CHILD 피드 ë˜ëŠ” í•­ëª©ì„ ê²€ìƒ‰í•  경우 parent_type ë° parent_uuidê°€ 둘 다 " "필요합니다." #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "parent_uuid 매개변수를 통해 ìƒìœ„ì˜ UUID를 지정하십시오." #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "CHILD 오브ì íŠ¸ë¥¼ 요청할 경우 'uuid' ë˜ëŠ” 'child_id'를 지정하십시오." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "ìƒìœ„ ìœ í˜•ì´ ì—†ì´ ì§€ì •ëœ ìƒìœ„ UUID입니다." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "search() 메소드ì—는 정확히 í•˜ë‚˜ì˜ í‚¤=ê°’ ì¸ìˆ˜ê°€ 필요합니다." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "ëž©í¼ í´ëž˜ìФ %(class)sì´(ê°€) 검색 키 '%(key)s'ì„(를) ì§€ì›í•˜ì§€ 않습니다." 
#: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "EntryWrapper.updateì— ëŒ€í•œ 'xag' 매개변수는 ë” ì´ìƒ 사용ë˜ì§€ 않습니다! 최ìƒì˜ 경우, ì´ë¥¼ 사용하면" " ì¡°ìž‘ ì—†ìŒ ê²°ê³¼ê°€ ë°œìƒí•©ë‹ˆë‹¤. ìµœì•…ì˜ ê²½ìš° êµì •í•  수 없는 etag 불ì¼ì¹˜ 오류가 " "ë°œìƒí•©ë‹ˆë‹¤." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "해당 하위 요소가 없습니다." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "메타ë°ì´í„°ê°€ 없는 ëž©í¼ì—서 UUID를 설정할 수 없습니다." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "UUID ê°’ì´ ì˜¬ë°”ë¥´ì§€ 않ìŒ: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "ëž©í¼ ì„œë¸Œí´ëž˜ìŠ¤ë¥¼ 지정해야 합니다." #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "ìƒìœ„ í´ëž˜ìФ ë° ìƒìœ„ UUID를 ëª¨ë‘ ì§€ì •í•˜ê±°ë‚˜ ëª¨ë‘ ì§€ì •í•˜ì§€ 않아야 합니다." #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "%(time)iì´ˆ ë™ì•ˆ %(job_id)s 작업 ëª¨ë‹ˆí„°ë§ ì¤‘." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "작업 %(job_id)sì— ëŒ€í•œ 취소 ìš”ì²­ì„ ì‹¤í–‰í•˜ëŠ” 중입니다. ì¢…ë£Œë  ë•Œê¹Œì§€ ìž‘ì—…ì´ ë¬´í•œì •" " í´ë§ë©ë‹ˆë‹¤." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "%s ìž‘ì—…ì´ ì‚­ì œë˜ì§€ 않았습니다. ìž‘ì—…ì´ ì‹¤í–‰ 중 ìƒíƒœìž…니다." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPARì´ í™œì„± ìƒíƒœê°€ 아닙니다." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." 
msgstr "ëŒ€ìƒ ì‹œìŠ¤í…œì— IBM i LPAR ì´ë™ ê¸°ëŠ¥ì´ ì—†ìŠµë‹ˆë‹¤." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "IBM i LPARì—서는 I/Oê°€ 제한ë˜ì§€ 않습니다." #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "소스 ì‹œìŠ¤í…œì— IBM i LPAR ì´ë™ ê¸°ëŠ¥ì´ ì—†ìŠµë‹ˆë‹¤." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "LPARì— í™œì„± RMC ì—°ê²°ì´ ì—†ìŠµë‹ˆë‹¤." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPARì€ ê´€ë¦¬ 파티션임" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "DLPAR ê¸°ëŠ¥ì´ ëˆ„ë½ë˜ì–´ LPARì„ LPMì— ì‚¬ìš©í•  수 없습니다." #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "ì´ëŠ” 찾고 있는 íŠ¹ì„±ì´ ì•„ë‹™ë‹ˆë‹¤. NovaLink 환경ì—서 " "srr_enabled를 사용하십시오." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "올바르지 ì•Šì€ IPLSrc %s입니다." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "ì´ íŠ¹ì„±ì€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다! 대신 pci_subsys_dev_id를 사용하십시오." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "ì´ íŠ¹ì„±ì€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다! 대신 pci_rev_id를 사용하십시오." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "ì´ íŠ¹ì„±ì€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다! 대신 pci_subsys_vendor_id를 사용하십시오." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "ì´ íŠ¹ì„±ì€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다! 
대신 drc_index를 사용하십시오." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "ì´ íŠ¹ì„±ì€ ë” ì´ìƒ 사용ë˜ì§€ 않습니다! 대신 drc_nameì„ ì‚¬ìš©í•˜ì‹­ì‹œì˜¤." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "CNA.createì— ëŒ€í•´ 올바르지 ì•Šì€ ìƒìœ„ 스펙입니다." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PVê°€ pg83 디스í¬ë¦½í„° \"%(pg83_raw)s\"ì„(를) ì¸ì½”딩했지만 디코드하는 ë° " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" "VIOS EntryWrapper í´ëž˜ìŠ¤ì˜ 'xags' íŠ¹ì„±ì´ ë” ì´ìƒ 사용ë˜ì§€ 않습니다. 다ìŒ" " 대신 pypowervm.const.XAGì˜ ê°’ì„ ì‚¬ìš©í•˜ì‹­ì‹œì˜¤." #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "VIOS ìœ í˜•ì˜ íŒŒí‹°ì…˜ì—서 LPMì„ ìž‘ë™í•  수 ì—†ìŒ" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "ì§€ì›ë˜ëŠ” 스토리지 장치 ì—†ì´ ëŒ€ìƒ ìž¥ì¹˜ LUA를 지정할 수 없습니다." # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/fr/0000775000175000017500000000000013571367172017167 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/fr/pypowervm.po0000664000175000017500000017525613571367171021616 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. 
# msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=2; plural=n>1;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "Protocole non valide \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "Communication non chiffrée avec PowerVM. Passez la configuration en https." #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "Echec du calcul du mémento d'audit par défaut, utilisation de 'default'." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "Authentification locale non prise en charge sur la console HMC." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "Configuration du programme d'écoute d'événement pour %s" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "Descripteur de fichier inattendu sur la demande %s" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "Erreur inattendue pour %(meth)s %(url)s" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "Erreur inattendue : %(class)s pour %(method)s %(url)s : %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "La reconnexion a été jugée non sécurisée. Cette instance de session " "ne doit plus être utilisée." 
#: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "Tentative de nouvelle connexion %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "Reconnexion 401, corps de réponse :\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "Echec de reconnexion, corps de réponse :\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "Echec de la reconnexion :\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "Echec nouvelle tentative avec à nouveau 401, corps de réponse :\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "Réponse HTTP 401 suspecte pour %(method)s %(path)s : jeton totalement nouveau" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "Echec de connexion au serveur REST - le service pvm-rest est -il démarré ? " "Essai %(try_num)d sur %(max_tries)d après %(delay)d secondes." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "Connexion de session sur %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "Echec d'analyse d'un jeton de session à partir de la réponse PowerVM." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " Corps= %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "Echec d'analyse d'un chemin de fichier de session à partir de la réponse PowerVM." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." 
msgstr "Le fichier de jeton %s ne contenait pas de jeton de session lisible." #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "Déconnexion de session %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "Problème de déconnexion. Erreur ignorée." #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "le travail doit être un élément JobRequest" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "JobRequest sans OperationName" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "path=%s n'est pas une référence d'API PowerVM" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "path=%s n'est pas une référence d'API PowerVM" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "Descripteur de fichier non valide" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "root_id attendu" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "child_type attendu" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "suffix_type=%s inattendu" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "suffix_parm attendu" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "child_id attendu" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "child_id inattendu" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "root_id inattendu" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "req_method=%s inattendu" #: 
pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "Erreur d'analyse syntaxique de réponse XML provenant de PowerVM : %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "La réponse n'est pas un flux/une entrée Atom" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "HTTP 204 inattendu pour la demande" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "Corps de réponse vide inattendu" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "En-têtes de demande : %(reqheaders)s\n" "\n" "Corps de demande : %(reqbody)s\n" "\n" "En-têtes de réponse : %(respheaders)s\n" "\n" "Corps de réponse : %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "Erreur Atom pour %(method)s %(path)s : %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "La session ne doit pas être Aucun" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "Un programme d'écoute d'événement est déjà actif sur la session." 
#: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "Echec d'initialisation du programme d'écoute de flux d'événements : %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "ID application \"%s\" non unique" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "En cours d'arrêt" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "Ce gestionnaire est déjà abonné" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "Le gestionnaire doit être de type EventHandler" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "Gestionnaire introuvable dans la liste d'abonnés" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "Arrêt du EventListener pour %s" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "Arrêt du EventListener terminé pour %s" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "Erreur d'obtention d'événements PowerVM : %s. (service pvm-rest arrêté ?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "EventType=%s inattendu" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "Erreur lors du traitement d'événements PowerVM" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "Dérivation impossible du port FC physique approprié pour le WWPN %(wwpn)s." " Groupes d'attributs étendus VIOS peut-être insuffisants. L'URI VIOS" " de la requête était %(vio_uri)s." 
#: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "Elément introuvable : %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "Partition logique introuvable : %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "Adaptateur introuvable" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "L'opération '%(operation_name)s' a échoué. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "Echec de l'opération '%(operation_name)s'. Echec d'exécution de la tâche" " en %(seconds)d secondes." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "Impossible d'arrêter le système d'exploitation sur la machine virtuelle " "%(lpar_nm)s, la connexion RMC n'est pas active." #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Echec de mise hors tension de la machine virtuelle %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "Dépassement du délai de mise hors tension de la machine virtuelle %(lpar_nm)s " "après %(timeout)d secondes." #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Echec de mise sous tension de la machine virtuelle %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." 
msgstr "" "Dépassement du délai de mise sous tension de la machine virtuelle %(lpar_nm)s " "après %(timeout)d secondes." #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "Impossible de retirer le VLAN %(vlan_id)d car il s'agit de l'ID VLAN" " principal sur un autre pont de réseau." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "Mise à disposition du VLAN %(vlan_id)d impossible. Il semble être " "contenu sur l'unité %(dev_name)s' du serveur Virtual I/O Server %(vios)s." " Cette unité n'est connectée à aucun pont réseau (carte SEA). " "Retirez manuellement l'unité ou ajoutez-la au pont de réseau " "avant de continuer." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "Une unité logique avec le nom %(lu_name)s existe déjà sur le pool de " "stockage partagé %(ssp_name)s." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "Impossible de trouver un port physique auquel mapper un port FC. " "Soit un serveur Virtual I/O Server est indisponible, soit une" " spécification de port n'est pas adaptée à des ports FC physiques." #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. 
The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "Impossible de démarrer la console sur la machine virtuelle. L'API" " pypowervm s'exécute en mode non local. La console peut être déployée " "uniquement si pypowervm se trouve au même endroit que l'API PowerVM." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "La tâche WrapperTask %(name)s ne comporte pas de sous-tâche." #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "La tâche FeedTask ne peut pas avoir de flux vide." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "Le système d'exploitation a refusé l'accès au fichier %(access_file)s." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "Erreur d'E-S rencontrée par l'OS pdt tentative de lecture du fichier %(access_file)s : " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "La tâche de migration a échoué. %(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "Aucune source de chargement pour la machine virtuelle %(vm_name)s" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "Impossible de dériver le codage pg83 pour le hdisk %(dev_name)s. " "Attribut parent_entry non défini. Causse possible : utilisation d'un PV " "obtenu via une chaîne de propriété non prise en charge. 
Le PV doit être " "accessible via VIOS.phys_vols, VG.phys_vols, ou " "VIOS.scsi_mappings[n].backing_storage." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "Impossible de remapper l'élément de stockage du mappage vSCSI. " "Précisément un mappage correspondant attendu, trouvé %(num_mappings)d." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "Impossible de remapper l'élément de stockage du mappage vSCSI. Un mappage " "de l'élément de stockage %(stg_name)s existe déjà vers LPAR client %(lpar_uuid)s." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "Unité %(devname)s détectée %(count)d fois ; Attendue au plus " "une fois." #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)s a rencontré plusieurs exceptions :\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "Précisément une partition de gestion attendue ; trouvé %(count)d." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "Précisément une partition attendue avec l'ID %(lpar_id)d ; " "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "Niveau par défaut introuvable sur le pool de stockage partagé %(ssp_name)s." 
#: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "L'unité avec l'UDID %(udid)s est introuvable sur l'un des serveurs " "Virtual I/O Server." #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "Nombre de serveurs Virtual I/O Server insuffisant pour prendre en charge" " l'unité de la machine virtuelle avec l'UDID %(udid)s." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "Ensembles de noeuds attendus (%(fabrics)s) introuvables sur aucun des " "serveurs Virtual I/O Server." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "Impossible de régénérer la machine virtuelle. Celle-ci utilise " "un type d'E-S %(io_type)s non pris en charge pour la régénération " " de machine virtuelle." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "Le nombre d'emplacements VFC sur le système cible (%(rebuild_slots)d) ne" " correspond pas au nombre sur le système client %(original_slots)d). " "Impossible de régénérer cette machine virtuelle sur le système." #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "Pour enregistrer les infos d'emplacement de l'unité réseau, un " "adaptateur CNA ou VNIC est requis. Fourni à la place : %(wrapper)s." 
#: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "Nombre insuffisant de serveurs Virtual I/O Server actifs dispo. " "Attendu %(exp)d ; trouvé %(act)d." #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "Aucun serveur VIOS disponible. Tentative d'attente d'un VIOS " "disponible pendant %(wait_time)d s. Vérifiez la connectivité " "RMC entre PowerVM NovaLink et les serveurs Virtual I/O Server." #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Adaptateurs SR-IOV introuvables en mode Sriov et à l'état d'exécution.\n" "Emplacement | Mode | Etat\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "Impossible de satisfaire les exigences de redondance de %(red)d. %(found_vfs)d" " unité(s) de support viable(s) détectée(s)." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "Système géré non compatible vNIC." #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "Aucun VIOS compatible vNIC actif." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "Redondance de %(red)d spécifiée, mais le système géré n'est pas compatible" " avec la reprise de contrôleur vNIC." 
#: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "Redondance de %(red)d spécifiée, mais il n'y a aucun serveur VIOS " "compatible avec la reprise de contrôleur vNIC." #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "Impossible de localiser le groupe de volumes %(vol_grp)s dans lequel " "stocker le support optique virtuel. Impossible de créer " "le référentiel de supports." #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "Mise à jour de ManagedSystem non tentée car changements demandés pour un" " ou plusieurs ports physiques SR-IOV utilisés par des contrôleurs vNIC.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "Impossible de créer un terminal virtuel de base VNC : %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "Cache d'adaptateur non pris en charge." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "Valeur '%(value)s' non valide pour '%(enum)s'. Valeurs admises : " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "Aucun VIOS portant le nom %(vios_name)s trouvé." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "Aucun groupe de volumes portant le nom %(vg_name)s trouvé." 
#: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "La partition avec le nom %(part_name)s n'est pas une partition IBM i." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "L'argument de partition de fonction PanelJob est vide." #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "Opération de fonction de panneau %(op_name)s non valide. Une de %(valid_ops)s " "attendue." #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Echec de reconnaissance ISCSI pour le VIOS %(vios_uuid)s. Code retour : %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Echec de déconnexion ISCSI pour le VIOS %(vios_uuid)s. Code retour : %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Echec de retrait iSCSI pour le VIOS %(vios_uuid)s. Code retour : %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "Vstor %(stor_udid)s introuvable pour le VIOS %(vios_uuid)s." #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "Le groupe d'attributs étendus proposé '%(arg_xag)s' ne correspond pas " "au groupe existant '%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "Le certificat est arrivé à expiration." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "Au total, préfixe et suffixe ne doivent pas dépasser %d caractères." 
#: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "La longueur totale doit être d'au moins 1 caractère." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "La longueur du paramètre de nom doit être d'au moins un caractère." #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "Le paramètre de nom ne doit pas dépasser %d caractères quand trunk_ok a la valeur Faux." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "Erreur développeur : spécification parent partielle." #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "Erreur développeur : parent_type doit être un type de schéma chaîne " "ou une sous-classe d'encapsuleur (Wrapper)." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "Valeur non valide '%(bad_val)s'. Attendue : une parmi %(good_vals)s ou une liste." #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "DEMANDE : %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "REPONSE : %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "Attente de fin du ou des téléchargements en cours. LU marqueur : %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "Abdication en faveur de l'importation en cours." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "Abdication du téléchargement en faveur du marqueur %s." #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." 
msgstr "Utilisation d'unité logique d'image déjà téléchargée %s." #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "Création d'unité logique de marqueur %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "Importation vers image unité logique %(lu)s (marqueur %(mkr)s)." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "Retrait d'unité logique en échec %s." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "Commutateur virtuel %s introuvable sur le système." #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "VLAN valide introuvable pour le commutateur virtuel %s." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "Erreur d'obtention de surcharge de mémoire hôte pour l'hôte avec l'UUID '%(host)s' : " "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "Délai d'attente dépassé pour l'état RMC de tous les serveurs Virtual I/O " "Server sous tension d'être actifs. Temps d'attente : %(time)d s. VIOS non " "passés à l'état actif : %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "Emplacement sans description considéré comme E-S physique : %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "Partition %s déjà sous tension." #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "Partition %s déjà hors tension." 
#: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "Spécification de add_parms car un dico est obsolète. Indiquez une " "instance %s à la place." #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "Echec d'arrêt normal du système d'exploitation IBMi. Tentative d'arrêt " "immédiat. " "Partition : %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Echec d'arrêt immédiat du système d'exploitation IBMi. Tentative d'arrêt " "normal VSP. " "Partition : %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "Délai d'attente d'arrêt immédiat OS non-IBMi dépassé. Tentative arrêt " "immédiat VSP. Partition : %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Echec d'arrêt immédiat système d'exploitation non-IBMi. Tentative d'arrêt " "normal VSP. " "Partition : %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "Arrêt immédiat VSP avec délai d'attente par défaut. Partition : %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "Nouvelle tentative de modification du mappage SCSI." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." 
msgstr "" "Mappage existant détecté d'élément de stockage %(stg_type)s %(stg_name)s " "depuis le serveur Virtual I/O Server %(vios_name)s vers la partition " "client %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Création du mappage de l'élément de stockage %(stg_type)s %(stg_name)s " "depuis le serveur Virtual I/O Server %(vios_name)s vers la partition " "client %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "match_func et stg_elem ne doivent pas être spécifiés ensemble." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "Méthode register_cna obsolète. Utilisez la méthode register_vnet " "à la place." #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "Méthode drop_cna obsolète. Utilisez la méthode drop_vnet." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "Le port physique SR-IOV à l'emplacement %(loc_code)s supporte un vNIC" " de la LPAR %(lpar_name)s (UUID LPAR : %(lpar_uuid)s, UUID vNIC : " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "Changements apportés aux labels de port physique SR-IOV suivants même " "s'ils sont utilisés par des contrôleurs vNIC :" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "Echec de suppression de vio_file avec UUID %s. Suppression manuelle requise." 
#: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "Incident lors du téléchargement. Nouvelle tentative." #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "Méthode crt_lu_linked_clone obsolète. Utilisez la méthode crt_lu " "(clone=src_lu, size=lu_size_gb)." #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "L'unité logique de disque %(luname)s n'a pas d'unité logique d'image de support. (UDID : %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "Impossible de localiser le nouveau vDisk sur le téléchargement de fichier." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "Unité ignorée car son UDID est absent :\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "Unité %s introuvable dans la liste." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "Suppression du disque virtuel %(vdisk)s du groupe de volumes %(vg)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "Suppression de l'unité optique virtuelle %(vopt)s du groupe de volumes %(vg)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "Retrait de l'unité logique %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "Unité logique %(lu_name)s introuvable - a peut-être été supprimée. " "en externe. 
(UDID : %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "Retrait de l'unité logique d'image %(lu_name)s car plus utilisée. (UDID : " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "Unité logique support %(lu_name)s introuvable. (UDID : %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "Erreur développeur : tier ou lufeed requis." #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "Erreur développeur : le paramètre lufeed doit inclure LUEnt EntryWrappers." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "Suppression de l'unité logique %(lu_name)s (UDID : %(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "HttpError ignoré pour l'unité logique %(lu_name)s, peut-être supprimée" " en externe. (UDID : %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "Retrait de %(num_maps)d mappages %(stg_type)s orphelin du VIOS " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "Retrait de %(num_maps)d mappages VFC sans port du VIOS %(vios_name)s." #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." 
msgstr "" "Retrait de %(num_maps)d mappages %(stg_type)s associés à l'ID partition " "logique %(lpar_id)d depuis le VIOS %(vios_name)s." #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "Pas de retrait du stockage %(stg_name)s de type %(stg_type)s car pas " "possible de déterminer si toujours utilisé. Vérification et nettoyage " "manuels peut-être nécessaires." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" "Elément de stockage %(stg_name)s ignoré par l'épuration car de type " "inattendu %(stg_type)s." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "Epuration des %(vdcount)d disques virtuels suivants du VIOS %(vios)s : " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "Epuration des %(vocount)d unités optiques virtuelles suivantes du VIOS %(vios)s : " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "Epuration ignorée des mappages %(stg_type)s du VIOS %(vios_name)s pour " "les ID LPAR car ces partitions logiques existent : %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "Impossible de trouver le VIOS approprié. Le contenu fourni semble " "insuffisant. 
Données de contenu : \n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "La mappe de port VFC correspondante n'a pas de port de support défini. " "Ajout de %(port)s au mappage pour les clients WWPN client : %(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "Erreur pdt l'interrogation du référentiel de supports optiques virtuels. " "Tentative de restauration de la connexion à un référentiel de supports " "optiques virtuels." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "Impossible de fermer vterm." #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "Sortie non valide sur vterm ouvert. Tentative de réinitialisation du vterm. Erreur : %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "Prog. écoute VNCSocket en mode écoute sur ip=%(ip)s port=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "Erreur de négociation SSL pour le répéteur VNC : %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "Echec détection hdisk ; épurer stockage périmé pour ID LPAR %s et " "réessayer." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "La reprise de LUA a abouti. 
Unité trouvée : %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "Erreur ITL rencontrée : %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "Unité %s actuellement utilisée." #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "Unité %s détectée avec UDID inconnu." #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "Echec de reconnaissance de l'unité : %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "Erreur CLIRunner : %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "Le travail QUERY_INVENTORY LUARecovery a abouti, mais le résultat " "ne contenait ni OutputXML ni StdOut." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY a produit un bloc XML non valide (%(chunk)s). Erreur : %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "Echec de recherche du descripteur pg83 dans la sortie XML :\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "La commande ISCSI a abouti" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "La session ISCSI existe déjà et est ouverte" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "Commande ISCSI effectuée sur un VIOS, hôte, non pris en charge." #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "La reconnaissance ISCSI a trouvé des entrées périmées dans la base de données ODM." 
#: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "Session ISCSI introuvable " #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "Aucun(e) enregistrement/cible/session/portail trouvé(e) sur lequel/laquelle exécuter l'opération." #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "La commande ISCSI a échoué avec le statut d'erreur interne = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "Code d'erreur générique ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "Echec de connexion de session ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "Arguments de commande ISCSI non valides" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "Temporisateur de connexion ISCSI arrivé à expiration lors de la tentative de connexion." #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "La commande ISCSI n'a pas pu rechercher l'hôte" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "La commande ISCSI a renvoyé un statut inattendu = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "Commande ISCSI effectuée sur un VIOS non pris en charge " #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "Stockage d'éléments périmés pour les ID LPAR %s et nouvelle tentative de reconnaissance iSCSI." #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." msgstr "" "Données de mesure indisponibles. 
Cause possible : mesures " "récemment initialisées." #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "Ceci est un test" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "Il n'existe pas de traduction pour ce message" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "Le facteur d'unités processeur doit être compris entre 0,05 et 1,0. Valeur : %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "Longueur de partition logique non valide. Nom : %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "La zone '%(field)s' comporte une valeur non valide : '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "Valeur Aucun non valide." #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "La valeur '%(value)s' n'est pas valide pour la zone '%(field)s' avec " "des options acceptables : %(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "Valeur zone '%(field)s' inférieure au minimum. Valeur : %(value)s ; " "Minimum : %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "Valeur zone '%(field)s' supérieure au maximum. Valeur : %(value)s ; " "Maximum : %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. 
" "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "La valeur de '%(desired_field)s' est supérieure à la valeur '%(max_field)s'. " "Souhaité : %(desired)s Maximum : %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "La valeur de '%(desired_field)s' est inférieure à la valeur '%(min_field)s'. " "Souhaité : %(desired)s Minimum : %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "La valeur de mémoire n'est pas un multiple de la taille de bloc de " "mémoire logique (%(lmb_size)s) de l'hôte. Valeur : %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "Le système géré n'est pas compatible Active Memory Expansion. " "La valeur du facteur d'expansion '%(value)s' n'est pas valide." #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "La valeur Active Memory Expansion doit être supérieure ou égale à 1,0 et " "inférieure ou égale à 10,0. La valeur de 0, également valide, indique" " qu'AME est désactivé. '%(value)s' n'est pas une valeur valide." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "Tentative %(retry)d sur un total de %(total)d pour l'URI %(uri)s. 
Erreur " "était un code de réponse connu de nouvel essai : %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "Echec tentative %(retry)d sur %(total)d. Nouvelle tentative. Exception :\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "Vous devez indiquer EntryWrapper ou EntryWrapperGetter." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "Indiquez une sous-tâche valide." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "Nom 'provides' en double %s." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s ne comporte pas de sous-tâche. Pas d'opération exécutée." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "Indiquez une liste de EntryWrappers ou un FeedGetter." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s n'a aucune sous-tâche ; exécution no-op." #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "FeedTask %s a rencontré plusieurs exceptions. Consignation individuelle" " ci-après." 
#: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "Ressources %(res_name)s disponibles insuffisantes sur l'hôte pour la " "machine virtuelle '%(instance_name)s' (%(requested)s demandées, " "%(avail)s disponibles)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "mémoire" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être hors tension avant changement de la " "mémoire min. ou max. Mettez la machine %s hors tension et réessayez." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être mise hors tension avant changement du " "d'expansion. Mettez la machine %s hors tension et réessayez." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "UC" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "unités de traitement" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être hors tension avant changement de la " "min. ou max. de processeurs. Mettez la machine %s hors tension et réessayez." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être hors tension avant changement de la " "min. ou max. d'unités processeur. 
Mettez la machine %s hors tension et réessayez." #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être hors tension avant changement du mode de " "compatibilité. Mettez la machine %s hors tension et réessayez." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être hors tension avant changement du mode de " "maintenance. Mettez la machine %s hors tension et réessayez." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Le nombre de processeurs souhaité %(vcpus)d) ne doit pas dépasser le " "nombre maximal autorisé par partition (%(max_allowed)d) pour la machine " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Le nombre maximal de processeurs (%(vcpus)d) ne doit pas dépasser la " "limite processeur de capacité système (%(max_allowed)d) pour la machine " "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "La machine virtuelle doit être mise hors tension avt changement de la fonction " "de redémarrage à distance. Mettez la machine %s hors tension et réessayez." 
#: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "La partition n'a pas de connexion RMC active." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "La partition n'a pas de fonction DLPAR active pour %s." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "E-S" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "Mémoire" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "Processeurs" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "KeylockPos non valide '%s'." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "BootMode non valide '%s'." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapter obsolète. Utilisez IOSlot.io_adapter à la place." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "Impossible de déterminer le code MTMS (type machine, modèle, n° série) de " "la console de gestion maître depuis %(identifier)s car aucun %(param)s" " n'a été marqué comme console maître du pool." #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "Impossible de définir l'uuid." 
#: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "Impossible de convertir %(property_name)s='%(value)s' dans l'objet %(pvmobject)s" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "Refus de définition de href sur plusieurs liens.\n" "Chemin : %{path}s\n" "Nombre de liens trouvés : %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "Refus de construction et d'encapsulation d'élément sans balise." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "Propriété 'entry' manquante dans la réponse." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "Indiquez une réponse ou une entrée pour l'encapsulation. Obtenu %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "Erreur développeur : indiquez 'parent' ou ('parent_type' et 'parent_uuid')" " pour extraire un objet CHILD." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "Indiquez 'uuid' ou 'root_id' lors de la demande d'objet ROOT." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "parent_type et parent_uuid obligatoires quand extraction d'un flux ou " "d'une entrée CHILD." #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "Indiquez l'UUID du parent via le paramètre parent_uuid." 
#: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "Indiquez 'uuid' ou 'child_id' lors de la demande d'objet CHILD." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "UUID parent spécifié sans type parent." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "La méthode search() requiert exactement un argument key=value." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "La classe d'encapsuleur %(class)s ne prend pas en charge le critère de recherche '%(key)s'." #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "Paramètre 'xag' dans EntryWrapper.update obsolète. Au mieux, son utilisation" " sera sans résultat. Au pire, elle générera des erreurs de concordance de " "etag non résolvables." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "Aucun élément enfant de ce type." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "Impossible de définir l'UUID sur l'encapsuleur sans métadonnées." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "Valeur d'UUID non valide : %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "Vous devez définir une sous-classe d'encapsuleur." #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "Vous devez indiquer la classe parent et l'UUID parent, ou aucun des deux." 
#: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "Travail %(job_id)s en cours de surveillance depuis %(time)i secondes." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "Emission de demande d'annulation du travail %(job_id)s. Le travail sera" " interrogé indéfiniment pour l'arrêt." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "Travail %s non supprimé. Etat du travail : en cours d'exécution." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "La partition logique n'est pas à l'état actif." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "Le système cible n'est pas compatible avec la fonction de mobilité de partition IBM i." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "La partition logique IBM i n'a pas restriction d'E-S." #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "Le système source n'est pas compatible avec la fonction de mobilité de partition IBM i." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "La partition logique n'a pas de connexion RMC active." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "La partition logique est la partition de gestion" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "La partition logique n'est pas disponible pour LPM en raison de fonctions DLPAR manquantes." 
#: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "Il ne s'agit pas de la propriété que vous recherchez. Utilisez " "srr_enabled dans un environnement NovaLink." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "IPLSrc non valide '%s'." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "Cette propriété est obsolète. Utilisez pci_subsys_dev_id à la place." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "Cette propriété est obsolète. Utilisez pci_rev_id à la place." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "Cette propriété est obsolète. Utilisez pci_subsys_vendor_id à la place." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "Cette propriété est obsolète. Utilisez drc_index à la place." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "Cette propriété est obsolète. Utilisez drc_name à la place." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "Spécification parent non valide pour CNA.create." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV a encodé le descripteur pg83 \"%(pg83_raw)s\", mais n'a pas pu le décoder. " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." 
msgstr "" "Propriété 'xags' de la classe VIOS EntryWrapper obsolète. Utilisez" " à la place des valeurs de pypowervm.const.XAG." #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "La partition de type VIOS n'est pas compatible LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "Impossible de spécifier de LUA d'unité cible sans unité de stockage de secours." # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/zh-Hans/0000775000175000017500000000000013571367172020070 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/zh-Hans/pypowervm.po0000664000175000017500000016517013571367171022511 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=1; plural=0;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "无效å议“%sâ€" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "与 PowerVM 的通信未加密ï¼å°†é…置还原为 https。" #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "计算缺çœå®¡è®¡ memento 失败,将使用“defaultâ€ã€‚" #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." 
msgstr "在 HMC ä¸Šï¼Œä¸æ”¯æŒæœ¬åœ°è®¤è¯ã€‚" #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "正在为 %s 设置事件侦å¬å™¨" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "在 %s 请求中å‘现æ„å¤–çš„æ–‡ä»¶å¥æŸ„" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "%(meth)s %(url)s çš„æ„外错误" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "%(method)s %(url)s çš„æ„外错误 %(class)s:%(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "釿–°ç™»å½•被认为ä¸å®‰å…¨ã€‚ä¸åº”该å†ä½¿ç”¨æ­¤ä¼šè¯" "实例。" #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "正在å°è¯•釿–°ç™»å½• %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "釿–°ç™»å½• 401,å“应主体:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "釿–°ç™»å½•失败,å“应主体:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "釿–°ç™»å½•失败:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "釿–°å°è¯•登录 401 失败,å“应主体:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "%(method)s %(path)s çš„å¯ç–‘ HTTP 401 å“应:令牌是全新的" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." 
msgstr "" "无法连接至 REST æœåС噍 - pvm-rest æœåŠ¡æ˜¯å¦å·²å¯åŠ¨ï¼Ÿ" "正在 %(delay)d åŽè¿›è¡Œç¬¬ %(try_num)d 次(共 %(max_tries)d 次)é‡è¯•。" #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "ä¼šè¯æ­£åœ¨ç™»å½• %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "æœªèƒ½è§£æžæ¥è‡ª PowerVM å“应的会è¯ä»¤ç‰Œã€‚" #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " 主体 = %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "æœªèƒ½è§£æžæ¥è‡ª PowerVM å“åº”çš„ä¼šè¯æ–‡ä»¶è·¯å¾„。" #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "令牌文件 %s ä¸åŒ…å«å¯è¯»çš„会è¯ä»¤ç‰Œã€‚" #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "ä¼šè¯æ­£åœ¨ä»Ž %s 注销" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "注销时å‘生问题。正在忽略。" #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "作业必须是 JobRequest 元素" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "JobRequest 缺少 OperationName" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "路径 = %s 䏿˜¯ PowerVM API 引用" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "路径 = %s 䏿˜¯ PowerVM API 引用" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "文件æè¿°ç¬¦æ— æ•ˆ" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "期望的 root_id" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "期望的 child_type" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "æ„外的 suffix_type = %s" #: 
pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "期望的 suffix_parm" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "期望的 child_id" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "æ„外的 child_id" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "æ„外的 root_id" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "æ„外的 req_method = %s" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "è§£æžæ¥è‡ª PowerVM çš„ XML å“应时å‘生错误:%s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "å“åº”ä¸æ˜¯ Atom 订阅æº/æ¡ç›®" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "请求的æ„外 HTTP 204" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "æ„外的空å“应主体" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "请求头:%(reqheaders)s\n" "\n" "请求主体:%(reqbody)s\n" "\n" "å“应头:%(respheaders)s\n" "\n" "å“应主体:%(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "%(method)s %(path)s çš„ Atom 错误:%(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "会è¯ä¸å¾—是“无â€" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." 
msgstr "æŸä¸ªäº‹ä»¶ä¾¦å¬å™¨åœ¨ä¼šè¯ä¸Šå·²ç»å¤„于活动状æ€ã€‚" #: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "未能åˆå§‹åŒ–事件订阅æºä¾¦å¬å™¨ï¼š%s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "åº”ç”¨ç¨‹åºæ ‡è¯†â€œ%sâ€ä¸å”¯ä¸€" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "正在关闭" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "å·²ç»è®¢é˜…此处ç†ç¨‹åº" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "处ç†ç¨‹åºå¿…须是 EventHandler" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "在订户列表中找ä¸åˆ°å¤„ç†ç¨‹åº" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "正在关闭 %s çš„ EventListener" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "已完æˆå…³é—­ %s çš„ EventListener" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "èŽ·å– PowerVM 事件时å‘生错误:%s。(pvm-rest æœåŠ¡æ˜¯å¦å·²å…³é—­ï¼Ÿï¼‰" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "æ„外的事件类型 = %s" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "å¤„ç† PowerVM 事件时å‘生错误" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." 
msgstr "" "无法为 WWPN %(wwpn)s 生æˆç›¸åº”ç‰©ç† FC 端å£ã€‚VIOS 扩展属性组" " å¯èƒ½ä¸å¤Ÿç”¨ã€‚查询的 VIOS URI" " 为 %(vio_uri)s。" #: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "找ä¸åˆ°å…ƒç´ ï¼š%(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "找ä¸åˆ° LPAR:%(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "找ä¸åˆ°é€‚é…器" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "“%(operation_name)sâ€æ“作失败。%(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "“%(operation_name)sâ€æ“作失败。无法在" " %(seconds)d 秒钟之内完æˆè¯¥ä»»åŠ¡ã€‚" #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "在虚拟机 %(lpar_nm)s 上无法执行æ“作系统关闭," "因为其 RMC è¿žæŽ¥å¤„äºŽä¸æ´»åŠ¨çŠ¶æ€ã€‚" #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "未能关闭虚拟机 %(lpar_nm)s:%(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "关闭虚拟机 %(lpar_nm)s 在 %(timeout)d 秒之åŽ" "超时。" #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "未能开å¯è™šæ‹Ÿæœº %(lpar_nm)s:%(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "å¼€å¯è™šæ‹Ÿæœº %(lpar_nm)s 在 %(timeout)d 秒之åŽ" "超时。" #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." 
msgstr "" "无法移除 VLAN %(vlan_id)d," " 因为它是å¦å¤–的网桥上的主 VLAN 标识。" #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "无法供应 VLAN %(vlan_id)d。它å¯èƒ½åŒ…å«åœ¨" "Virtual I/O Server %(vios)s 上的设备“%(dev_name)sâ€ä¸­ã€‚该设备" " 未连接至任何网桥(共享以太网适é…器)。请" "手动移除该设备,或将其添加至网桥,然åŽ" "å†ç»§ç»­æ“作。" #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "共享存储池 %(ssp_name)s 上已存在å称为 %(lu_name)s" "的逻辑å•元。" #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "找ä¸åˆ°è¦å°†è™šæ‹Ÿå…‰çº¤é€šé“ç«¯å£æ˜ å°„至的物ç†ç«¯å£ã€‚" "这是由于 Virtual I/O Server ä¸å¯ç”¨ï¼Œ" " 或者为物ç†å…‰çº¤é€šé“ç«¯å£æŒ‡å®šäº†ä¸æ­£ç¡®çš„端å£ã€‚" #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "无法对虚拟机å¯åŠ¨æŽ§åˆ¶å°ã€‚pypowervm API" " æ­£åœ¨ä»¥éžæœ¬åœ°æ–¹å¼è¿è¡Œã€‚仅当 pypowervm 与 PowerVM API ä½äºŽåŒä¸€ä½ç½®æ—¶ï¼Œ" "æ‰èƒ½éƒ¨ç½²æŽ§åˆ¶å°ã€‚" #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s 没有å­ä»»åŠ¡ï¼" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask ä¸èƒ½å…·æœ‰ç©ºçš„订阅æºã€‚" #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." 
msgstr "æ“作系统拒ç»è®¿é—®æ–‡ä»¶ %(access_file)s。" #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "æ“作系统在å°è¯•è¯»å–æ–‡ä»¶ %(access_file)s æ—¶é‡åˆ° I/O 错误:" "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "è¿ç§»ä»»åŠ¡å¤±è´¥ã€‚%(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "找ä¸åˆ° VM %(vm_name)s 的装入æº" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "无法为 hdisk %(dev_name)s 派生 pg83 ç¼–ç ã€‚VIOS 扩展属性组" "未设置 parent_entry 属性。这å¯èƒ½æ˜¯ç”±äºŽä½¿ç”¨äº† PV" "(通过ä¸å—支æŒçš„属性链获å–)。该 PV 必须通过" "VIOS.phys_volsã€VG.phys_vols 或" "VIOS.scsi_mappings[n].backing_storage 进行访问。" #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "æ— æ³•é‡æ–°æ˜ å°„ vSCSI 映射的存储器元素。应该正好找到" "一个匹é…çš„æ˜ å°„ï¼Œä½†å´æ‰¾åˆ° %(num_mappings)d 个。" #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "æ— æ³•é‡æ–°æ˜ å°„ vSCSI 映射的存储器元素。已存在" "存储器元素 %(stg_name)s 至客户机 LPAR %(lpar_uuid)s 的映射。" #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." 
msgstr "" "找到设备 %(devname)s %(count)d æ¬¡ï¼›æœŸæœ›åªæ‰¾åˆ°è¯¥è®¾å¤‡æœ€å¤š" "一次。" #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)s é‡åˆ°å¤šä¸ªå¼‚常:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "应该有且仅有一个管ç†åˆ†åŒºï¼›ä½†æ˜¯æ‰¾åˆ° %(count)d 个管ç†åˆ†åŒºã€‚" #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "æœŸæœ›åªæ‰¾åˆ°ä¸€ä¸ªæ ‡è¯†ä¸º %(lpar_id)d çš„åˆ†åŒºï¼›ä½†å´æ‰¾åˆ°" "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "在共享存储池 %(ssp_name)s 上找ä¸åˆ°ç¼ºçœå±‚。" #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "在任何 Virtual I/O Server 上都找ä¸åˆ° UDID 为 %(udid)s çš„" "设备。" #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "没有足够的 Virtual I/O Server æ¥æ”¯æŒ" " UDID 为 %(udid)s 的虚拟机设备。" #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "在任何 Virtual I/O Server 上都找ä¸åˆ°æœŸæœ›çš„光纤网 (%(fabrics)s)。" " " #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." 
msgstr "" "无法é‡å»ºè™šæ‹Ÿæœºã€‚它使用 I/O 类型" "%(io_type)s,但是 VM é‡å»ºä¸æ”¯æŒè¯¥ç±»åž‹ã€‚" #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "目标系统上的 VFC æ’æ§½æ•° (%(rebuild_slots)d) 与" " 客户机系统 (%(original_slots)d) ä¸Šçš„æ’æ§½æ•°ä¸åŒ¹é…。" "在此系统上无法é‡å»ºæ­¤è™šæ‹Ÿæœºã€‚" #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "è¦æ³¨å†Œç½‘ç»œè®¾å¤‡çš„æ’æ§½ä¿¡æ¯ï¼Œéœ€è¦ CNA 或 VNIC" "适é…器。但是,æä¾›çš„æ˜¯ä»¥ä¸‹å†…容:%(wrapper)s。" #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "没有足够的å¯ç”¨æ´»åЍ Virtual I/O Server。需è¦" "%(exp)d 个,但找到 %(act)d 个。" #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "没有å¯ç”¨çš„ Virtual I/O Server。已å°è¯•等待 %(wait_time)d 秒以便 VIOS" "å˜ä¸ºæ´»åŠ¨çŠ¶æ€ã€‚请检查" "PowerVM NovaLink 与 Virtual I/O Server 之间的 RMC 连接。" #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "在 SR-IOV æ–¹å¼å’Œè¿è¡Œçжæ€ä¸‹æ‰¾ä¸åˆ°ä»»ä½• SR-IOV 适é…器。\n" "ä½ç½® | æ–¹å¼ | 状æ€\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." 
msgstr "" "无法满足 %(red)d 的冗余需求。找到了 %(found_vfs)d 个" " å¯è¡Œçš„æ”¯æŒè®¾å¤‡ã€‚" #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "å—ç®¡ç³»ç»Ÿä¸æ”¯æŒ vNIC。" #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "æ²¡æœ‰æ´»åŠ¨çš„æ”¯æŒ vNIC çš„ VIOS。" #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "指定了冗余 %(red)d,但是å—ç®¡ç³»ç»Ÿä¸æ”¯æŒ" " vNIC 故障转移。" #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "已指定冗余 %(red)d,但是没有" "æ”¯æŒ vNIC 故障转移的活动 VIOS。" #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "找ä¸åˆ°ç”¨äºŽå­˜å‚¨è™šæ‹Ÿå…‰å­¦ä»‹è´¨çš„å·ç»„ %(vol_grp)s。" "无法创建介质存储库。" #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "未å°è¯•æ›´æ–°å—管系统,因为所请求的更改" " 是针对一个或多个正在由 vNIC 使用的 SR-IOV 物ç†ç«¯å£è€Œå‘出。\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "无法创建基于 VNC 的虚拟终端:%(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "适é…器高速缓存ä¸å—支æŒã€‚" #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "“%(enum)sâ€çš„值“%(value)sâ€æ— æ•ˆã€‚有效值为:" "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." 
msgstr "找ä¸åˆ°å称为 %(vios_name)s çš„ VIOS。" #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "找ä¸åˆ°å称为 %(vg_name)s çš„å·ç»„。" #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "å称为 %(part_name)s çš„åˆ†åŒºä¸æ˜¯ IBMi 分区。" #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "PanelJob åŠŸèƒ½åˆ†åŒºå‚æ•°ä¸ºç©ºã€‚" #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "颿¿åŠŸèƒ½æ“作 %(op_name)s 无效。期望其中一项 %(valid_ops)s" "。" #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "针对 VIOS %(vios_uuid)s 执行的 ISCSI å‘现æ“作失败。返回ç ï¼š%(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "针对 VIOS %(vios_uuid)s 执行的 ISCSI 注销æ“作失败。返回ç ï¼š%(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "针对 VIOS %(vios_uuid)s 执行的 ISCSI 移除æ“作失败。返回ç ï¼š%(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "找ä¸åˆ° VIOS %(vios_uuid)s çš„ Vstor %(stor_udid)s。" #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "建议的扩展属性组“%(arg_xag)sâ€" "与现有的扩展属性组“%(path_xag)sâ€ä¸åŒ¹é…" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "è¯ä¹¦å·²åˆ°æœŸã€‚" #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "å‰ç¼€å’ŒåŽç¼€ä¸€å…±ä¸èƒ½è¶…过 %d 个字符。" #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." 
msgstr "总长度必须至少为 1 个字符。" #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "name 傿•°çš„长度必须至少为 1 个字符。" #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "当 trunk_ok 为 False 时,name 傿•°ä¸å¾—超过 %d 个字符。" #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "å¼€å‘者错误:部分父级规范。" #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "å¼€å‘者错误:parent_type 必须为字符串模å¼ç±»åž‹æˆ–" "包装程åºå­ç±»ã€‚" #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "无效值“%(bad_val)sâ€ã€‚需è¦ä¸€ä¸ªæˆ–一列 %(good_vals)s。" #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "请求:%s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "å“应:%s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "正在等待进行中的上载æ“作完æˆã€‚标记 LU:%s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "正在放弃支æŒè¿›è¡Œä¸­çš„上载æ“作。" #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "正在放弃上载æ“ä½œï¼Œè½¬ä¸ºæ”¯æŒæ ‡è®° %s。" #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "æ­£åœ¨ä½¿ç”¨å·²ä¸Šè½½çš„æ˜ åƒ LU %s。" #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "正在创建标记 LU %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "æ­£åœ¨ä¸Šè½½è‡³æ˜ åƒ LU %(lu)s(标记 %(mkr)s)。" #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." 
msgstr "正在移除失败的 LU %s。" #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "在系统上找ä¸åˆ°è™šæ‹Ÿäº¤æ¢æœº %s。" #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "找ä¸åˆ°è™šæ‹Ÿäº¤æ¢æœº %s 的有效 VLAN。" #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "èŽ·å– UUID 为“%(host)sâ€çš„主机的主机内存开销时å‘生错误:" "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "等待所有已通电 Virtual I/O Server çš„ RMC 状æ€å˜ä¸ºæ´»åŠ¨æ—¶è¶…æ—¶ã€‚" "等待时间为 %(time)d 秒。未进入活动状æ€çš„" "VIOS 为:%(vioses)s。" #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "å‡å®šæ— æè¿°çš„æ’æ§½æ˜¯ç‰©ç† I/O:%s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "已开å¯åˆ†åŒº %s。" #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "已关闭分区 %s。" #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "建议ä¸è¦å°† add_parms 指定为字典。请改为指定 %s" "实例。" #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "IBMi æ“作系统正常关闭失败。å°è¯•ç«‹å³å…³é—­æ“作系统。" "分区:%s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "IBMi æ“作系统立å³å…³é—­å¤±è´¥ã€‚å°è¯•正常关闭 VSP。" "分区:%s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. 
Trying VSP hard shutdown. " "Partition: %s" msgstr "" "éž IBMi æ“作系统立å³å…³é—­å·²è¶…时。å°è¯•硬关闭 VSP。" "分区:%s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "éž IBMi æ“作系统立å³å…³é—­å¤±è´¥ã€‚å°è¯•正常关闭 VSP。" "分区:%s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "VSP 硬关闭,返回缺çœè¶…时。分区:%s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "é‡è¯•修改 SCSI 映射。" #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "找到 %(stg_type)s 存储器元素 %(stg_name)s çš„" "从 Virtual I/O Server %(vios_name)s 到客户机 LPAR %(lpar_uuid)s 的现有映射。" #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "正在创建 %(stg_type)s 存储元素 %(stg_name)s çš„" "从 Virtual I/O Server %(vios_name)s 到客户机 LPAR %(lpar_uuid)s 的映射。" #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "ä¸å¾—åŒæ—¶æŒ‡å®š match_func å’Œ stg_elem。" #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "建议ä¸è¦ä½¿ç”¨ register_cna 方法ï¼è¯·ä½¿ç”¨ register_vnet" "方法。" #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "建议ä¸è¦ä½¿ç”¨ drop_cna 方法ï¼è¯·ä½¿ç”¨ drop_vnet 方法。" #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." 
msgstr "" "ä½ç½® %(loc_code)s 中的 SR-IOV 物ç†ç«¯å£æ­£åœ¨æ”¯æŒä¸€ä¸ª vNIC,该 vNIC 属于" " LPAR %(lpar_name)s(LPAR UUID:%(lpar_uuid)sï¼›vNIC UUID:" "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "正在更改下列 SR-IOV 物ç†ç«¯å£æ ‡ç­¾ï¼Œå°½ç®¡å®ƒä»¬æ­£ç”±" "vNIC 使用:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "未能删除 UUID 为 %s çš„ vio_file。必须将其手动删除。" #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "上载时é‡åˆ°é—®é¢˜ã€‚å°†é‡è¯•。" #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "建议ä¸è¦ä½¿ç”¨ crt_lu_linked_clone 方法ï¼è¯·ä½¿ç”¨ crt_lu" "方法(clone=src_lu,size=lu_size_gb)。" #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "ç£ç›˜é€»è¾‘å•å…ƒ %(luname)s æ²¡æœ‰æ”¯æŒæ˜ åƒ LU。(UDID:%(udid)s)" #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "上载文件时找ä¸åˆ°æ–°çš„ vDisk。" #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "正在忽略设备,因为它缺少 UDID:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." 
msgstr "在列表中找ä¸åˆ°è®¾å¤‡ %s。" #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "正在从å·ç»„ %(vg)s 中删除虚拟盘 %(vdisk)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "正在从å·ç»„ %(vg)s 中删除虚拟光学设备 %(vopt)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "正在移除 LU %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "找ä¸åˆ° LU %(lu_name)s - å¯èƒ½å·²å¸¦å¤–删除。" "(UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "æ­£åœ¨ç§»é™¤æ˜ åƒ LU %(lu_name)s,因为ä¸å†å¯¹å®ƒè¿›è¡Œä½¿ç”¨ã€‚(UDID:)" "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "找ä¸åˆ°æ”¯æŒ LU %(lu_name)s。(UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "å¼€å‘者错误:需è¦å±‚或 lufeed。" #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "å¼€å‘者错误:lufeed 傿•°å¿…é¡»åŒ…å« LUEnt EntryWrappers。" #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "正在删除 LU %(lu_name)s(UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "忽略 HttpError,因为å¯èƒ½å·²ç»ä»¥é¢‘带方å¼åˆ é™¤ LU %(lu_name)s。" " (UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." 
msgstr "" "正在从 VIOS %(vios_name)s 移除 %(num_maps)d 个孤立的 %(stg_type)s" "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "正在从 VIOS %(vios_name)s 中移除 %(num_maps)d 个缺少端å£çš„ VFC 映射。" #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "正在从 VIOS %(vios_name)s 移除与 LPAR 标识 " "%(lpar_id)d 相关è”çš„ %(num_maps)d 个 %(stg_type)s 映射。" #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "未在移除类型为 %(stg_type)s 的存储器 %(stg_name)s,因为无法确定" "它是å¦ä»åœ¨ä½¿ç”¨ä¸­ã€‚å¯èƒ½éœ€è¦æ‰‹åŠ¨éªŒè¯" "和清除。" #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" "存储器擦除正在忽略存储元素 %(stg_name)s,因为它是" "æ„外类型 %(stg_type)s。" #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "正在从 VIOS %(vios)s 中擦除下列 %(vdcount)d 个虚拟盘:" "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "正在从 VIOS %(vios)s 中擦除下列 %(vocount)d 个虚拟光学设备:" "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "正在为下列 LPAR 标识跳过从 VIOS %(vios_name)s 擦除 %(stg_type)s 映射," "因为这些 LPAR 已存在:%(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. 
The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "找ä¸åˆ°ç›¸åº”çš„ VIOS。所æä¾›çš„æœ‰æ•ˆå†…容" "很有å¯èƒ½ä¸è¶³ã€‚有效内容数æ®ä¸ºï¼š\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "匹é…çš„ VFC ç«¯å£æ˜ å°„尚未设置支æŒç«¯å£ã€‚正在将 %(port)s 添加到" "下列客户机 WWPN 的映射:%(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "查询虚拟光学介质存储库时å‘生错误。" "正在å°è¯•ä¸Žè™šæ‹Ÿå…‰å­¦ä»‹è´¨å­˜å‚¨åº“é‡æ–°" "建立连接。" #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "无法关闭 vterm。" #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "已打开 vterm 上的输出无效。正在å°è¯•é‡ç½® vterm。错误为 %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "VNCSocket 侦å¬å™¨æ­£åœ¨ä¾¦å¬ ip=%(ip)s port=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "针对 VNC 转å‘器å商 SSL 时出错:%s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "å‘现 hdisk 失败;将擦除 LPAR 标识 %s 的旧存储器," "ç„¶åŽé‡è¯•。" #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "LUA æ¢å¤æˆåŠŸã€‚æ‰¾åˆ°çš„è®¾å¤‡ï¼š%s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "é‡åˆ°äº† ITL 错误:%s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "当剿­£åœ¨ä½¿ç”¨ %s 设备。" #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." 
msgstr "å‘现了具有未知 UUID çš„ %s 设备。" #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "未能å‘现设备:%s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "CLIRunner 错误:%s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "QUERY_INVENTORY LUARecovery 作业已æˆåŠŸï¼Œä½†ç»“æžœæ—¢ä¸åŒ…å«" "OutputXML,也ä¸åŒ…嫿 ‡å‡†è¾“出。" #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY 生æˆäº†æ— æ•ˆ XML åŒºå— (%(chunk)s)。错误:%(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "未能在 XML 输出中找到 pg83 æè¿°ç¬¦ï¼š\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "ISCSI 命令已æˆåŠŸå®Œæˆ" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "ISCSI 会è¯å·²å­˜åœ¨å¹¶ç™»å½•" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "ISCSI 命令已在ä¸å—支æŒçš„ VIOS 或主机上完æˆã€‚" #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." 
msgstr "ISCSI å‘现æ“作在 ODM æ•°æ®åº“中找到了旧æ¡ç›®ã€‚" #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "找ä¸åˆ° ISCSI 会è¯" #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "找ä¸åˆ°è¦å¯¹å…¶æ‰§è¡Œæ“作的记录/目标/会è¯/门户网站" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "ISCSI å‘½ä»¤å¤±è´¥ï¼Œå†…éƒ¨é”™è¯¯çŠ¶æ€ = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "ISCSI 通用错误代ç " #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "ISCSI 会è¯ç™»å½•失败" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "ISCSI 命令的自å˜é‡æ— æ•ˆ" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "å°è¯•连接时,ISCSI 连接计时器已到期。" #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "ISCSI 命令找ä¸åˆ°ä¸»æœº" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "ISCSI 命令返回了æ„外状æ€ï¼ˆ= %s)" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "ISCSI 命令已在ä¸å—支æŒçš„ VIOS 上完æˆ" #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "请擦除 LPAR 标识 %s 的旧存储器,然åŽé‡è¯• iSCSI å‘现æ“作。" #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." 
msgstr "" "指标数æ®ä¸å¯ç”¨ã€‚è¿™å¯èƒ½æ˜¯å› ä¸ºæŒ‡æ ‡æœ€è¿‘" "进行了åˆå§‹åŒ–。" #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "这是一个测试" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "è¿™æ˜¯ä¸€æ¡æœªè¿›è¡Œç¿»è¯‘的消æ¯" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "处ç†å™¨å•元因å­å¿…须介于 0.05 到 1.0 之间。值:%s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "逻辑分区å称的长度无效。å称:%s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "字段“%(field)sâ€çš„值无效:“%(value)sâ€" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "值 None 无效。" #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "对于字段“%(field)sâ€ï¼Œå€¼â€œ%(value)sâ€æ— æ•ˆï¼Œå¯æŽ¥å—çš„" "选项为:%(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "字段“%(field)sâ€çš„值低于最å°å€¼ã€‚值:%(value)sï¼›" "最å°å€¼ï¼š%(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "字段“%(field)sâ€çš„值高于最大值。值:%(value)sï¼›" "最大值:%(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. " "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "“%(desired_field)sâ€çš„值大于“%(max_field)sâ€å€¼ã€‚" "期望值:%(desired)s 最大值:%(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. 
" "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "“%(desired_field)sâ€çš„值å°äºŽâ€œ%(min_field)sâ€å€¼ã€‚" "期望值:%(desired)s 最å°å€¼ï¼š%(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "å†…å­˜å€¼ä¸æ˜¯ä¸»æœºçš„逻辑内存å—大å°" "(%(lmb_size)s) çš„å€æ•°ã€‚值:%(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "å—ç®¡ç³»ç»Ÿä¸æ”¯æŒåЍæ€å†…存扩展。VIOS 扩展属性组" "扩展因å­å€¼â€œ%(value)sâ€æ— æ•ˆã€‚" #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "活动内存扩展值必须大于或等于 1.0 且" "å°äºŽæˆ–等于 10.0。值 0 也有效,它表示" " AME 关闭。“%(value)sâ€æ— æ•ˆã€‚" #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "对 URI %(uri)s å°è¯•第 %(retry)d 次(共 %(total)d 次)。错误为" "一个已知的é‡è¯•å“应代ç ï¼š%(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "å°è¯•了 %(retry)d 次(总共 %(total)d 次)都失败。将é‡è¯•。异常为:\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "å¿…é¡»æä¾› EntryWrapper 或 EntryWrapperGetter。" #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "å¿…é¡»æä¾›æœ‰æ•ˆå­ä»»åŠ¡ã€‚" #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." 
msgstr "é‡å¤çš„“providesâ€åç§° %s。" #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s 没有å­ä»»åŠ¡ï¼›æ‰§è¡Œç©ºæ“作。" #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "å¿…é¡»æä¾› EntryWrappers 或 FeedGetter 的列表。" #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s 没有å­ä»»åŠ¡ï¼›æ‰§è¡Œç©ºæ“作。" #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "FeedTask %s é‡åˆ°å¤šä¸ªå¼‚常。下é¢é€ä¸ªè®°å½•了" " 这些异常。" #: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "在主机上,没有足够的 %(res_name)s å¯ç”¨äºŽè™šæ‹Ÿæœº" "“%(instance_name)sâ€ï¼ˆè¯·æ±‚的是 %(requested)s,而实际 %(avail)s å¯ç”¨ï¼‰" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "内存" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "更改最å°å†…存或最大内存之å‰ï¼Œ" "必须关闭虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "更改扩展因å­ä¹‹å‰ï¼Œ" "必须关闭虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPU æ•°" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "处ç†å•å…ƒ" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." 
msgstr "" "更改最å°å†…存或最大内存之å‰ï¼Œ" "必须关闭虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" "更改最å°å†…存或最大内存之å‰ï¼Œ" "必须关闭虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "更改处ç†å™¨å…¼å®¹æ–¹å¼ä¹‹å‰ï¼Œ" "必须关闭虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "æ›´æ”¹å¤„ç†æ–¹å¼ä¹‹å‰ï¼Œå¿…须先关闭" "虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "所期望的处ç†å™¨æ•° (%(vcpus)d)" "ä¸èƒ½è¶…过æ¯ä¸ªåˆ†åŒºå…许的 (%(max_allowed)d),虚拟机" "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "所期望的处ç†å™¨æ•° (%(vcpus)d)" "ä¸èƒ½è¶…过æ¯ä¸ªåˆ†åŒºå…许的 (%(max_allowed)d),虚拟机" "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "æ›´æ”¹ç®€åŒ–çš„è¿œç¨‹é‡æ–°å¯åŠ¨åŠŸèƒ½ä¹‹å‰ï¼Œ" "必须关闭虚拟机。关闭虚拟机 %s,然åŽé‡è¯•。" #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." 
msgstr "分区没有活动 RMC 连接。" #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "分区没有对应 %s 的活动 DLPAR 功能。" #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "I/O" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "内存" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "处ç†å™¨" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "KeylockPos“%sâ€æ— æ•ˆã€‚" #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "无效的 BootMode“%sâ€ã€‚" #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "建议ä¸è¦ä½¿ç”¨ IOSlot.adapterï¼è¯·æ”¹ä¸ºä½¿ç”¨ IOSlot.io_adapter。" #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "æ— æ³•æ ¹æ® %(identifier)s ç¡®å®šä¸»ç®¡ç†æŽ§åˆ¶å° MTMS(机器类型ã€åž‹å·" "åºåˆ—å·ï¼‰ï¼Œå› ä¸ºæœªå°†ä»»ä½• %(param)s 标记为" " 池的主控制å°ã€‚" #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "无法设置 UUID。" #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "无法转æ¢å¯¹è±¡ %(pvmobject)s 中的 %(property_name)s=“%(value)sâ€" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "æ‹’ç»è®¾ç½®åŸºäºŽå¤šä¸ªé“¾æŽ¥çš„ href。\n" "路径:%{path}s\n" "找到的链接数:%{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." 
msgstr "æ‹’ç»æž„造和åˆå¹¶æ²¡æœ‰æ ‡è®°çš„元素。" #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "å“应缺少“entryâ€å±žæ€§ã€‚" #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "å¿…é¡»æä¾›è¦åˆå¹¶çš„å“应或æ¡ç›®ã€‚但是获得了 %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "å¼€å‘者错误:指定“parentâ€æˆ–(“parent_typeâ€å’Œâ€œparent_uuidâ€ï¼‰" " 以接收 CHILD 对象。" #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "请求根对象时,请指定“uuidâ€æˆ–“root_idâ€ã€‚" #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "检索å­ä»£è®¢é˜…æºæˆ–é¡¹ç›®æ—¶ï¼Œéœ€è¦ parent_type å’Œ" "parent_uuid。" #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "通过 parent_uuid 傿•°æ¥æŒ‡å®šçˆ¶ä»£çš„ UUID。" #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "请求å­å¯¹è±¡æ—¶ï¼Œè¯·æŒ‡å®šâ€œuuidâ€æˆ–“child_idâ€ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "指定了没有父类型的父 UUID。" #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "search() 方法åªéœ€è¦ä¸€ä¸ª key=value 傿•°ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "包装器类 %(class)s 䏿”¯æŒæœç´¢é”®â€œ%(key)sâ€ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." 
msgstr "" "建议ä¸è¦ä½¿ç”¨ EntryWrapper.update 的“xagâ€å‚æ•°ï¼ä½¿ç”¨æ­¤å‚æ•°çš„" " 最好结果是产生空æ“作。最差结果是引起无法纠正的 etag" "ä¸åŒ¹é…错误。" #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "没有这样的å­å…ƒç´ ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "在没有元数æ®çš„æƒ…况下无法在包装器上设置 UUID。" #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "UUID 值无效:%s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "必须指定包装器å­ç±»ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "å¿…é¡»åŒæ—¶æŒ‡å®šçˆ¶ç±»å’Œçˆ¶ UUIDï¼Œæˆ–è€…ä¸¤è€…éƒ½ä¸æŒ‡å®šã€‚" #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "监视作业 %(job_id)s %(time)i 秒。" #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "正对作业 %(job_id)s å‘å‡ºå–æ¶ˆè¯·æ±‚ã€‚å°†æ— é™æœŸè½®è¯¢ä½œä¸š" " 至终止。" #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "未删除作业 %s。该作业处于“正在è¿è¡Œâ€çжæ€ã€‚" #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPAR å¤„äºŽä¸æ´»åŠ¨çŠ¶æ€ã€‚" #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "目标系统ä¸å…·å¤‡ IBM i LPAR 移动性能力。" #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "IBM i LPAR ä¸å…·å¤‡æœ‰é™ I/O。" #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "æºç³»ç»Ÿä¸å…·å¤‡ IBM i LPAR 移动性能力。" #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." 
msgstr "LPAR 没有活动 RMC 连接。" #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR 是管ç†åˆ†åŒº" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "ç”±äºŽç¼ºä¹ DLPAR 能力,LPAR ä¸å¯ç”¨äºŽ LPM。" #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "è¿™ä¸æ˜¯æ‚¨æ­£åœ¨æŸ¥æ‰¾çš„属性。请在 NovaLink 环境中" "使用 srr_enabled。" #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "无效 IPLSrc“%sâ€ã€‚" #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "建议ä¸è¦ä½¿ç”¨æ­¤å±žæ€§ï¼è¯·æ”¹ä¸ºä½¿ç”¨ pci_subsys_dev_id。" #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "建议ä¸è¦ä½¿ç”¨æ­¤å±žæ€§ï¼è¯·æ”¹ä¸ºä½¿ç”¨ pci_rev_id。" #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "建议ä¸è¦ä½¿ç”¨æ­¤å±žæ€§ï¼è¯·æ”¹ä¸ºä½¿ç”¨ pci_subsys_vendor_id。" #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "建议ä¸è¦ä½¿ç”¨æ­¤å±žæ€§ï¼è¯·æ”¹ä¸ºä½¿ç”¨ drc_index。" #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "建议ä¸è¦ä½¿ç”¨æ­¤å±žæ€§ï¼è¯·æ”¹ä¸ºä½¿ç”¨ drc_name。" #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "CNA.create 的父代规范无效。" #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV 已对 pg83 æè¿°ç¬¦â€œ%(pg83_raw)sâ€è¿›è¡Œç¼–ç ï¼Œä½†æœªèƒ½è§£ç " "(%(type_error)s)." 
#: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" "建议ä¸è¦ä½¿ç”¨ VIOS EntryWrapper 类的“xagsâ€å±žæ€§ï¼è¯·" " 改为使用 pypowervm.const.XAG 中的值。" #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "VIOS ç±»åž‹çš„åˆ†åŒºä¸æ”¯æŒ LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "在没有备用存储器设备的情况下,无法指定目标设备 LUAï¼" # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/it/0000775000175000017500000000000013571367172017174 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/it/pypowervm.po0000664000175000017500000017531313571367171021615 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=2; plural=n != 1;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "Protocollo non valido \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "Comunicazioni non codificate con PowerVM! Invertire la configurazione su https." #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." 
msgstr "Calcolo memo di controllo predefinito non riuscito, si utilizza 'default'." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "Autenticazione locale non supportata sulla HMC." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "Impostazione del listener di eventi per %s" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "Richiesta imprevista di gestione file su %s" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "Errore imprevisto per %(meth)s %(url)s" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "Errore imprevisto %(class)s per %(method)s %(url)s: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "Un nuovo login è stato ritenuto non sicuro. Questa istanza di sessione non dovrebbe più " "essere utilizzata." 
#: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "Tentativo di nuovo login %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "Nuovo login 401, corpo risposta:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "Nuovo login non riuscito, corpo risposta:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "Nuovo login non riuscito:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "Nuovo tentativo non riuscito con un altro 401, corpo risposta:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "risposta HTTP 401 sospetta per %(method)s %(path)s: il token è nuovo" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "Impossibile connettersi al server REST - il servizio pvm-rest è stato avviato? " "Nuovo tentativo %(try_num)d di %(max_tries)d dopo %(delay)d secondi." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "Logon sessione %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "Impossibile analizzare un token sessione dalla risposta PowerVM." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " Corpo= %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "Impossibile analizzare un percorso file sessione dalla risposta PowerVM." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." 
msgstr "Il file token %s non contiene un token sessione leggibile." #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "Logoff sessione %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "Problema nel logoff. Ignorato." #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "il lavoro deve essere un elemento JobRequest" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "In JobRequest manca OperationName" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "il percorso=%s non è un riferimento API PowerVM" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "il percorso=%s non è un riferimento API PowerVM" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "Descrittore file non valido" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "Previsto root_id" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "Previsto child_type" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "suffix_type=%s non previsto" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "Previsto suffix_parm" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "Previsto child_id" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "child_id non previsto" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "root_id non previsto" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "req_method=%s non previsto" #: pypowervm/adapter.py:1133 
#, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "Errore durante l'analisi della risposta XML da PowerVM: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "La risposta non è un feed/voce atom" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "Errore HTTP 204 imprevisto per la richiesta" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "Corpo della risposta inaspettatamente vuoto" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "intestazioni richiesta: %(reqheaders)s\n" "\n" "corpo richiesta: %(reqbody)s\n" "\n" "intestazioni risposta: %(respheaders)s\n" "\n" "corpo risposta: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "Errore atom per %(method)s %(path)s: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "La sessione non deve essere Nessuno" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "Un listener di eventi è già attivo nella sessione." 
#: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "Impossibile inizializzare il listener feed eventi: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "L'ID applicazione \"%s\" non è univoco" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "In arresto" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "Questo gestore è già sottoscritto" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "Il gestore deve essere un gestore eventi" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "Gestore non trovato nell'elenco di sottoscrittori" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "Chiusura del listener di eventi per %s" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "Chiusura listener di eventi completata per %s" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "Errore durante l'ottenimento di eventi PowerVM: %s. (Il servizio pvm-rest è inattivo?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "EventType=%s imprevisto" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "Errore durante l'elaborazione degli eventi PowerVM" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "Impossibile derivare la porta FC fisica appropriata per WWPN %(wwpn)s. I" " gruppi di attributi estesi VIOS potrebbero essere stati insufficienti. L'URI VIOS" " per la query era %(vio_uri)s." 
#: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "Elemento non trovato: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "LPAR non trovata: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "Adattatore non trovato" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "Operazione '%(operation_name)s' non riuscita. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "L'operazione '%(operation_name)s' non è riuscita. Impossibile completare l'attività in" " %(seconds)d secondi." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "Impossibile eseguire la chiusura del sistema operativo sulla macchina virtuale %(lpar_nm)s, perché " "la relativa connessione RMC non è attiva." #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Impossibile spegnere la macchina virtuale %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "Spegnimento della macchina virtuale %(lpar_nm)s scaduto dopo %(timeout)d " "secondi." #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Impossibile accendere la macchina virtuale %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." 
msgstr "" "Accensione della macchina virtuale %(lpar_nm)s scaduta dopo %(timeout)d " "secondi." #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "Impossibile rimuovere la VLAN %(vlan_id)d, poiché è l'identificativo VLAN primario su" " un bridge di rete differente." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "Impossibile eseguire il provisioning della VLAN %(vlan_id)d. Sembra che sia contenuta nel " "dispositivo '%(dev_name)s' sul Virtual I/O Server %(vios)s. Questo dispositivo non è" " connesso ad alcun bridge di rete (Shared Ethernet Adapter). Rimuovere " "manualmente il dispositivo o aggiungerlo al bridge di rete, prima " "di continuare." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "Una LU (Logical Unit) con il nome %(lu_name)s esiste già sul pool di memoria " "condivisa %(ssp_name)s." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "Impossibile trovare una porta fisica a cui associare una porta Fibre Channel virtuale. " "Questo è dovuto alla indisponibilità di un Virtual I/O Server o a una specifica" " della porta non corretta per le porte Fibre Channel fisiche." #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. 
The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "Impossibile avviare la console per la macchina virtuale. L'API pypowervm è" " in esecuzione in modalità non locale. La console può essere distribuita solo quando " "pypowervm si trova nella stessa ubicazione dell'API PowerVM." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s non ha attività secondarie." #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask non può avere un feed vuoto." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "L'SO ha negato l'accesso al file %(access_file)s." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "L'SO ha rilevato un errore I/O nel tentativo di leggere il file %(access_file)s: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "Attività di migrazione non riuscita. %(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "Nessuna origine di caricamento trovata per la VM %(vm_name)s" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "Impossibile derivare la codifica pg83 per l'hdisk %(dev_name)s. L'attributo " "parent_entry non è impostato. Ciò può essere dovuto all'utilizzo di un PV " "ottenuto tramite una catena di proprietà non supportata. 
È necessario accedere al PV " "tramite VIOS.phys_vols, VG.phys_vols, oppure " "VIOS.scsi_mappings[n].backing_storage." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "Impossibile riassociare l'elemento di memoria dell'associazione vSCSI. Era previsto trovare " "esattamente un'associazione corrispondente, trovate %(num_mappings)d." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "Impossibile riassociare l'elemento di memoria dell'associazione vSCSI. Un'associazione per " "l'elemento di memoria %(stg_name)s esiste già sulla LPAR client %(lpar_uuid)s." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "Il dispositivo %(devname)s è stato trovato %(count)d volte; era previsto che venisse trovato al massimo " "una sola volta." #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "L'attività feed %(ft_name)s ha emesso diverse eccezioni:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "Era previsto trovare una sola partizione di gestione; trovate %(count)d." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "Era previsto trovare esattamente una singola partizione con ID %(lpar_id)d; trovate " "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." 
msgstr "Impossibile trovare il pool di memoria condiviso o di livello predefinito %(ssp_name)s." #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "Il dispositivo con UDID %(udid)s non è stato trovato su alcun Virtual I/O " "Server." #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "Non sono presenti Virtual I/O Server sufficienti per supportare il dispositivo" " della macchina virtuale con UDID %(udid)s." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "I fabric previsti (%(fabrics)s) non sono stati trovati su alcun Virtual " "I/O Server." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "Impossibile ricreare la macchina virtuale. Sta utilizzando un tipo I/O " "%(io_type)s, che non è supportato per la nuova creazione della macchina virtuale." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "Il numero di slot VFC sul sistema di destinazione (%(rebuild_slots)d) non" " corrisponde al numero di slot sul sistema client %(original_slots)d). " "Impossibile ricreare questa macchina virtuale su questo sistema." #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." 
msgstr "" "Per registrare le informazioni sullo slot del dispositivo di rete, è necessario un adattatore " "CNA o VNIC. È stato invece fornito quanto segue: %(wrapper)s." #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "Non sono disponibili Virtual I/O Server attivi sufficienti. Previsti " "%(exp)d; trovati %(act)d." #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "Non è disponibile alcun Virtual I/O Server. Tentativo di attesa che un VIOS " "diventi attivo per %(wait_time)d secondi. Controllare la connettività " "RMC tra PowerVM NovaLink e i Virtual I/O Server." #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Impossibile trovare adattatori SR-IOV in modalità Sriov e in stato In esecuzione.\n" "Ubicazione | Modalità | Stato\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "Impossibile soddisfare i requisiti di ridondanza di %(red)d. Trovati %(found_vfs)d" " dispositivi di backup utilizzabili." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "Il sistema gestito non supporta vNIC." #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "Non ci sono VIOS che supportano vNIC attive." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." 
msgstr "" "È stata specificata una ridondanza di %(red)d ma il sistema gestito non" " supporta il failover vNIC." #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "È stata specificata una ridondanza %(red)d, ma non sono presenti VIOS " "che supportano il failover vNIC attivi." #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "Impossibile individuare il gruppo di volumi %(vol_grp)s per memorizzarvi i supporti ottici " "virtuali. Impossibile creare il repository di supporti." #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "L'aggiornamento ManagedSystem non è stato tentato perché erano richieste delle modifiche" " a una o più porte fisiche SR-IOV che sono utilizzate dalle vNIC.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "Impossibile creare il terminale virtuale basato su VNC: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "La cache adattatore non è supportata." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "Valore non valido '%(value)s' per '%(enum)s'. Valori validi sono: " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "Nessun VIOS trovato con nome %(vios_name)s." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." 
msgstr "Nessun gruppo di volumi trovato con nome %(vg_name)s." #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "La partizione con nome %(part_name)s non è una partizione IBMi." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "Argomento partizione della funzione PanelJob vuoto." #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "Operazione funzione pannello %(op_name)s non valida. Una tra %(valid_ops)s " "prevista." #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Rilevamento ISCSI non riuscito per %(vios_uuid)s. Codice di ritorno: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Logout ISCSI non riuscito per %(vios_uuid)s. Codice di ritorno: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "Rimozione di ISCSI non riuscita per VIOS %(vios_uuid)s. Codice di ritorno: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "Vstor %(stor_udid)s non trovato per VIOS %(vios_uuid)s." #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "Il gruppo di attributi estesi proposto '%(arg_xag)s' non corrisponde al " "gruppo di attributi estesi esistente '%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "Il certificato è scaduto." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." 
msgstr "Il prefisso e il suffisso insieme non possono superare i %d caratteri." #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "La lunghezza totale deve essere di almeno 1 carattere." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "Il parametro del nome deve essere lungo almeno un carattere." #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "Il parametro del nome non deve superare i %d caratteri quando trunk_ok è False." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "Errore sviluppatore: specifica parent parziale." #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "Errore sviluppatore: il tipo parent deve essere un tipo schema stringa o " "una sottoclasse Wrapper." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "Valore non valido '%(bad_val)s'. Era previsto uno di %(good_vals)s oppure un elenco." #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "RICHIESTA: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "RISPOSTA: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "In attesa del completamento dei caricamenti in corso. LU indicatori: %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "Interruzione in favore del caricamento in corso." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "Interruzione caricamento in favore dell'indicatore %s." 
#: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "Utilizzo della LU immagine già caricata %s." #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "Creazione della LU indicatore %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "Caricamento della LU immagine %(lu)s (indicatore %(mkr)s)." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "Rimozione della LU malfunzionante %s." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "Impossibile trovare lo switch virtuale %s sul sistema." #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "Impossibile trovare una VLAN valida VLAN per lo switch virtuale %s." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "Errore nell'ottenere il sovraccarico di memoria host per l'host con UUID '%(host)s': " "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "Timeout mentre si attendeva che lo stato RMC di tutti i Virtual I/O " "Server accesi indicasse una condizione di attivo. Il tempo di attesa era: %(time)d secondi. I VIOS che " "non sono diventati attivi erano: %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "Si assume che lo slot senza descrizione sia I/O fisico: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "Partizione %s già accesa." 
#: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "Partizione %s già spenta." #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "La specifica di add_parms come dict è obsoleta. Specificare un'istanza %s " "invece." #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "Arresto regolare del sistema operativo IBMi non riuscito. Tentativo di arresto immediato del sistema operativo. " "Partizione: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Arresto immediato del sistema operativo IBMi non riuscito. Tentativo di arresto regolare VSP. " "Partizione: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "Arresto immediato sistema operativo non IBMi scaduto. Tentativo di arresto a freddo VSP. " "Partizione: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Arresto immediato del sistema operativo non IBMi non riuscito. Tentativo di arresto regolare VSP. " "Partizione: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "Arresto a freddo VSP con timeout predefinito. Partizione: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "Nuovo tentativo di modifica dell'associazione SCSI." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." 
msgstr "" "Trovata un'associazione esistente dell'elemento di memoria %(stg_type)s %(stg_name)s " "dal Virtual I/O Server %(vios_name)s alla LPAR client %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Creazione dell'associazione dell'elemento di memoria %(stg_type)s %(stg_name)s dal " "Virtual I/O Server %(vios_name)s alla LPAR client %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "Non si devono specificare entrambi match_func e stg_elem." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "Il metodo register_cna è obsoleto. Utilizzare il metodo " "register_vnet." #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "Il metodo drop_cna è obsoleto. Utilizzare il metodo drop_vnet." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "La porta fisica SR-IOV nell'ubicazione %(loc_code)s sta supportando una vNIC che appartiene" " alla LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "Si stanno apportando modifiche alle seguenti etichette di porta fisica SR-IOV, anche se " "sono utilizzate dalle vNIC:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "Impossibile eliminare vio_file con UUID %s. Deve essere eliminato manualmente." 
#: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "È stato riscontrato un problema durante il caricamento. Verrà effettuato un nuovo tentativo." #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "Il metodo crt_lu_linked_clone è obsoleto. Utilizzare il metodo crt_lu " "(clone=src_lu, size=lu_size_gb)." #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "La LU (Logical Unit) disco %(luname)s non ha alcuna LU immagine di supporto. (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "Impossibile individuare il nuovo vDisk sul caricamento file." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "Il dispositivo verrà ignorato perché gli manca un UDID:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "Dispositivo %s non trovato nell'elenco." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "Eliminazione del disco virtuale %(vdisk)s dal gruppo di volumi %(vg)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "Eliminazione del dispositivo ottico virtuale %(vopt)s dal gruppo di volumi %(vg)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "Rimozione della LU %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. 
" "(UDID: %(lu_udid)s)" msgstr "" "La LU %(lu_name)s non è stata trovata, potrebbe essere stata eliminata fuori banda. " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "Rimozione della LU immagine %(lu_name)s perché non è più in uso. (UDID: " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "La LU di supporto %(lu_name)s non è stata trovata. (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "Errore sviluppatore: è richiesto il livello o lufeed." #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "Errore sviluppatore: il parametro lufeed deve comprendere LUEnt EntryWrappers." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "Eliminazione LU %(lu_name)s (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "Si ignora HttpError per LU %(lu_name)s, potrebbe essere stata eliminata fuori banda." " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "Rimozione di %(num_maps)d associazioni %(stg_type)s orfane da VIOS " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "Rimozione di %(num_maps)d associazioni VFC senza porta dal VIOS %(vios_name)s." 
#: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "Rimozione di %(num_maps)d associazioni %(stg_type)s associate all'ID LPAR " "%(lpar_id)d dal VIOS %(vios_name)s." #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "Non viene rimossa la memoria %(stg_name)s di tipo %(stg_type)s, perché non è possibile " "determinare se è ancora in uso. Potrebbero essere necessari una verifica e " "un cleanup manuali." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" "Scrub memoria ignora l'elemento memoria %(stg_name)s perché è " "del tipo imprevisto %(stg_type)s." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "Ripulitura dei seguenti %(vdcount)d dischi virtuali dal VIOS %(vios)s: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "Ripulitura dei seguenti %(vocount)d dispositivi ottici virtuali dal VIOS %(vios)s: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "Ignorato scrub di associazioni %(stg_type)s dal VIOS %(vios_name)s per i " "seguenti ID LPAR perché tali LPAR esistono: %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. 
The payload data is:\n" " %s)" msgstr "" "Impossibile trovare il VIOS appropriato. Il payload fornito probabilmente era " "insufficiente. I dati di payload sono:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "Per l'associazione di porta VFC messa in corrispondenza non è impostata alcuna porta di supporto. Aggiunta di %(port)s " "all'associazione per i wwpn client: %(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "Si è verificato un errore durante la query del repository di supporti ottici virtuali. " "Tentativo di ristabilire la connessione con un repository di supporti ottici " "virtuali." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "Impossibile chiudere vterm." #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "Output non valido sull'apertura di vterm. Tentativo di reimpostare il vterm. L'errore era %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "Listener VNCSocket in ascolto su ip=%(ip)s porta=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "Errore di negoziazione SSL per il VNC Repeater: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "Rilevamento hdisk non riuscito; verrà ripulita la memoria obsoleta per gli ID LPAR %s e " "verrà fatto un nuovo tentativo." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "Ripristino LUA riuscito. 
Dispositivo trovato: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "Riscontrato errore ITL: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "Il dispositivo %s è attualmente in uso." #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "Dispositivo %s rilevato con UDID sconosciuto." #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "Rilevamento del dispositivo non riuscito: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "Errore CLIRunner: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "Lavoro di ripristino QUERY_INVENTORY LUA riuscito ma il risultato non contiene " "OutputXML né StdOut." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY ha prodotto un chunk di XML (%(chunk)s) non valido. Errore: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "Impossibile trovare il descrittore pg83 nell'output XML:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "Comando ISCSI completato correttamente" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "Sessione ISCSI già esistente e collegata" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "Comando ISCSI eseguito su host, VIOS non supportato. " #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "Il rilevamento ISCSI ha individuato voci obsolete nel database ODM." 
#: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "Impossibile trovare la sessione ISCSI " #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "Nessun record/destinazione/sessione/portale trovato su cui eseguire l'operazione" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "Comando ISCSI non riuscito con stato errore interno = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "Codice di errore ISCSI generico" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "Errore di accesso alla sessione ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "Comando ISCSI con argomenti non validi" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "Timer di connessione ISCSI scaduto durante il tentativo di connessione. " #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "Il comando ISCSI non è stato in grado di ricercare l'host" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "Il comando ISCSI ha restituito uno stato imprevisto = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "Comando ISCSI eseguito su un VIOS non supportato" #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "Ripulire la memoria obsoleta per gli ID LPAR %s e ripetere il rilevamento iSCSI." #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." msgstr "" "I dati di metrica non sono disponibili. 
Ciò può essere dovuto al fatto che le metriche sono state " "recentemente inizializzate." #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "Questo è un test" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "Questo è un messaggio per il quale non esiste una traduzione" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "Il fattore unità processore deve essere tra 0.05 e 1.0. Valore: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "Il nome partizione logica ha una lunghezza non valida. Nome: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "Il campo '%(field)s' ha un valore non valido: '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "Il valore Nessuno non è valido." #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "Il valore '%(value)s' non è valido per il campo '%(field)s' con " "scelte accettabili: %(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "Il campo '%(field)s' ha un valore al di sotto del minimo. Valore: %(value)s; " "Minimo: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "Il campo '%(field)s' ha un valore al di sopra del massimo. Valore: %(value)s; " "Massimo: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. 
" "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "'%(desired_field)s' ha un valore superiore al valore '%(max_field)s'. " "Desiderato: %(desired)s Massimo: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "'%(desired_field)s' ha un valore inferiore al valore '%(min_field)s'. " "Desiderato: %(desired)s Minimo: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "Il valore di memoria non è un multiplo della dimensione di blocco della memoria logica " "(%(lmb_size)s) dell'host. Valore: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "Il sistema gestito non supporta l'espansione di memoria attiva. Il " "valore del fattore di espansione '%(value)s' non è valido." #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "Il valore di espansione di memoria attiva deve essere maggiore di o uguale a 1.0 e " "inferiore o uguale a 10.0. Un valore 0 è anche valido e indica che" " AME è spento. '%(value)s' non è valido." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "Tentativo %(retry)d di %(total)d totali per l'URI %(uri)s. 
L'errore è un " "codice di risposta di nuovo tentativo noto: %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "Tentativo %(retry)d di %(total)d non riuscito. Verrà effettuato un nuovo tentativo. L'eccezione è:\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "È necessario fornire EntryWrapper o EntryWrapperGetter." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "È necessario fornire un'attività secondaria valida." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "Nome 'provides' duplicato %s." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s non ha attività secondarie; nessuna operazione in esecuzione." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "È necessario fornire un elenco di EntryWrappers o un FeedGetter." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s non ha attività secondarie; nessuna operazione in esecuzione." #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "L'attività feed %s ha emesso diverse eccezioni. Vengono registrate singolarmente" " di seguito." 
#: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "%(res_name)s disponibili non sufficienti sull'host per la macchina virtuale " "'%(instance_name)s' (%(requested)s richiesti, %(avail)s disponibili)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "di memoria" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare il valore minimo o " "massimo di memoria. Spegnere la macchina virtuale %s e riprovare." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare il " "fattore di espansione. Spegnere la macchina virtuale %s e riprovare." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPU" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "unità di elaborazione" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare il valore minimo o " "massimo di processori. Spegnere la macchina virtuale %s e riprovare." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare il valore minimo o " "massimo di unità processore. 
Spegnere la macchina virtuale %s e riprovare." #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare la modalità " "di compatibilità processore. Spegnere la macchina virtuale %s e riprovare." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare la modalità " "di elaborazione. Spegnere la macchina virtuale %s e riprovare." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "I processori desiderati (%(vcpus)d) non possono essere più del numero massimo " "di processori consentiti per partizione (%(max_allowed)d) per la macchina virtuale " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Il numero massimo di processori (%(vcpus)d) non può essere superiore al limite massimo " "di processori della capacità di sistema (%(max_allowed)d) per la macchina virtuale " "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "La macchina virtuale deve essere spenta prima di modificare " "la funzione di riavvio remoto semplificato. Spegnere la macchina virtuale %s e riprovare." 
#: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "La partizione non ha una connessione RMC attiva." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "La partizione non ha una capacità DLPAR attiva per %s." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "I/O" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "Memoria" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "Processori" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "KeylockPos non valido '%s'." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "Modalità di avvio non valida '%s'." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapter è obsoleto, utilizzare invece IOSlot.io_adapter." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "Impossibile determinare la console di gestione master MTMS (tipo macchina, modello, " "numero di serie) da %(identifier)s perché nessun %(param)s era contrassegnato come" " console master per il pool." #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "Impossibile impostare l'uuid." 
#: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "Impossibile convertire %(property_name)s='%(value)s' nell'oggetto %(pvmobject)s" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "Rifiuto impostazione di href su più link.\n" "Percorso: %{path}s\n" "Numero di link trovati: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "Rifiuto di creare e includere un elemento senza tag." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "Alla risposta manca la proprietà 'entry'." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "È necessario fornire una risposta o una voce da includere. Ottenuto %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "Errore sviluppatore: specificare 'parent' o ('parent_type' e 'parent_uuid') per" " richiamare un oggetto CHILD." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "Specificare 'uuid' o 'root_id' quando si richiede un oggetto ROOT." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "Sono richiesti sia parent_type che parent_uuid quando si richiama un feed " "o una voce CHILD." #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "Specificare l'UUID del parent tramite il parametro parent_uuid." 
#: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "Specificare 'uuid' o 'child_id' quando si richiede un oggetto CHILD." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "UUID parent specificato senza tipo di parent." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "Il metodo search() richiede un solo argomento key=value." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "La classe wrapper %(class)s non supporta la chiave di ricerca '%(key)s'." #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "Il parametro 'xag' per EntryWrapper.update è obsoleto. Nel migliore dei casi, il suo utilizzo" " non produrrà alcuna operazione. Nel peggiore dei casi, produrrà degli errori di mancata corrispondenza " "di etag incorreggibili." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "Nessun elemento child di questo genere." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "Impossibile impostare l'UUID sul wrapper senza metadati." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "Il valore uuid non è valido: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "È necessario specificare una sottoclasse wrapper." #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "È necessario specificare sia la classe parent che l'UUID parent, o nessuno dei due." 
#: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "Monitoraggio lavoro %(job_id)s per %(time)i secondi." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "Emettere una richiesta di annullamento per il lavoro %(job_id)s. Verrà eseguito il polling del lavoro indefinitamente" " per terminarlo." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "Lavoro %s non eliminato. Il lavoro è in stato In esecuzione." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "La LPAR non è in stato Attivo." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "Il sistema di destinazione non ha la capacità LPAR Mobility IBM i." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "La LPAR IBM i non ha I/O limitato." #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "Il sistema di origine non ha la capacità LPAR Mobility IBM i." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "La LPAR non ha una connessione RMC attiva." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR è la partizione di gestione" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "La LPAR non è disponibile per LPM a causa di capacità DLPAR mancanti." #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. 
Use srr_enabled in a " "NovaLink environment." msgstr "" "Questa non è la proprietà che si sta cercando. Utilizzare srr_enabled in un " "ambiente NovaLink." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "IPLSrc non valido '%s'." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "Questa proprietà è obsoleta. Utilizzare invece pci_subsys_dev_id." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "Questa proprietà è obsoleta. Utilizzare invece pci_rev_id." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "Questa proprietà è obsoleta. Utilizzare invece pci_subsys_vendor_id." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "Questa proprietà è obsoleta. Utilizzare invece drc_index." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "Questa proprietà è obsoleta. Utilizzare invece drc_name." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "Specifica parent non valido per CNA.create." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV aveva un descrittore pg83 codificato \"%(pg83_raw)s\", ma non è riuscito a decodificare " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" "La proprietà 'xags' della classe VIOS EntryWrapper è obsoleta! Utilizzare" " invece i valori ricavati da pypowervm.const.XAG." 
#: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "La partizione di tipo VIOS non supporta LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "Impossibile specificare la LUA di dispositivo di destinazione senza un dispositivo di memoria di supporto." # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/de/0000775000175000017500000000000013571367172017150 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/de/pypowervm.po0000664000175000017500000020141013571367171021555 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=2; plural=n != 1;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "Ungültiges Protokoll \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "Nicht verschlüsselte Kommunikation mit PowerVM! Zurücksetzen der Konfiguration auf HTTPS." #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "Fehler bei der Berechnung des Standard-Audit-Erinnerungselements unter Verwendung von 'Standard'." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." 
msgstr "Lokale Authentifizierung auf der HMC nicht unterstützt." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "Einrichten des Ereignislisteners für %s" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "Nicht erwartete Dateikennung in der Anforderung von %s" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "Unerwarteter Fehler für %(meth)s %(url)s" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "Unerwarteter Fehler: %(class)s für %(method)s %(url)s: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "Die Wiederanmeldung wird für unsicher gehalten. Diese Sitzungsinstanz sollte nicht länger " "verwendet werden." #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "Versuch der Wiederanmeldung %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "Wiederanmeldung 401, Antworthauptteil:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "Wiederanmeldung fehlgeschlagen, Antworthauptteil:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "Wiederanmeldung fehlgeschlagen:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "Neuer Versuch fehlgeschlagen mit einer weiteren 401-Antwort, Antworthauptteil:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "verdächtige HTTP 401-Antwort für %(method)s %(path)s: Token ist brandneu" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is 
the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "Fehler beim Herstellen einer Verbindung zu REST-Server - wurde der Service pvm-rest gestartet? " "Wiederholung %(try_num)d von %(max_tries)d nach %(delay)d Sekunden." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "Sitzungsanmeldung bei %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "Fehler bei der Auswertung eines Sitzungstokens aus der PowerVM-Antwort." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " Hauptteil= %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "Fehler bei der Auswertung eines Sitzungsdateipfads aus der PowerVM-Antwort." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "Die Tokendatei %s enthielt kein lesbares Sitzungstoken." #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "Sitzungsabmeldung von %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "Problem beim Abmelden. Wird ignoriert." 
#: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "Job muss ein Jobanforderungselement sein" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "In der Jobanforderung fehlt der Name der Operation" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "Pfad=%s ist keine PowerVM-API-Referenz" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "Pfad=%s ist keine PowerVM-API-Referenz" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "Ungültiger Dateideskriptor" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "Erwartete root_id" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "Erwarteter child_type" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "Nicht erwarteter suffix_type=%s" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "Erwartetes suffix_parm" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "Erwartete child_id" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "Nicht erwartete child_id" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "Nicht erwartete root_id" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "Nicht erwartete req_method=%s" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "Fehler beim Parsing der XML-Antwort von PowerVM: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "Die Antwort ist kein Atom-Feed/-Eintrag" #: 
pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "Nicht erwartetes HTTP 204 auf die Anforderung" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "Unerwartet leerer Antworthauptteil" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "Anforderungsheader: %(reqheaders)s\n" "\n" "Anforderungshauptteil: %(reqbody)s\n" "\n" "Antwortheader: %(respheaders)s\n" "\n" "Antworthauptteil: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "Atom-Fehler für %(method)s %(path)s: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "Die Sitzung darf nicht 'Keine' sein" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "Es ist bereits ein Ereignislistener in der Sitzung aktiv." 
#: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "Ereignis-Feed-Listener konnte nicht initialisiert werden: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "Anwendungs-ID \"%s\" nicht eindeutig" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "Herunterfahren" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "Dieser Handler ist bereits abonniert" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "Der Handler muss ein Ereignishandler sein" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "Handler in Abonnentenliste nicht gefunden" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "Abschalten des Ereignislisteners für %s" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "Beendigung des Ereignislisteners für %s abgeschlossen" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "Fehler beim Abrufen der PowerVM-Ereignisse: %s. (Ist der Service pvm-rest inaktiv?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "Nicht erwarteter Ereignistyp=%s" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "Fehler bei der Verarbeitung der PowerVM-Ereignisse" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "Der entsprechende physische FC-Anschluss für WWPN %(wwpn)s konnte nicht abgeleitet werden. Die" " erweiterten VIOS-Attributgruppen waren möglicherweise unzureichend. Der VIOS-URI" " für die Abfrage lautete %(vio_uri)s." 
#: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "Element nicht gefunden: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "LPAR nicht gefunden: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "Adapter nicht gefunden" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "Die Operation '%(operation_name)s' ist fehlgeschlagen. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "Die Operation '%(operation_name)s' ist fehlgeschlagen. Die Task konnte nicht in" " %(seconds)d Sekunden abgeschlossen werden." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "Das Betriebssystem auf der virtuellen Maschine %(lpar_nm)s kann nicht heruntergefahren werden, weil " "die RMC-Verbindung nicht aktiv ist." #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Die virtuelle Maschine %(lpar_nm)s konnte nicht ausgeschaltet werden: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "Beim Ausschalten der virtuellen Maschine %(lpar_nm)s wurde nach %(timeout)d " "Sekunden das zulässige Zeitlimit überschritten." 
#: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "Die virtuelle Maschine %(lpar_nm)s konnte nicht eingeschaltet werden: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "Beim Einschalten der virtuellen Maschine %(lpar_nm)s wurde nach %(timeout)d " "Sekunden das zulässige Zeitlimit überschritten." #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "VLAN %(vlan_id)d kann nicht entfernt werden, da dies die primäre VLAN-ID" " einer anderen Netzbrücke ist." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "VLAN %(vlan_id)d kann nicht bereitgestellt werden. Es scheint in der " "Einheit '%(dev_name)s' auf dem virtuellen E/A-Server %(vios)s enthalten zu sein. Diese Einheit ist mit keiner" " Netzbrücke (gemeinsam genutzter Ethernet-Adapter) verbunden. Bitte " "entfernen Sie die Einheit manuell oder fügen Sie sie zur Netzbrücke hinzu, bevor " "Sie fortfahren." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "Eine logische Einheit mit dem Namen %(lu_name)s ist auf dem gemeinsam genutzten " "Speicherpool %(ssp_name)s bereits vorhanden." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. 
" "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "Es wurde kein physischer Anschluss gefunden, dem ein virtueller Fibre Channel-Port zugeordnet werden könnte. " "Entweder ist ein virtueller E/A-Server nicht verfügbar oder" " die Spezifikation für die physischen Fibre Channel-Ports ist falsch." #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "Die Konsole für die virtuelle Maschine konnte nicht gestartet werden. Die pypowervm-API wird" " in einem nicht lokalen Modus ausgeführt. Die Konsole kann nur dann implementiert werden, wenn " "pypowervm neben der PowerVM-API ausgeführt wird." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s besitzt keine Subtasks!" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask kann keinen leeren Feed besitzen." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "Das Betriebssystem verweigerte den Zugang auf die Datei %(access_file)s." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "Das Betriebssystem stieß bei dem Versuch, die Datei %(access_file)s zu lesen, auf einen E/A-Fehler: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "Die Migrationstask ist fehlgeschlagen. %(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "Für die VM %(vm_name)s wurde keine Ladequelle gefunden." 
#: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s.  The " "parent_entry attribute is not set.  This may be due to using a PV " "obtained through an unsupported property chain.  The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "Die pg83-Codierung für die HDisk %(dev_name)s konnte nicht abgeleitet werden. " "Das Attribut parent_entry ist nicht festgelegt. Dies könnte darauf zurückzuführen sein, dass ein physischer Datenträger verwendet wird, " "der über eine nicht unterstützte Kette von Eigenschaften angefordert wurde. " "Auf den physischen Datenträger muss über VIOS.phys_vols, VG.phys_vols oder " "VIOS.scsi_mappings[n].backing_storage zugegriffen werden." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping.  Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "Speicherelement der vSCSI-Zuordnung konnte nicht neu zugeordnet werden. Erwartet wurde " "genau eine übereinstimmende Zuordnung, gefunden: %(num_mappings)d." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping.  A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "Speicherelement der vSCSI-Zuordnung konnte nicht neu zugeordnet werden. Eine Zuordnung für " "Speicherelement %(stg_name)s zu Client-LPAR %(lpar_uuid)s ist bereits vorhanden." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "Die Einheit %(devname)s wurde %(count)d-mal gefunden; erwartet wurde, sie höchstens " "einmal zu finden." 
#: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "In FeedTask %(ft_name)s sind mehrere Ausnahmebedingungen aufgetreten:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "Erwartet wurde genau eine Managementpartition; gefunden wurden %(count)d." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "Es wurde erwartet, genau eine Partition mit der ID %(lpar_id)d zu finden; gefunden wurden " "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "Die Standardschicht im gemeinsam genutzten Speicherpool %(ssp_name)s wurde nicht gefunden." #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "Die Einheit mit UDID %(udid)s wurde auf keinem der virtuellen E/A-Server " "gefunden." #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "Es sind nicht genügend virtuelle E/A-Server zur Unterstützung" " der Einheit mit UDID %(udid)s der virtuellen Maschine vorhanden." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "Die erwarteten Fabrics (%(fabrics)s) wurden auf keinem der virtuellen " "E/A-Server gefunden." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "Die virtuelle Maschine kann nicht neu erstellt werden. 
Sie verwendet den E/A-Typ " "%(io_type)s, der für die VM-Wiederherstellung nicht unterstützt wird." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "Die Anzahl der VFC-Steckplätze auf dem Zielsystem (%(rebuild_slots)d) stimmt nicht" " mit der Anzahl der Steckplätze auf dem Clientsystem (%(original_slots)d) überein. " "Diese virtuelle Maschine kann auf diesem System nicht neu erstellt werden." #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "Zum Registrieren der Steckplatzinformationen der Netzeinheit ist ein CNA- oder VNIC- " "Adapter erforderlich. Stattdessen wurde Folgendes angegeben: %(wrapper)s." #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "Es sind nicht genügend aktive virtuelle E/A-Server verfügbar. Erwartet " "%(exp)d; gefunden wurden %(act)d." #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "Es ist kein virtueller E/A-Server (VIOS) verfügbar. Es wurde " "%(wait_time)d Sekunden darauf gewartet, dass ein VIOS aktiv wird. Überprüfen Sie die " "RMC-Konnektivität zwischen den PowerVM-NovaLink- und den virtuellen E/A-Servern." 
#: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Es wurden keine aktiven SR-IOV-Adapter im SR-IOV-Modus gefunden.\n" "Position | Modus | Status\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "Die Redundanzanforderung von %(red)d konnte nicht erfüllt werden. %(found_vfs)d" " funktionsfähige Sicherungseinheit(en) wurde(n) gefunden." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "Das verwaltete System unterstützt vNICs nicht." #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "Es gibt keine aktiven virtuellen E/A-Server, die vNICs unterstützen." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "Es wurde eine Redundanz von %(red)d angegeben, das verwaltete System" " unterstützt aber vNIC-Failover nicht." #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "Es wurde eine Redundanz von %(red)d angegeben, es gibt aber keine aktiven " "virtuellen E/A-Server, die vNIC-Failover unterstützen." #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "Die Datenträgergruppe %(vol_grp)s, in der die virtuellen " "optischen Medien gespeichert werden sollen, wurde nicht gefunden. Das Medienrepository konnte nicht erstellt werden." 
#: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "Die Aktualisierung des verwalteten Systems wurde nicht gestartet, da Änderungen" " an einem oder mehreren physischen SR-IOV-Anschlüssen, die durch vNICs verwendet werden, angefordert wurden.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "Ein VNC-basiertes virtuelles Terminal kann nicht erstellt werden: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "Der Adaptercache wird nicht unterstützt." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "Ungültiger Wert '%(value)s' für '%(enum)s'. Gültige Werte sind: " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "Kein VIOS mit dem Namen %(vios_name)s gefunden." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "Keine Datenträgergruppe mit dem Namen %(vg_name)s gefunden." #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "Die Partition mit dem Namen %(part_name)s ist keine IBM i-Partition." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "Partitionsargument der Funktion PanelJob ist leer." #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "Anzeigefunktionsoperation %(op_name)s ist ungültig. Eine von %(valid_ops)s " "erwartet." #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. 
Return code: %(status)s" msgstr "ISCSI-Erkennung für VIOS %(vios_uuid)s fehlgeschlagen. Rückgabecode: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "ISCSI-Abmeldung für VIOS %(vios_uuid)s fehlgeschlagen. Rückgabecode: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "ISCSI entfernen für VIOS %(vios_uuid)s fehlgeschlagen. Rückgabecode: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "Vstor %(stor_udid)s für VIOS %(vios_uuid)s nicht gefunden." #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "Die vorgeschlagene erweiterte Attributgruppe '%(arg_xag)s' passt nicht zur vorhandenen " "erweiterten Attributgruppe '%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "Das Zertifikat ist abgelaufen." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "Präfix und Suffix dürfen zusammen nicht mehr als %d Zeichen enthalten." #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "Gesamtlänge muss mindestens 1 Zeichen sein." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "Der Namensparameter muss mindestens ein Zeichen lang sein." #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "Wenn trunk_ok den Wert False hat, darf der Namensparameter nicht länger als %d Zeichen sein." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "Entwicklerfehler: partiell übergeordnete Spezifikation." 
#: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "Entwicklerfehler: parent_type muss entweder ein Zeichenfolgenschematyp oder " "eine Wrapper-Unterklasse sein." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'.  Expected one of %(good_vals)s, or a list." msgstr "Wert '%(bad_val)s' ungültig. Erwartet wurde einer der Werte %(good_vals)s oder eine Liste." #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "ANFORDERUNG: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "ANTWORT: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete.  Marker LU(s): %s" msgstr "Warten auf das Beenden der in Bearbeitung befindlichen Uploads. LU-Marker: %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "Aufgeben zugunsten des gerade stattfindenden Uploads." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "Aufgeben des Uploads zugunsten des Markers %s." #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "Bereits hochgeladene Image-LU %s wird verwendet." #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "Erstellen der Marker-LU %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "Upload in Image-LU %(lu)s (Marker %(mkr)s)." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "Fehlgeschlagene LU %s wird entfernt." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "Der virtuelle Switch %s wurde auf dem System nicht gefunden." 
#: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "Es wurde kein gültiges VLAN für den virtuellen Switch %s gefunden." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "Fehler beim Abrufen der Hostspeicherüberkapazität für Host mit UUID '%(host)s': " "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "Zeitlimitüberschreitung beim Warten darauf, dass der RMC-Status aller eingeschalteten virtuellen " "E/A-Server aktiv wird. Wartezeit war: %(time)d Sekunden. Virtuelle E/A-Server, die " "nicht aktiv geworden sind: %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "Es wird angenommen, dass der Steckplatz ohne Beschreibung ein physischer Ein-/Ausgang ist: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "Partition %s ist bereits eingeschaltet." #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "Partition %s ist bereits ausgeschaltet." #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "Die Angabe von add_parms als Wörterverzeichnis wird nicht weiter unterstützt. Geben Sie stattdessen eine %s- " "Instanz an." #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "Normale Beendigung des Betriebssystems IBMi fehlgeschlagen. Es wird versucht, das Betriebssystem sofort zu beenden. 
" "Partition: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Sofortige Beendigung des Betriebssystems IBMi fehlgeschlagen. Es wird versucht, VSP normal zu beenden. " "Partition: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "Sofortige Beendigung eines anderen Betriebssystems als IBMi hat das zulässige Zeitlimit überschritten. Es wird versucht, die Beendigung von VSP zu erzwingen. " "Partition: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "Sofortige Beendigung eines anderen Betriebssystems als IBMi fehlgeschlagen. Es wird versucht, VSP normal zu beenden. " "Partition: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "Erzwungene Beendigung von VSP mit Standardzeitlimit. Partition: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "Neuer Versuch zur Änderung der SCSI-Zuordnung." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Vorhandene Zuordnung des %(stg_type)s-Speicherelements %(stg_name)s vom " "virtuellen E/A-Server %(vios_name)s zur Client-LPAR %(lpar_uuid)s gefunden." #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Erstellen der Zuordnung des %(stg_type)s-Speicherelements %(stg_name)s vom " "virtuellen E/A-Server %(vios_name)s zur Client-LPAR %(lpar_uuid)s gefunden." 
#: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "match_func und stg_elem dürfen nicht gleichzeitig angegeben werden." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "Die Methode register_cna ist veraltet! Verwenden Sie die Methode " "register_vnet." #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "Die Methode drop_cna ist veraltet! Verwenden Sie die Methode drop_vnet." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "Der physische SR-IOV-Anschluss bei Position %(loc_code)s sichert einen vNIC, der" " zu LPAR %(lpar_name)s gehört (LPAR-UUID: %(lpar_uuid)s; vNIC-UUID: " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "Vornehmen von Änderungen an den Bezeichnungen der folgenden physischen SR-IOV-Anschlüsse, obwohl " "sie durch vNICs verwendet werden:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "vio_file mit UUID %s konnte nicht gelöscht werden. Sie muss manuell gelöscht werden." #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "Beim Hochladen ist ein Problem aufgetreten. Wird wiederholt." #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "Die Methode crt_lu_linked_clone ist veraltet! Verwenden Sie die Methode crt_lu " "(Klon=src_lu, Größe=lu_size_gb)." 
#: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "Die logische Einheit %(luname)s als Platte hat keine Sicherungsimage-LU. (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "Neue virtuelle Platte beim Hochladen der Datei nicht gefunden." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "Die Einheit wird ignoriert, weil sie keine UDID hat:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "Die Einheit %s wurde in der Liste nicht gefunden." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "Löschen der virtuellen Platte %(vdisk)s aus Datenträgergruppe %(vg)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "Löschen der virtuellen optischen Einheit %(vopt)s aus Datenträgergruppe %(vg)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "Entfernen der LU %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "Die LU %(lu_name)s wurde nicht gefunden – sie könnte aus dem Bereich gelöscht worden sein. " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "Image-LU %(lu_name)s wird entfernt, weil sie nicht mehr im Gebrauch ist. (UDID: " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. 
(UDID: %(lu_udid)s)" msgstr "Logische Sicherungseinheit %(lu_name)s wurde nicht gefunden. (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "Entwicklerfehler: Es ist entweder eine Ebene oder ein LU-Feed erforderlich." #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "Entwicklerfehler: Der LU-Feed-Parameter muss EntryWrapper LUEnt umfassen." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "Löschen der LU %(lu_name)s (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "Der HTTP-Fehler wird ignoriert, da LU %(lu_name)s aus dem Bereich gelöscht worden sein könnte." " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "Entfernen von %(num_maps)d verwaisten %(stg_type)s Zuordnungen vom virtuellen E/A-Server " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "Es werden %(num_maps)d portlose VFC-Zuordnungen von VIOS entfernt %(vios_name)s." #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "Entfernen von %(num_maps)d %(stg_type)s Zuordnungen, die mit LPAR ID " "%(lpar_id)d verbunden sind, vom virtuellen E/A-Server %(vios_name)s." #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." 
msgstr "" "Der Speicher %(stg_name)s vom Typ %(stg_type)s wird nicht entfernt, weil nicht festgestellt werden kann, " "ob er noch in Gebrauch ist. " "Es könnten manuelle Verifizierung und Bereinigung notwendig sein." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." msgstr "" "Die Speicherbereinigung ignoriert das Speicherelement %(stg_name)s, weil es von einem " "nicht erwarteten Typ %(stg_type)s ist." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "Bereinigen der folgenden %(vdcount)d virtuellen Platten aus VIOS %(vios)s: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "Bereinigen der folgenden %(vocount)d virtuellen optischen Einheiten aus VIOS %(vios)s: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "Überspringen der Bereinigung von %(stg_type)s Zuordnungen aus dem virtuellen E/A-Server %(vios_name)s für die " "folgenden LPAR-IDs, da diese LPARs vorhanden sind: %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "Ein entsprechender virtueller E/A-Server wurde nicht gefunden. Die bereitgestellten Nutzdaten waren wahrscheinlich " "unzureichend. Die Nutzdaten lauten:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. 
Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "Die gefundene VFC-Port-Zuordnung besitzt keine sichernde Anschlussgruppe. Hinzufügen von %(port)s zur " "Zuordnung für Client-WWPNs: %(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "Bei der Abfrage des virtuellen optischen Medienrepositorys ist ein Fehler aufgetreten. " "Es wird versucht, die Verbindung mit einem virtuellen optischen Medienrepository " "zu erstellen." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "vterm konnte nicht geschlossen werden." #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "Ungültige Ausgabe bei geöffnetem vterm. Es wird versucht, das vterm zurückzusetzen. Fehler war: %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "VNC-Socket-Listener empfangsbereit an IP=%(ip)s Port=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "Fehler beim Aushandeln von SSL für VNC-Repeater: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "Erkennung der HDisk fehlgeschlagen. Der alte Speicher für LPAR-IDs %s wird bereinigt und " "der Vorgang wird wiederholt." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "LUA-Wiederherstellung erfolgreich. Gefundene Einheit: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "ITL-Fehler aufgetreten: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "Einheit %s ist aktuell in Gebrauch." 
#: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "Einheit %s mit unbekannter UDID erkannt." #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "Die folgende Einheit konnte nicht erkannt werden: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "CLIRunner-Fehler: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "QUERY_INVENTORY LUARecovery Job erfolgreich, aber das Ergebnis enthält weder " "OutputXML noch eine Standardausgabe." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY erstellte ungültigen XML-Datenblock (%(chunk)s). Fehler: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "Es konnte kein pg83-Deskriptor in der XML-Ausgabe gefunden werden:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "Der ISCSI-Befehl wurde erfolgreich abgeschlossen" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "Die ISCSI-Sitzung ist bereits vorhanden und angemeldet" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "Der ISCSI-Befehl wurde auf dem nicht unterstützten VIOS, Host, ausgeführt." #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "Bei der ISCSI-Erkennung wurden in der ODM-Datenbank veraltete Einträge gefunden." 
#: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "Die ISCSI-Sitzung konnte nicht gefunden werden " #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "Es wurden keine Datensätze/Ziele/Sitzungen/Portale zum Ausführen der Operation gefunden" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "Der ISCSI-Befehl ist mit dem internen Fehlerstatus %s fehlgeschlagen" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "Generischer ISCSI-Fehlercode" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "Fehler bei der Anmeldung der ISCSI-Sitzung" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "Ungültige Argumente im ISCSI-Befehl" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "Beim Versuch, eine Verbindung herzustellen, ist der ISCSI-Verbindungszeitgeber abgelaufen." #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "Der ISCSI-Befehl konnte den Host nicht finden" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "Der ISCSI-Befehl hat einen unerwarteten Status zurückgegeben = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "Der ISCSI-Befehl wurde auf einem nicht unterstützten VIOS ausgeführt " #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "Bereinigen Sie den alten Speicher für LPAR-IDs %s und wiederholen Sie die iSCSI-Erkennung." #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." 
msgstr "" "Die Messdaten sind nicht verfügbar. Dies könnte daran liegen, dass die Metriken " "vor Kurzem initialisiert wurden." #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "Dies ist ein Test" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "Dies ist eine Nachricht, für die keine Übersetzung vorhanden ist" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "Der Prozessoreinheitenfaktor muss zwischen 0,05 und 1,0 liegen. Wert: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "Der Name der logischen Partition hat eine ungültige Länge. Name: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "Feld '%(field)s' hat einen ungültigen Wert: '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "Der Wert None ist nicht gültig." #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "Wert '%(value)s' ist nicht gültig für Feld '%(field)s' mit zulässigen " "Auswahlmöglichkeiten: %(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "Feld '%(field)s' hat einen Wert, der kleiner als das Minimum ist. Wert: %(value)s; " "Minimum: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "Feld '%(field)s' hat einen Wert, der größer als das Maximum ist. 
Wert: %(value)s; " "Maximum: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. " "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "'%(desired_field)s' hat einen Wert, der über dem Wert '%(max_field)s' liegt. " "Gewünscht: %(desired)s Maximum: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "'%(desired_field)s' hat einen Wert, der unter dem Wert '%(min_field)s' liegt. " "Gewünscht: %(desired)s Minimum: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "Der Speicherwert ist kein Vielfaches der Blockgröße des logischen Hauptspeichers " "(%(lmb_size)s) im Host. Wert: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "Das verwaltete System unterstützt Active Memory Expansion nicht. Der " "Wert für den Erweiterungsfaktor '%(value)s' ist nicht gültig." #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "Der Wert für die Active Memory Expansion muss größer oder gleich 1.0 sein und kleiner oder gleich 10.0. " "Ein Wert von 0 ist ebenso gültig und zeigt an," " dass AME ausgeschaltet ist. '%(value)s' ist ungültig." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. 
Error was a known " "retry response code: %(resp_code)s" msgstr "" "Versuch %(retry)d von insgesamt %(total)d für URI %(uri)s. Der Fehler war ein bekannter " "Wiederholungsantwortcode: %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "Versuch %(retry)d von %(total)d ist fehlgeschlagen. Wird wiederholt. Ausnahmebedingung:\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "EntryWrapper oder EntryWrapperGetter muss angegeben werden." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "Es muss eine gültige Subtask angegeben werden." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "Doppelter 'provides'-Name %s." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s besitzt keine Subtasks. Ausführung einer Nulloperation." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "Es muss entweder eine Liste der EntryWrapper oder ein FeedGetter angegeben werden." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s besitzt keine Subtasks. Ausführung einer Nulloperation." #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "In FeedTask %s sind mehrere Ausnahmebedingungen aufgetreten. Sie sind unten einzeln" " protokolliert." 
#: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "Zu wenig %(res_name)s auf dem Host für die virtuelle Maschine " "'%(instance_name)s' verfügbar (%(requested)s angefordert, %(avail)s verfügbar)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "Speicher" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "Die virtuelle Maschine muss ausgeschaltet werden, bevor die minimale oder " "maximale Speicherkapazität geändert wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "Die virtuelle Maschine muss ausgeschaltet werden, bevor der Erweiterungsfaktor geändert " "wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPUs" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "Verarbeitungseinheiten" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "Die virtuelle Maschine muss ausgeschaltet werden, bevor die minimale oder " "maximale Anzahl von Prozessoren geändert wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." 
msgstr "" "Die virtuelle Maschine muss ausgeschaltet werden, bevor die minimale oder " "maximale Anzahl von Prozessoreinheiten geändert wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "Die virtuelle Maschine muss ausgeschaltet werden, bevor der " "Prozessorkompatibilitätsmodus geändert wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "Die virtuelle Maschine muss zuerst ausgeschaltet werden, bevor der " "Verarbeitungsmodus geändert wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Die gewünschte Anzahl an Prozessoren (%(vcpus)d) darf nicht größer sein als die maximal zulässige Anzahl " "Prozessoren pro Partition (%(max_allowed)d) für die virtuelle Maschine " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "Die maximale Anzahl an Prozessoren (%(vcpus)d) darf nicht größer sein als der Grenzwert " "für die maximale Systemprozessorkapazität (%(max_allowed)d) für die virtuelle Maschine " "'%(instance_name)s'." 
#: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "Die virtuelle Maschine muss ausgeschaltet werden, bevor die Funktion für den vereinfachten " "Neustart von einem fernen System geändert wird. Schalten Sie die virtuelle Maschine %s aus und versuchen Sie es erneut." #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "Partition besitzt keine aktive RMC-Verbindung." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "Partition besitzt keine aktive DLPAR-Funktionalität für %s." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "E/A" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "Speicher" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "Prozessoren" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "Ungültige KeylockPos '%s'." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "Ungültiger Bootmodus '%s'." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "Der IOSlot.adapter wird nicht mehr verwendet! Verwenden Sie stattdessen den IOSlot.io_adapter." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "Für die Master-Managementkonsole konnte der MTMS-Wert (Maschinentyp, Modell, " "Seriennummer) von %(identifier)s nicht festgelegt werden, weil kein %(param)s als" " Hauptkonsole für den Pool markiert war." 
#: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "UUID kann nicht festgelegt werden." #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "%(property_name)s='%(value)s' konnte nicht in Objekt %(pvmobject)s konvertiert werden" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "Verweigerung der href-Festlegung für mehrere Links.\n" "Pfad: %{path}s\n" "Anzahl der gefundenen Links: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "Verweigerung der Erstellung und des Einschlusses eines Elements ohne Tag." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "'entry'-Eigenschaft in Antwort fehlt." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "Es muss eine Antwort oder ein Eintrag für den Einschluss angegeben werden. Erhalten wurde %s." #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "Entwicklerfehler: Geben Sie 'übergeordnet' oder ('parent_type' und 'parent_uuid') an," " um ein UNTERGEORDNETES Objekt abzurufen." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "Geben Sie entweder 'uuid' oder 'root_id' an, wenn Sie ein Rootobjekt anfordern." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "Sowohl parent_type als auch parent_uuid sind erforderlich, " "wenn Sie einen untergeordneten Feed oder Eintrag abrufen." 
#: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "Geben Sie die übergeordnete UUID über den Parameter parent_uuid an." #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "Geben Sie entweder 'uuid' oder 'child_id' an, wenn Sie ein untergeordnetes Objekt anfordern." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "Übergeordnete UUID ohne übergeordneten Typ angegeben." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "Die Methode search() erfordert genau ein Argument im Format Schlüssel=Wert." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "Wrapperklasse %(class)s unterstützt Suchkriterium '%(key)s' nicht." #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "Der Parameter 'xag' für EntryWrapper.update wird nicht mehr verwendet! Im günstigsten Fall" " resultiert er in einer Nulloperation. Im schlimmsten Fall gibt er Ihnen irreparable ETag- " "Nichtübereinstimmungsfehler an." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "Kein solches untergeordnetes Element vorhanden." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "UUID für Wrapper ohne Metadaten kann nicht festgelegt werden." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "UUID-Wert ungültig: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "Eine Wrapper-Unterklasse muss angegeben werden." 
#: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "Es muss entweder übergeordnete Klasse und übergeordnete UUID oder keines von beiden angegeben werden." #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "Überwachung von Job %(job_id)s für %(time)i Sekunden." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "Ausgeben einer Abbruchanforderung für Job %(job_id)s. Wird den Job unendlich oft" " auf Beendigung abfragen." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "Job %s nicht gelöscht. Job ist im Laufstatus." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPAR ist nicht im aktiven Status." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "Zielsystem besitzt nicht die Funktionalität der IBM i-LPAR-Mobilität." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "IBM i-LPAR besitzt keine eingeschränkte E/A." #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "Quellensystem besitzt nicht die Funktionalität der IBM i-LPAR-Mobilität." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "LPAR besitzt keine aktive RMC-Verbindung." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR ist die Managementpartition" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "LPAR ist für LPM nicht verfügbar, da die DLPAR-Funktionalität fehlt." 
#: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "Dies ist nicht die Eigenschaft, die Sie suchen. Verwenden Sie srr_enabled in einer " "NovaLink-Umgebung." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "Ungültige IPLSrc '%s'." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "Diese Eigenschaft ist veraltet! Verwenden Sie stattdessen pci_subsys_dev_id." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "Diese Eigenschaft ist veraltet! Verwenden Sie stattdessen pci_rev_id." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "Diese Eigenschaft ist veraltet! Verwenden Sie stattdessen pci_subsys_vendor_id." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "Diese Eigenschaft ist veraltet! Verwenden Sie stattdessen drc_index." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "Diese Eigenschaft ist veraltet! Verwenden Sie stattdessen drc_name." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "Ungültige übergeordnete Spezifikation für CNA.create." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV hatte einen verschlüsselten pg83-Deskriptor \"%(pg83_raw)s\", konnte ihn aber nicht entschlüsseln " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! 
Please" " use values from pypowervm.const.XAG instead." msgstr "" "Die Eigenschaft 'xags' der VIOS-EntryWrapper-Klasse wird nicht mehr verwendet!" " Bitte verwenden Sie stattdessen die Werte von pypowervm.const.XAG." #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "Partition des VIOS-Typs unterstützt LPM nicht" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "LUA der Zieleinheit kann nicht ohne Sicherungsspeichereinheit angegeben werden!" # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/es/0000775000175000017500000000000013571367172017167 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/es/pypowervm.po0000664000175000017500000020113613571367171021601 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=2; plural=n != 1;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "Protocolo no válido \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "Comunicación sin cifrar con PowerVM. Revierta la configuración a https." #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." 
msgstr "Se ha encontrado un error en el cálculo del memento de auditoría predeterminado, se va a utilizar 'default'." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "La autenticación local no está soportada en la HMC." #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "Configurando el escucha de sucesos para %s" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "Descriptor de archivo inesperado en la solicitud %s." #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "Error inesperado para %(meth)s %(url)s." #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "Error inesperado %(class)s para %(method)s %(url)s: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "El nuevo inicio de sesión se ha considerado inseguro. Esta instancia de sesión no debería " "utilizarse más." 
#: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "Intentando volver a iniciar sesión %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "401 de reinicio de sesión, cuerpo de respuesta:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "Se ha encontrado un error en el reinicio de sesión, cuerpo de respuesta:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "Se ha encontrado un error en el reinicio de sesión:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "Se ha encontrado un error en el reinicio de sesión con otro 401, cuerpo de respuesta:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "respuesta HTTP 401 sospechosa para %(method)s %(path)s: la señal es completamente nueva" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "No se ha podido establecer la conexión con el servidor REST - ¿se ha iniciado el servicio pvm-rest? " "Se están reintentando %(try_num)d de %(max_tries)d después de %(delay)d segundos." #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "Iniciando sesión en %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "Se ha encontrado un error en el análisis de una señal de sesión procedente de la respuesta de PowerVM." #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " Cuerpo = %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." 
msgstr "Se ha encontrado un error en el análisis de una vía de acceso de archivo de sesión procedente de la respuesta de PowerVM." #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "El archivo de señal %s no contenía ninguna señal de sesión legible." #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "Cerrando sesión de %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "Problema al cerrar la sesión. Se ignorará." #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "el trabajo debe ser un elemento JobRequest" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "a JobRequest le falta OperationName" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "la vía de acceso=%s no es una referencia de API de PowerVM" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "la vía de acceso=%s no es una referencia de API de PowerVM" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "Descriptor de archivo no válido" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "root_id esperado" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "child_type esperado" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "suffix_type inesperado=%s" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "suffix_parm esperado" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "child_id esperado" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" 
msgstr "child_id inesperado" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "root_id inesperado" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "req_method inesperado=%s" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "Error en el análisis de la respuesta XML procedente de PowerVM: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "La respuesta no es un feed ni una entrada Atom." #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "HTTP 204 inesperado para la solicitud" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "Cuerpo de respuesta vacío de forma inesperada" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "cabeceras de solicitud: %(reqheaders)s\n" "\n" "cuerpo de solicitud: %(reqbody)s\n" "\n" "cabeceras de respuesta: %(respheaders)s\n" "\n" "cuerpo de respuesta: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "Error Atom para %(method)s %(path)s: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "La sesión no debe ser None (ninguna)." #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." msgstr "Ya hay un escucha de sucesos activo en la sesión." #: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "Se ha encontrado un error en la inicialización del escucha de feeds de sucesos: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "El ID de aplicación \"%s\" no es exclusivo." 
#: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "Concluyendo" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "Este manejador ya está suscrito." #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "El manejador debe ser un EventHandler." #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "No se ha encontrado el manejador en la lista de suscriptores." #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "Cerrando EventListener de %s" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "Se ha completado el cierre de EventListener para %s" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "Error al obtener sucesos de PowerVM: %s. (¿El servicio pvm-rest está inactivo?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "EventType inesperado=%s" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "Error al procesar los sucesos de PowerVM." #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." msgstr "" "No se puede derivar el puerto de canal de fibra físico apropiado para WWPN %(wwpn)s. Es posible que" " los grupos de atributos ampliados del VIOS no fueran suficientes. El URI de VIOS" " para la consulta ha sido %(vio_uri)s." 
#: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "No se ha encontrado el elemento: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "No se ha encontrado la LPAR: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "No se ha encontrado el adaptador" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "La operación '%(operation_name)s' ha fallado. %(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "La operación '%(operation_name)s' ha fallado. No se ha podido completar la tarea en" " %(seconds)d segundos." #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "No se puede ejecutar la conclusión del sistema operativo en la máquina virtual %(lpar_nm)s porque su " "conexión RMC no está activa." #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "No se ha podido apagar la máquina virtual %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "El apagado de la máquina virtual %(lpar_nm)s ha agotado el tiempo de espera después de %(timeout)d " "segundos." #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "No se ha podido encender la máquina virtual %(lpar_nm)s: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." 
msgstr "" "El encendido de la máquina virtual %(lpar_nm)s ha agotado el tiempo de espera después de %(timeout)d " "segundos." #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "No se puede eliminar la VLAN %(vlan_id)d ya que es el identificador de VLAN primaria" " en otro puente de red." #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "No se puede suministrar la VLAN %(vlan_id)d. Parece ser que está contenida en " "el dispositivo '%(dev_name)s' en el servidor de E/S virtual %(vios)s. Dicho dispositivo no está" " conectado a ningún puente de red (adaptador Ethernet compartido). Elimine " "manualmente el dispositivo o añádalo al puente de red antes de " "continuar." #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "Ya existe una unidad lógica con el nombre %(lu_name)s en la agrupación de " "almacenamiento compartido %(ssp_name)s." #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "No se puede encontrar un puerto físico con el que correlacionar un puerto de canal de fibra virtual. " "Esto se debe a que el servidor de E/S virtual no está disponible, o a una especificación de puerto" " incorrecta para los puertos de canal de fibra físicos." 
#: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "No se puede iniciar la consola en la máquina virtual. La API pypowervm se" " ejecuta en una modalidad no local. La consola solo se puede desplegar cuando " "pypowervm está ubicado conjuntamente con la API PowerVM." #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s no tiene ninguna subtarea." #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask no puede tener un canal de información vacío." #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "El sistema operativo ha denegado el acceso al archivo %(access_file)s." #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "El sistema operativo ha encontrado un error de E/S al intentar leer el archivo %(access_file)s: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "La tarea de migración ha fallado. %(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "No se ha encontrado ningún origen de carga para la máquina virtual %(vm_name)s" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "No se puede derivar la codificación pg83 para el hdisk %(dev_name)s. El " "atributo parent_entry no está definido. 
Puede deberse a la utilización de un volumen físico " "obtenido a través de una cadena de propiedades no soportada. El acceso al PV debe realizarse " "a través de VIOS.phys_vols, VG.phys_vols o " "VIOS.scsi_mappings[n].backing_storage." #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "No se puede volver a correlacionar el elemento de almacenamiento de la correlación vSCSI. Se esperaba encontrar " "exactamente una correlación coincidente, pero se han encontrado %(num_mappings)d." #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "No se puede volver a correlacionar el elemento de almacenamiento de la correlación vSCSI. Ya existe una correlación " "del elemento de almacenamiento %(stg_name)s con la LPAR de cliente %(lpar_uuid)s." #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "El dispositivo %(devname)s se ha encontrado %(count)d veces; se esperaba encontrarlo como máximo " "una vez." #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)s ha experimentado varias excepciones:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "Se esperaba encontrar una única partición de gestión; se han encontrado %(count)d." #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "Se esperaba encontrar exactamente una partición con el ID %(lpar_id)d; se han encontrado " "%(count)d." 
#: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "No se ha podido encontrar el nivel predeterminado en la agrupación de almacenamiento compartido %(ssp_name)s." #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "El dispositivo con UDID %(udid)s no se ha encontrado en ningún servidor de E/S " "virtuales." #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "No hay suficientes servidores de E/S virtuales para dar soporte al dispositivo de la máquina" " virtual con UDID %(udid)s." #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "Los entramados esperados (%(fabrics)s) no se han encontrado en ninguno de los servidores de " "E/S virtuales." #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "No se puede recrear la máquina virtual. Está utilizando un tipo de entrada/salida de " "%(io_type)s, que no se admite para la recreación de la máquina virtual." #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "El número de ranuras de VFC en el sistema de destino (%(rebuild_slots)d) no coincide" " con el número de ranuras en el sistema cliente (%(original_slots)d). " "No se puede recrear esta máquina virtual en este sistema." 
#: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "Para registrar la información de la ranura del dispositivo de red, se necesita un adaptador " "CNA o VNIC. En su lugar, se ha indicado lo siguiente: %(wrapper)s." #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "No hay suficientes servidores de E/S virtuales activos disponibles. Se esperaban " "%(exp)d; y se han encontrado %(act)d." #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "No hay servidores de E/S virtuales disponibles. Se ha intentado esperar a que un VIOS pasara " "a estar activo durante %(wait_time)d segundos. Compruebe la conectividad " "RMC entre NovaLink de PowerVM y los servidores de E/S virtuales." #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "No se pueden encontrar adaptadores SR-IOV en modalidad Sriov y estado En ejecución.\n" "Ubicación | Modalidad | Estado\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." msgstr "" "No se puede cumplir el requisito de redundancia de %(red)d. Se ha(n) encontrado %(found_vfs)d" " dispositivo(s) de respaldo viable(s)." #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "El sistema gestionado no admite vNIC." 
#: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "No hay servidores de E/S virtuales activos que admitan vNIC." #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "Se ha especificado una redundancia de %(red)d, pero el sistema gestionado no admite" " la migración tras error de vNIC." #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "Se ha especificado una redundancia de %(red)d, pero no hay VIOS activos " "que admitan la migración tras error de vNIC." #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "No se puede ubicar el grupo de volúmenes %(vol_grp)s en el que almacenar " "el soporte óptico virtual. No se puede crear el repositorio de soportes." #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "La actualización de ManagedSystem no se ha intentado porque se han solicitado cambios" " a uno o varios puertos físicos SR-IOV que están siendo utilizados por vNIC.\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "No se puede crear el terminal virtual basado en VNC: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "La caché del adaptador no es compatible." #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "Valor no válido '%(value)s' para '%(enum)s'. 
Los valores válidos son: " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "No se ha encontrado ningún VIOS con el nombre %(vios_name)s." #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "No se ha encontrado ningún grupo de volúmenes con el nombre %(vg_name)s." #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "La partición con el nombre %(part_name)s no es una partición de IBMi." #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "El argumento de partición de función PanelJob está vacío." #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "La operación de función de panel %(op_name)s no es válida. Se esperaba uno de %(valid_ops)s. " "" #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "El descubrimiento de ISCSI ha fallado para el VIOS %(vios_uuid)s. Código de retorno: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "El cierre de sesión de ISCSI ha fallado para el VIOS %(vios_uuid)s. Código de retorno: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "La eliminación de ISCSI ha fallado para el VIOS %(vios_uuid)s. Código de retorno: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "No se ha encontrado Vstor %(stor_udid)s para VIOS %(vios_uuid)s." 
#: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "El grupo de atributos ampliados propuesto '%(arg_xag)s' no coincide con el grupo de " "atributos ampliados existente '%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "El certificado ha caducado." #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "El prefijo y sufijo juntos no pueden tener más de %d caracteres." #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "La longitud total debe ser al menos de 1 carácter." #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "El parámetro de nombre debe tener al menos un carácter de longitud." #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "El parámetro de nombre no debe sobrepasar de %d caracteres cuando trunk_ok es False." #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "Error de desarrollador: especificación de padre parcial." #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "Error de desarrollador: parent_type debe ser un tipo de esquema de serie o una " "subclase de derivador." #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "Valor no válido '%(bad_val)s'. Se esperaba uno de %(good_vals)s o una lista." 
#: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "SOLICITUD: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "RESPUESTA: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "Esperando a que se completen las subidas en curso. LU(s) de marcador: %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "Renunciando en favor de la carga en curso." #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "Renunciando a la carga en favor del marcador %s." #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "Utilizando LU de imagen ya cargada %s." #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "Creando LU de marcador %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "Cargando en LU de imagen %(lu)s (marcador %(mkr)s)." #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "Eliminando LU con fallos %s." #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "No se puede encontrar el conmutador virtual %s en el sistema." #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "No se ha podido encontrar una VLAN válida para el conmutador virtual %s." #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "Error al obtener la sobrecarga de memoria de host para el host con el UUID '%(host)s': " "%(error)s." 
#: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "Se ha agotado el tiempo de espera esperando a que el estado de RMC de todos los servidores de E/S virtuales " "encendidos pasara a ser activo. El tiempo de espera era: %(time)d segundos. Los VIOS que no se han " "activado son: %(vioses)s." #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "Se considera que la ranura sin descripción es una entrada/salida física: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "La partición %s ya está encendida." #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "La partición %s ya está apagada." #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "La especificación de add_parms como dict ya no se utiliza. Especifique una instancia de %s " "en su lugar." #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "La conclusión normal del sistema operativo de IBMi ha fallado. Se está intentando efectuar una conclusión inmediata del sistema operativo. " "Partición: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "La conclusión inmediata del sistema operativo de IBMi ha fallado. Se está intentando efectuar una conclusión normal del procesador de servicio virtual. " "Partición: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. 
" "Partition: %s" msgstr "" "La conclusión inmediata del sistema operativo que no es de IBMi ha agotado el tiempo de espera. Se está intentando efectuar una conclusión forzada del procesador de servicio virtual. " "Partición: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "La conclusión inmediata del sistema operativo que no es de IBMi ha fallado. Se está intentando efectuar una conclusión normal del procesador de servicio virtual. " "Partición: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "Conclusión forzada del procesador de servicio virtual con tiempo de espera predeterminado. Partición: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "Se está reintentando la modificación de la correlación SCSI." #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Se ha encontrado la correlación existente del elemento de almacenamiento de %(stg_type)s %(stg_name)s del " "servidor de E/S virtual %(vios_name)s a la LPAR de cliente %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Creando correlación del elemento de almacenamiento %(stg_type)s %(stg_name)s del " "servidor de E/S virtual %(vios_name)s a la LPAR de cliente %(lpar_uuid)s." #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "No se debe especificar match_func y stg_elem simultáneamente." #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! 
Please use the register_vnet " "method." msgstr "" "El método register_cna ya no se utiliza. Utilice el método " "register_vnet." #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." msgstr "El método drop_cna ya no se utiliza. Utilice el método drop_vnet." #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "El puerto físico SR-IOV en la ubicación %(loc_code)s respalda a un vNIC que pertenece a" " la partición lógica %(lpar_name)s (UUID de LPAR: %(lpar_uuid)s; UUID de vNIC: " "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "Se están realizando cambios en las siguientes etiquetas de puerto físico SR-IOV aunque " "algunos vNIC las están utilizando:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "No se ha podido suprimir vio_file con UUID %s. Es necesario suprimirlo a mano." #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "Se ha encontrado un error al cargar. Se volverá a intentar." #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "El método crt_lu_linked_clone ya no se utiliza. Utilice el método crt_lu " "(clone=src_lu, size=lu_size_gb)." #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "La unidad lógica de disco %(luname)s no tiene LU de imagen de refuerzo. (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." 
msgstr "No se puede localizar el nuevo vDisk durante la carga de archivo." #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "El dispositivo se ignorará porque carece de UDID:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "El dispositivo %s no se ha encontrado en la lista." #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "Se está suprimiendo el disco virtual %(vdisk)s del grupo de volúmenes %(vg)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "Se está suprimiendo el dispositivo óptico virtual %(vopt)s del grupo de volúmenes %(vg)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "Eliminando LU %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "La LU %(lu_name)s no se ha encontrado: podría haberse suprimido fuera de banda. " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "Eliminando la LU de imagen %(lu_name)s porque ya no está en uso. (UDID: " "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "No se ha encontrado la unidad lógica %(lu_name)s de respaldo. (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "Error de desarrollador: es necesario tier o lufeed." #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." 
msgstr "Error de desarrollador: el parámetro lufeed debe incluir LUEnt EntryWrappers." #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "Suprimiendo LU %(lu_name)s (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "Si se omite HttpError para LU %(lu_name)s podría haberse suprimido fuera de banda." " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "Se están eliminando %(num_maps)d correlaciones de %(stg_type)s huérfano del VIOS " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "Eliminando %(num_maps)d correlaciones de VFC sin puerto del servidor de E/S virtual %(vios_name)s." #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "Se están eliminando %(num_maps)d %(stg_type)s correlaciones asociadas con el ID de LPAR " "%(lpar_id)d del VIOS %(vios_name)s." #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "No se va a eliminar el almacenamiento %(stg_name)s del tipo %(stg_type)s porque no es posible " "determinar si sigue en uso. Es posible que se requiera " "una verificación y una limpieza manuales." #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." 
msgstr "" "La depuración del almacenamiento ignorará el elemento del almacenamiento %(stg_name)s porque es de " "un tipo inesperado: %(stg_type)s." #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "Se están depurando los siguientes %(vdcount)d discos virtuales del VIOS %(vios)s: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "Se están depurando los siguientes %(vocount)d discos ópticos virtuales del VIOS %(vios)s: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "Se omite la depuración de las correlaciones %(stg_type)s del VIOS %(vios_name)s para los " "siguientes ID de LPAR porque dichas LPAR existen: %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "No se encuentra el VIOS adecuado. Es probable que la carga útil proporcionada " "fuera insuficiente. Los datos de carga útil son:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "La correlación del puerto VFC coincidente no tiene ningún conjunto de puertos de respaldo. Añadiendo %(port)s a " "la correlación para wwpns cliente: %(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "Se ha producido un error al consultar el repositorio de soportes ópticos virtuales. 
" "Se está intentando restablecer la conexión con un repositorio de soportes ópticos " "virtuales." #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "No se puede cerrar vterm." #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "Salida no válida en vterm abierto. Se está intentando restablecer el vterm. El error era %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "La escucha de VNCSocket está escuchando en la ip=%(ip)s puerto=%(port)s" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "Error al negociar SSL para repetidor VNC: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "El descubrimiento de hdisk ha fallado; se depurará el almacenamiento obsoleto de los ID de LPAR %s y " "se volverá a intentar." #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "Recuperación de LUA realizada correctamente. Dispositivo encontrado: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "Se ha encontrado el error de ITL: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "El dispositivo %s está en uso actualmente." #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "Se ha descubierto un dispositivo %s con UDID desconocido." 
#: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "No se ha podido descubrir el dispositivo: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "Error de CLIRunner: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "El trabajo QUERY_INVENTORY LUARecovery se ha ejecutado con éxito pero el resultado no contenía ni " "OutputXML ni StdOut." #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY ha producido un trozo no válido de XML (%(chunk)s). Error: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "Se ha encontrado un error en la búsqueda del descriptor pg83 en la salida XML:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "El mandato ISCSI ha finalizado correctamente." #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "La sesión ISCSI ya existe y se ha iniciado sesión en ella." #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "El mandato ISCSI se ejecutado en un host VIOS no compatible." #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "El descubrimiento de ISCSI ha detectado entradas obsoletas en la base de datos de ODM." #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "No se ha podido encontrar la sesión ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "No se han encontrado registros/destinos/sesiones/portales en los que ejecutar la operación." 
#: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "El mandato ISCSI ha fallado con el estado de error interno = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "Código de error ISCSI genérico" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "Error de inicio de sesión ISCSI" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "El mandato ISCSI tiene argumentos no válidos." #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "El temporizador de la conexión ISCSI ha caducado mientras se intentaba la conexión." #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "El mandato ISCSI no ha podido buscar el host" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "El mandato ISCSI ha devuelto un estado inesperado = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "El mandato ISCSI se ejecutado en un VIOS no compatible. " #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "Depura el almacenamiento obsoleto de los ID de LPAR %s y vuelve a intentar el descubrimiento de iSCSI." #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." msgstr "" "Los datos de métricas no están disponibles. Puede deberse a que las métricas se han " "inicializado recientemente." 
#: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "Esto es una prueba" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "Este es un mensaje para el que no existe una traducción" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "El factor de unidades de procesador debe estar entre 0.05 y 1.0. Valor: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "La longitud del nombre de la partición lógica no es válida. Nombre: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "El campo '%(field)s' tiene un valor no válido: '%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "Ningún valor es válido." #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "El valor '%(value)s' no es válido para el campo '%(field)s' con opciones " "aceptables: %(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "El campo '%(field)s' tiene un valor por debajo del mínimo. Valor: %(value)s; " "Mínimo: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "El campo '%(field)s' tiene un valor por encima del máximo. Valor: %(value)s; " "Máximo: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. 
" "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "'%(desired_field)s' tiene un valor por encima del valor '%(max_field)s'. " "Deseado: %(desired)s Máximo: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "'%(desired_field)s' tiene un valor por debajo del valor '%(min_field)s'. " "Deseado: %(desired)s Mínimo: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "El valor de memoria no es un múltiplo del tamaño de bloque de memoria lógica " "(%(lmb_size)s) del host. Valor: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "El sistema gestionado no da soporte a la expansión de memoria activa. El " "valor del factor de expansión '%(value)s' no es válido." #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "El valor de expansión de memoria activa debe ser mayor o igual que 1,0 y " "menor o igual que 10,0. El valor 0 también es válido e indica que" " AME está desactivado. El valor '%(value)s' no es válido." #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "Intento %(retry)d de un total de %(total)d para el URI %(uri)s. 
El error es un " "código de respuesta de reintento conocido: %(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "Ha fallado el intento %(retry)d de %(total)d. Se volverá a intentar. La excepción era: \n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "Debe proporcionar EntryWrapper o EntryWrapperGetter." #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "Debe proporcionar una subtarea válida." #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "Nombre 'provides' duplicado %s." #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s no tiene ninguna subtarea; no se ejecuta ninguna operación." #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "Debe proporcionar una lista de EntryWrappers o un FeedGetter." #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s no tiene ninguna subtarea; no se ejecuta ninguna operación." #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "FeedTask %s ha experimentado varias excepciones. Aparecen registradas de forma individual" " a continuación." 
#: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "%(res_name)s disponibles insuficientes en el host para la máquina virtual " "'%(instance_name)s' (%(requested)s solicitados, %(avail)s disponibles)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "memoria" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar la memoria mínima o " "máxima. Apague la máquina virtual %s y vuelva a intentarlo." #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar el factor de " "expansión. Apague la máquina virtual %s y vuelva a intentarlo." #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPU" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "unidades de proceso" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar la memoria mínima o " "máximos. Apague la máquina virtual %s y vuelva a intentarlo." #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar la memoria mínima o " "máximas. Apague la máquina virtual %s y vuelva a intentarlo." 
#: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar la modalidad de compatibilidad " "del procesador. Apague la máquina virtual %s y vuelva a intentarlo." #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar la modalidad de " "de mantenimiento. Apague la máquina virtual %s y vuelva a intentarlo." #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "El número deseado de procesadores (%(vcpus)d) no puede ser mayor que el número máximo de procesadores " "permitidos por partición (%(max_allowed)d) para la máquina virtual " "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "El número máximo de procesadores (%(vcpus)d) no puede ser mayor que el límite máximo " "de capacidad del procesador (%(max_allowed)d) para la máquina virtual " "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "La máquina virtual se debe apagar antes de cambiar la capacidad de reinicio " "remoto simplificado. Apague la máquina virtual %s y vuelva a intentarlo." 
#: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "La partición no tiene una conexión RMC activa." #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "La partición no tiene una prestación DLPAR activa para %s." #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "E/S" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "Memoria" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "Procesadores" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "KeylockPos no válido '%s'." #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "BootMode no válido '%s'." #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapter está en desuso. Utilice IOSlot.io_adapter en su lugar." #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "No se ha podido determinar el MTMS (tipo de máquina, modelo, número de serie) " "de consola de gestión maestra a partir de %(identifier)s porque ninguna %(param)s estaba marcada como la" " consola maestra para la agrupación." #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "No se puede establecer el UUID." 
#: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "No se puede convertir %(property_name)s='%(value)s' en el objeto %(pvmobject)s" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "Se rechaza establecer href en varios enlaces.\n" "Vía de acceso: %{path}s\n" "Número de enlaces encontrados: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "Se rechaza construir y derivar un elemento sin un código." #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "Falta la propiedad 'entry' en la respuesta." #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "Debe proporcionar una respuesta o entrada para ajustar. Se obtuvo %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "Error de desarrollador: especifique 'parent' o ('parent_type' y 'parent_uuid') para" " recuperar un objeto CHILD." #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "Especifique 'uuid' o 'root_id' al solicitar un objeto ROOT." #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "Tanto parent_type como parent_uuid son necesarios al recuperar un feed " "o una entrada CHILD." #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "Especifique el UUID del padre mediante el parámetro parent_uuid." 
#: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "Especifique 'uuid' o 'child_id' al solicitar un objeto CHILD." #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "Se ha especificado el UUID sin un tipo padre." #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "El método search() requiere exactamente un argumento clave=valor." #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "La clase de derivador %(class)s no da soporte a la clave de búsqueda '%(key)s'." #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." msgstr "" "El parámetro 'xag' para EntryWrapper.update ya no se utiliza. En el mejor de los casos, su uso" " dará como resultado que se no realizará ninguna operación. En el peor de los casos, le dará errores de discrepancia de " "etag irresolubles." #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "No existe ningún elemento hijo de este tipo." #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "No se puede establecer el UUID en el derivador sin metadatos." #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "El valor del UUID no es válido: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "Debe especificar una subclase de derivador." #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "Debe especificar una clase padre y un UUID padre, o ninguno de los dos." 
#: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "El trabajo %(job_id)s supervisará durante %(time)i segundos." #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "Se está emitiendo una solicitud de cancelación para el trabajo %(job_id)s. Sondeará el trabajo de forma indefinida" " para comprobar si ha terminado." #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "El trabajo %s no se ha suprimido. El trabajo se encuentra en estado de ejecución." #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "La LPAR no está en un estado activo." #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "El sistema de destino no tiene la prestación IBM i LPAR Mobility." #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "La LPAR de IBM i no tiene E/S restringida." #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "El sistema de origen no tiene la prestación de movilidad de LPAR de IBM i." #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "La LPAR no tiene una conexión RMC activa." #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "La partición lógica LPAR es la partición de gestión" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "La LPAR no está disponible para LPM debido a que faltan prestaciones DLPAR." 
#: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "Esta no es la propiedad que busca. Utilice srr_enabled en un " "entorno NovaLink." #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "IPLSrc no válido '%s'." #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "Esta propiedad ya no se utiliza. Utilice en su lugar pci_subsys_dev_id." #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "Esta propiedad ya no se utiliza. Utilice en su lugar pci_rev_id." #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "Esta propiedad ya no se utiliza. Utilice en su lugar pci_subsys_vendor_id." #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "Esta propiedad ya no se utiliza. Utilice en su lugar drc_index." #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "Esta propiedad ya no se utiliza. Utilice en su lugar drc_name." #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "Especificación padre no válida para CNA.create." #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV había codificado el descriptor pg83 \"%(pg83_raw)s\", pero no lo ha podido decodificar " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." 
msgstr "" "La propiedad 'xags' de la clase VIOS EntryWrapper está obsoleta. " " En su lugar, use valores de pypowervm.const.XAG." #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "La partición del tipo de VIOS no es apta para LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "No se puede especificar LUA de dispositivo de destino sin un dispositivo de almacenamiento de respaldo." # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/zh-Hant/0000775000175000017500000000000013571367172020071 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/zh-Hant/pypowervm.po0000664000175000017500000016623213571367171022512 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=1; plural=0;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "無效的通訊å”定 \"%s\"" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." msgstr "與 PowerVM 的通訊未加密ï¼è«‹å°‡é…置回復為 https。" #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "計算é è¨­å¯©æ ¸å‚™å¿˜éŒ„失敗,將使用 'default'." #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." 
msgstr "HMC ä¸Šä¸æ”¯æ´æœ¬ç«¯é‘‘別。" #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "正在設定 %s 的事件接è½å™¨" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "在 %s è¦æ±‚上發ç¾éžé æœŸçš„æª”案控點" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "%(meth)s %(url)s 發生éžé æœŸçš„錯誤" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "éžé æœŸçš„錯誤:%(method)s %(url)s çš„ %(class)s:%(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." msgstr "" "釿–°ç™»å…¥è¢«èªç‚ºä¸å®‰å…¨ã€‚䏿‡‰è©²å†ä½¿ç”¨æ­¤éšŽæ®µä½œæ¥­" "實例。" #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "æ­£åœ¨å˜—è©¦é‡æ–°ç™»å…¥ %s" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "釿–°ç™»å…¥ 401,回應主體:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "釿–°ç™»å…¥å¤±æ•—,回應主體:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "釿–°ç™»å…¥å¤±æ•—:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "釿–°å˜—試失敗,å¦ä¸€å€‹ 401,回應主體:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "%(method)s %(path)s çš„å¯ç–‘ HTTP 401 回應:記號是全新的" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." 
msgstr "" "無法連接至 REST 伺æœå™¨ - 是å¦å·²å•Ÿå‹• pvm-rest æœå‹™ï¼Ÿ" "將在 %(delay)d 後進行第 %(try_num)d 次(共 %(max_tries)d 次)é‡è©¦ã€‚" #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "階段作業正在登入 %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "ç„¡æ³•å‰–æž PowerVM 回應中的階段作業記號。" #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " 主體 = %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." msgstr "ç„¡æ³•å‰–æž PowerVM 回應中的階段作業檔案路徑。" #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "記號檔案 %s 未包å«å¯è®€å–的階段作業記號。" #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "階段作業正在從 %s 登出" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "登出時發生å•題。將忽略。" #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "job 必須是一個 JobRequest 元素" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "JobRequest éºæ¼äº† OperationName" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "路徑 = %s 䏿˜¯ PowerVM API åƒç…§" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "path=%s 䏿˜¯ PowerVM API åƒç…§" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "檔案æè¿°å­ç„¡æ•ˆ" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "é æœŸçš„ root_id" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "é æœŸçš„ child_type" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "éžé æœŸçš„ suffix_type=%s" #: pypowervm/adapter.py:1027 
pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "é æœŸçš„ suffix_parm" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "é æœŸçš„ child_id" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "éžé æœŸçš„ child_id" #: pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "éžé æœŸçš„ root_id" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "éžé æœŸçš„ req_method=%s" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "剖æžä¾†è‡ª PowerVM çš„ XML 回應時發生錯誤:%s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "å›žæ‡‰ä¸æ˜¯ Atom 資訊來æº/é …ç›®" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "è¦æ±‚çš„éžé æœŸ HTTP 204" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "éžé æœŸçš„空白回應主體" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "è¦æ±‚標頭:%(reqheaders)s\n" "\n" "è¦æ±‚內文:%(reqbody)s\n" "\n" "回應標頭:%(respheaders)s\n" "\n" "回應主體:%(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "%(method)s %(path)s çš„ Atom 錯誤:%(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "階段作業ä¸å¾—為「無ã€" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." 
msgstr "階段作業上,æŸå€‹äº‹ä»¶æŽ¥è½å™¨å·²ç¶“在作用中。" #: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "ç„¡æ³•èµ·å§‹è¨­å®šäº‹ä»¶è³‡è¨Šä¾†æºæŽ¥è½å™¨ï¼š%s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "æ‡‰ç”¨ç¨‹å¼ ID \"%s\" 䏿˜¯å”¯ä¸€çš„" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "關機中" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "已經訂閱此處ç†ç¨‹å¼" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "處ç†ç¨‹å¼å¿…須是 EventHandler" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "在訂閱者清單中找ä¸åˆ°è™•ç†ç¨‹å¼" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "正在關閉 %s çš„ EventListener" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "%s çš„ EventListener 關閉已完æˆ" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "å–å¾— PowerVM 事件時發生錯誤:%s。(pvm-rest æœå‹™æ˜¯å¦å·²é—œé–‰ï¼Ÿï¼‰" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "éžé æœŸçš„ EventType=%s" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "è™•ç† PowerVM 事件時發生錯誤" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." 
msgstr "" "無法è¡ç”Ÿ WWPN %(wwpn)s çš„é©ç•¶å¯¦é«” FC 埠。「VIOS" " 延伸屬性群組ã€å¯èƒ½ä¸è¶³ã€‚查詢的 VIOS URI" " 為 %(vio_uri)s。" #: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "找ä¸åˆ°å…ƒç´ ï¼š%(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "找ä¸åˆ° LPAR:%(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "找ä¸åˆ°é…接器" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "'%(operation_name)s' 作業失敗。%(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "'%(operation_name)s' 作業失敗。無法在 %(seconds)d 秒內完æˆ" " 作業。" #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "無法在虛擬機器 %(lpar_nm)s 上執行作業系統關閉,因為其" "RMC 連線處於éžä½œç”¨ä¸­ã€‚" #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "無法關閉虛擬機器 %(lpar_nm)s 的電æºï¼š%(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "關閉虛擬機器 %(lpar_nm)s 在 %(timeout)d 秒後" "逾時。" #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "無法開啟虛擬機器 %(lpar_nm)s 的電æºï¼š%(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "開啟虛擬機器 %(lpar_nm)s 在 %(timeout)d 秒後" "逾時。" #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." 
msgstr "" "無法移除 VLAN %(vlan_id)d,因為它是å¦ä¸€å€‹ç¶²è·¯æ©‹æŽ¥å™¨ä¸Šçš„" " ä¸»è¦ VLAN ID。" #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "無法供應 VLAN %(vlan_id)d。它似乎包å«åœ¨" "Virtual I/O Server %(vios)s 上的è£ç½® '%(dev_name)s' 中。該è£ç½®" " æœªé€£ç·šè‡³ä»»ä½•ç¶²è·¯æ©‹æŽ¥å™¨ï¼ˆå…±ç”¨ä¹™å¤ªç¶²è·¯é…æŽ¥å¡ï¼‰ã€‚è«‹" "請手動移除該è£ç½®æˆ–將它新增至網路橋接器,然後å†" "繼續。" #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "å為 %(lu_name)s çš„é‚輯單元已經存在於下列共用儲存å€ä¸Šï¼š" "%(ssp_name)s。" #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. " "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "找ä¸åˆ°è¦å°‡è™›æ“¬å…‰çº–通é“åŸ å°æ˜ è‡³çš„實體埠。" "這是由於 Virtual I/O Server 無法使用,或者" " 實體光纖通é“åŸ çš„åŸ è¦æ ¼ä¸é©ç•¶ã€‚" #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "無法å°è™›æ“¬æ©Ÿå™¨å•Ÿå‹•主控å°ã€‚pypowervm API " " æ­£åœ¨ä»¥éžæœ¬ç«¯æ¨¡å¼åŸ·è¡Œã€‚僅當 " "pypowervm 與 PowerVM API 存在於相åŒä½ç½®æ™‚,æ‰èƒ½éƒ¨ç½²ä¸»æŽ§å°ã€‚" #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s 沒有å­ä½œæ¥­ï¼" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask ä¸èƒ½æœ‰ç©ºçš„資訊來æºã€‚" #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." 
msgstr "ä½œæ¥­ç³»çµ±æ‹’çµ•å­˜å–æª”案 %(access_file)s。" #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "ä½œæ¥­ç³»çµ±åœ¨å˜—è©¦è®€å–æª”案 %(access_file)s 時é‡åˆ° I/O 錯誤:" "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "移轉作業失敗。%(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "找ä¸åˆ° VM %(vm_name)s 的載入來æº" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "無法è¡ç”Ÿç¡¬ç¢Ÿ %(dev_name)s çš„ pg83 編碼。「VIOS" "未設定 parent_entry 屬性。這å¯èƒ½æ˜¯ç”±æ–¼ä½¿ç”¨" "é€éŽä¸æ”¯æ´ä¹‹å…§å®¹éˆå–å¾—çš„ PV。PV å¿…é ˆé€éŽä¸‹åˆ—æ–¹å¼é€²è¡Œå­˜å–:" "VIOS.phys_volsã€VG.phys_vols 或" "VIOS.scsi_mappings[n].backing_storage。" #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "ç„¡æ³•é‡æ–°å°æ˜  vSCSI å°æ˜ çš„å„²å­˜é«”å…ƒç´ ã€‚é æœŸå‰›å¥½æ‰¾åˆ°" "ä¸€å€‹ç›¸ç¬¦å°æ˜ ï¼Œä½†æ‰¾åˆ° %(num_mappings)d 個。" #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "ç„¡æ³•é‡æ–°å°æ˜  vSCSI å°æ˜ çš„儲存體元素。儲存體元素" "%(stg_name)s çš„å°æ˜ å·²å­˜åœ¨æ–¼ç”¨æˆ¶ç«¯ LPAR %(lpar_uuid)s 中。" #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." 
msgstr "" "找到è£ç½® %(devname)s %(count)d æ¬¡ï¼›é æœŸåªæ‰¾åˆ°è©²è£ç½®æœ€å¤š" "一次。" #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)s é‡åˆ°å¤šå€‹ç•°å¸¸ç‹€æ³ï¼š\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." msgstr "é æœŸåªæ‰¾åˆ°ä¸€å€‹ç®¡ç†åˆ†å‰²å€ï¼›ä½†æ‰¾åˆ° %(count)d 個。" #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "é æœŸåªæ‰¾åˆ° ID 為 %(lpar_id)d 的一個分割å€ï¼›ä½†å»æ‰¾åˆ°" "%(count)d." #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "åœ¨å…±ç”¨å„²å­˜å€ %(ssp_name)s 上找ä¸åˆ°é è¨­å±¤ç´šã€‚" #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "在任何 Virtual I/O Server ä¸Šï¼Œå‡æ‰¾ä¸åˆ° UDID 為 %(udid)s çš„" "è£ç½®ã€‚" #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "Virtual I/O Server ä¸è¶³ï¼Œç„¡æ³•支æ´è™›æ“¬æ©Ÿå™¨çš„" " è£ç½®ï¼ˆUDID 為 %(udid)s)。" #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "在任何 Virtual I/O Server ä¸Šï¼Œå‡æ‰¾ä¸åˆ°é æœŸçš„光纖 (%(fabrics)s)。" " " #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." 
msgstr "" "無法é‡å»ºè™›æ“¬æ©Ÿå™¨ã€‚它正在使用的 I/O 類型為" "%(io_type)s,虛擬機器é‡å»ºä¸æ”¯æ´é€™ç¨® I/O 類型。" #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." msgstr "" "目標系統上的 VFC æ’æ§½æ•¸ç›® (%(rebuild_slots)d) 與" " ç”¨æˆ¶ç«¯ç³»çµ±ä¸Šçš„æ’æ§½æ•¸ç›® (%(original_slots)d) ä¸ç¬¦ã€‚" "無法在這個系統上é‡å»ºæ­¤è™›æ“¬æ©Ÿå™¨ã€‚" #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "è‹¥è¦ç™»éŒ„網路è£ç½®çš„æ’æ§½è³‡è¨Šï¼Œå‰‡éœ€è¦ CNA 或" "VNIC 酿ޥå¡ã€‚ä½†å»æä¾›äº†ä»¥ä¸‹é …ç›®ï¼š%(wrapper)s。" #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "沒有足夠的作用中 Virtual I/O Server å¯ç”¨ã€‚é æœŸ" "%(exp)d 個;但找到 %(act)d 個。" #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "沒有 Virtual I/O Server å¯ç”¨ã€‚已嘗試等待 VIOS 變æˆ" "ä½œç”¨ä¸­ç‹€æ…‹é” %(wait_time)d 秒。請檢查 PowerVM NovaLink" "與 Virtual I/O Server 之間的 RMC 連線。" #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "找ä¸åˆ°ä»»ä½• SR-IOV 酿ޥå¡è™•æ–¼ Sriov 模å¼åŠåŸ·è¡Œä¸­ç‹€æ…‹ã€‚\n" "ä½ç½® | æ¨¡å¼ | 狀態\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." 
msgstr "" "無法滿足備æ´éœ€æ±‚(%(red)d 個)。找到 %(found_vfs)d " " 個å¯è¡Œçš„æ”¯æŒè£ç½®ã€‚" #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "å—管ç†ç³»çµ±ä¸æ”¯æ´ vNIC。" #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "æ²’æœ‰è™•æ–¼ä½œç”¨ä¸­ç‹€æ…‹ä¸”æ”¯æ´ vNIC çš„ VIOS。" #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "指定了 %(red)d 個備æ´ï¼Œä½†å—管ç†ç³»çµ±ä¸å…·å‚™ vNIC " " 失效接手功能。" #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "å·²æŒ‡å®šå‚™æ´ %(red)d,但沒有具備 vNIC 失效接手功能的" "作用中 VIOS。" #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "找ä¸åˆ°åœ¨å…¶ä¸­å„²å­˜è™›æ“¬å…‰å­¸åª’體的" "ç£å€ç¾¤çµ„ %(vol_grp)s。無法建立媒體儲存庫。" #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "未嘗試進行å—管ç†ç³»çµ±æ›´æ–°ï¼Œå› ç‚ºæ‰€è¦æ±‚的變更" " 是é‡å°ä¸€å€‹æˆ–多個正在由 vNIC 使用的 SR-IOV 實體埠而發出。\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "無法建立 VNC 型虛擬終端機:%(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." msgstr "䏿”¯æ´é…接å¡å¿«å–。" #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "'%(enum)s' 的值 '%(value)s' 無效。有效值為:" "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." 
msgstr "找ä¸åˆ°å稱為 %(vios_name)s çš„ VIOS。" #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "找ä¸åˆ°å稱為 %(vg_name)s çš„ç£å€ç¾¤çµ„。" #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "å稱為 %(part_name)s 的分割å€ä¸æ˜¯ IBMi 分割å€ã€‚" #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "PanelJob 功能分割å€å¼•數是空的。" #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "ç•«é¢åŠŸèƒ½ä½œæ¥­ %(op_name)s ç„¡æ•ˆã€‚é æœŸå…¶ä¸­ä¸€å€‹ %(valid_ops)s " "。" #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "é‡å° VIOS %(vios_uuid)s çš„ ISCSI 探索失敗。回覆碼:%(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "é‡å° VIOS %(vios_uuid)s çš„ ISCSI 登出失敗。回覆碼:%(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "é‡å° VIOS %(vios_uuid)s 執行的 ISCSI 移除作業失敗。回覆碼:%(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." msgstr "找ä¸åˆ° VIOS %(vios_uuid)s çš„ Vstor %(stor_udid)s。" #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "所æå‡ºçš„延伸屬性群組 '%(arg_xag)s' èˆ‡ä¸‹åˆ—ç¾æœ‰å»¶ä¼¸å±¬æ€§ç¾¤çµ„ä¸ç¬¦ï¼š" "'%(path_xag)s'" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "æ†‘è­‰å·²éŽæœŸã€‚" #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." 
msgstr "字首åŠå­—尾總共ä¸èƒ½è¶…éŽ %d 個字元。" #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "總長度必須至少是 1 個字元。" #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "åç¨±åƒæ•¸çš„長度必須至少是一個字元。" #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "ç•¶ trunk_ok 為 False 時,åç¨±åƒæ•¸ä¸å¾—è¶…éŽ %d 個字元。" #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "開發者錯誤:局部æ¯é …è¦ç¯„。" #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "開發者錯誤:parent_type 必須為字串綱目類型或 " "å°å¥—å­é¡žåˆ¥ã€‚" #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." msgstr "無效值 '%(bad_val)s'ã€‚é æœŸç‚º %(good_vals)s 中的一個或一個清單。" #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "è¦æ±‚:%s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "回應:%s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "正在等待進行中的上傳作業完æˆã€‚標記 LU:%s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "正在放棄支æ´é€²è¡Œä¸­çš„上傳。" #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "æ­£åœ¨æ”¾æ£„ä¸Šå‚³ï¼Œè½‰ç‚ºæ”¯æ´æ¨™è¨˜ %s。" #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "æ­£åœ¨ä½¿ç”¨å·²ä¸Šå‚³çš„æ˜ åƒæª” LU %s。" #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "正在建立標記 LU %s" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." 
msgstr "æ­£åœ¨ä¸Šå‚³è‡³æ˜ åƒæª” LU %(lu)s(標記 %(mkr)s)。" #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "正在移除失敗的 LU %s。" #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "在系統上找ä¸åˆ°è™›æ“¬äº¤æ›å™¨ %s。" #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "找ä¸åˆ°è™›æ“¬äº¤æ›å™¨ %s 的有效 VLAN。" #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "å–得主機(UUID 為 '%(host)s')的主機記憶體é¡å¤–負擔時發生錯誤:" "%(error)s." #: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "等待所有已開啟電æºä¹‹ Virtual I/O Server çš„ RMC 狀態" "變æˆä½œç”¨ä¸­æ™‚逾時。等待時間為 %(time)d 秒。未變æˆ" "作用中的 VIOS 為:%(vioses)s。" #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "å‡å®šç„¡èªªæ˜Žçš„æ’æ§½æ˜¯å¯¦é«” I/O:%s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "å·²é–‹å•Ÿåˆ†å‰²å€ %s 的電æºã€‚" #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "å·²é—œé–‰åˆ†å‰²å€ %s 的電æºã€‚" #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "已淘汰將 add_parms 指定為 dict é€™ä¸€åšæ³•。請改為指定 %s" "實例。" #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "IBMi 作業系統正常關閉失敗。正在嘗試立å³é—œé–‰ä½œæ¥­ç³»çµ±ã€‚" "分割å€ï¼š%s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. 
" "Partition: %s" msgstr "" "IBMi 作業系統立å³é—œé–‰å¤±æ•—。正在嘗試正常關閉 VSP。" "分割å€ï¼š%s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. " "Partition: %s" msgstr "" "éž IBMi 作業系統立å³é—œé–‰å·²é€¾æ™‚。正在嘗試強迫關閉 VSP。" "分割å€ï¼š%s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "éž IBMi 作業系統立å³é—œé–‰å¤±æ•—。正在嘗試正常關閉 VSP。" "分割å€ï¼š%s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "強迫關閉 VSP,發生é è¨­é€¾æ™‚。分割å€ï¼š%s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "正在é‡è©¦ä¿®æ”¹ SCSI å°æ˜ ã€‚" #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "找到 %(stg_type)s 儲存體元素 %(stg_name)s çš„ç¾æœ‰å°æ˜ ï¼šå¾ž" "Virtual I/O Server %(vios_name)s 至用戶端 LPAR %(lpar_uuid)s。" #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "正在建立 %(stg_type)s 儲存體元素 %(stg_name)s çš„å°æ˜ ï¼šå¾ž" "Virtual I/O Server %(vios_name)s 至用戶端 LPAR %(lpar_uuid)s。" #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "ä¸å¾—åŒæ™‚指定 match_func å’Œ stg_elem。" #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "register_cna 方法已淘汰ï¼è«‹ä½¿ç”¨ register_vnet" "方法。" #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! Please use the drop_vnet method." 
msgstr "drop_cna 方法已淘汰。請使用 drop_vnet 方法。" #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "ä½ç½® %(loc_code)s 處的 SR-IOV 實體埠正在支æ´å±¬æ–¼ä¸‹åˆ— LPAR çš„" " vNIC:%(lpar_name)s(LPAR UUID:%(lpar_uuid)sï¼›vNIC UUID:" "%(vnic_uuid)s)." #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "正在變更下列 SR-IOV 實體埠標籤,å³ä½¿å®ƒå€‘正在由" "vNIC 使用,亦是如此:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "無法刪除 UUID 為 %s çš„ vio_file。必須將其手動刪除。" #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "上傳時é‡åˆ°å•題。將é‡è©¦ã€‚" #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "crt_lu_linked_clone 方法已淘汰ï¼è«‹ä½¿ç”¨ crt_lu" "方法(clone=src_lu,size=lu_size_gb)。" #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "ç£ç¢Ÿé‚輯單元 %(luname)s 沒有支æ´çš„æ˜ åƒæª” LU。(UDID:%(udid)s)" #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." msgstr "在檔案上傳時,找ä¸åˆ°æ–°çš„ vDisk。" #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "忽略è£ç½®ï¼Œå› ç‚ºè©²è£ç½®ç¼ºå°‘ UDID:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." 
msgstr "在清單中找ä¸åˆ°è£ç½® %s。" #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "正在從ç£å€ç¾¤çµ„ %(vg)s 刪除虛擬ç£ç¢Ÿ %(vdisk)s" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "正在從ç£å€ç¾¤çµ„ %(vg)s 刪除虛擬光學è£ç½® %(vopt)s" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "正在移除 LU %(lu_name)s (UDID %(lu_udid)s)" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "找ä¸åˆ° LU %(lu_name)s - å¯èƒ½å·²åœ¨é »å¸¶å¤–將其刪除。" "(UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "æ­£åœ¨ç§»é™¤æ˜ åƒæª” LU %(lu_name)s,因為ä¸å†ä½¿ç”¨å®ƒã€‚(UDID:" "%(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "找ä¸åˆ°æ”¯æ´çš„ LU %(lu_name)s。(UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "開發者錯誤:需è¦å±¤ç´šæˆ– lufeed。" #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." msgstr "開發者錯誤:lufeed åƒæ•¸å¿…é ˆåŒ…å« LUEnt EntryWrappers。" #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "正在刪除 LU %(lu_name)s(UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." 
" (UDID: %(lu_udid)s)" msgstr "" "正在忽略 HttpError,å¯èƒ½å·²åœ¨é »å¸¶å¤–刪除 LU %(lu_name)s。" " (UDID:%(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "正在將 %(num_maps)d 個孤立 %(stg_type)s å°æ˜ å¾ž VIOS " "%(vios_name)s." #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "正在從 VIOS %(vios_name)s 中移除 %(num_maps)d 個缺少埠的 VFC å°æ˜ ã€‚" #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "正在將 %(num_maps)d 個與 LPAR ID " "%(lpar_id)d 相關è¯çš„ %(stg_type)s å°æ˜ å¾ž VIOS %(vios_name)s 中移除。" #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "未移除類型 %(stg_type)s 的儲存體 %(stg_name)s,因為" "無法判定它是å¦ä»åœ¨ä½¿ç”¨ä¸­ã€‚å¯èƒ½éœ€è¦æ‰‹å‹•驗證和" "清ç†ã€‚" #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." 
msgstr "" "儲存體清除忽略儲存體元素 %(stg_name)s,因為它是" "éžé æœŸçš„類型 %(stg_type)s。" #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "正在從 VIOS %(vios)s 清除以下 %(vdcount)d 個虛擬ç£ç¢Ÿï¼š" "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "正在從 VIOS %(vios)s 清除以下 %(vocount)d 個虛擬光學è£ç½®ï¼š" "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "正在從 VIOS %(vios_name)s è·³éŽä¸‹åˆ— LPAR ID çš„ %(stg_type)s å°æ˜ æ¸…除," "原因是已存在這些 LPAR:%(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "找ä¸åˆ°é©ç•¶çš„ VIOS。æä¾›çš„æœ‰æ•ˆè² è¼‰" "å¯èƒ½ä¸è¶³ã€‚有效負載資料為:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "相符的 VFC åŸ å°æ˜ æœªè¨­å®šæ”¯æ´åŸ ã€‚正在將 %(port)s 新增至" "下列用戶端 WWPN çš„å°æ˜ ï¼š%(wwpns)s" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "查詢虛擬光學媒體儲存庫時發生錯誤。" "æ­£åœ¨å˜—è©¦é‡æ–°å»ºç«‹è™›æ“¬å…‰å­¸åª’體儲存庫" "連線。" #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." msgstr "無法關閉 vterm。" #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. 
Error was %s" msgstr "vterm 開啟時的輸出無效。正在嘗試é‡è¨­ vterm。錯誤為 %s" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "VNCSocket 接è½å™¨æ­£åœ¨æ–¼ IP %(ip)s 埠 %(port)s 上接è½" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "å° VNC 轉é€ç«™å”è­° SSL 時發生錯誤:%s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "硬碟探索失敗;將清除 LPAR ID %s 的陳舊儲存體," "然後å†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "LUA 回復æˆåŠŸã€‚æ‰¾åˆ°è£ç½®ï¼š%s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "發生 ITL 錯誤:%s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "%s è£ç½®ç›®å‰åœ¨ä½¿ç”¨ä¸­ã€‚" #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "æŽ¢ç´¢åˆ°å…·æœ‰ä¸æ˜Ž UDID çš„ %s è£ç½®ã€‚" #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "無法探索è£ç½®ï¼š%s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "CLIRunner 錯誤:%s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." msgstr "" "QUERY_INVENTORY LUARecovery 工作æˆåŠŸï¼Œä½†çµæžœä¸åŒ…å«" "OutputXML 和標準輸出。" #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). 
Error: %(err)s" msgstr "QUERY_INVENTORY 產生了 XML (%(chunk)s) 的無效å€å¡Šã€‚錯誤:%(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "在 XML 輸出中找ä¸åˆ° pg83 æè¿°å­ï¼š\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "ISCSI 指令已順利完æˆ" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "ISCSI 階段作業已存在,並且已登入" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "ISCSI 指令在ä¸å—支æ´çš„ VIOS 主機上執行。" #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." msgstr "ISCSI 探索在 ODM 資料庫中找到舊項目。" #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "找ä¸åˆ° ISCSI 階段作業" #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "找ä¸åˆ°è¦åœ¨å…¶ä¸ŠåŸ·è¡Œä½œæ¥­çš„記錄/目標/階段作業/å…¥å£ç¶²ç«™" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "ISCSI 指令失敗,內部錯誤狀態 = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "ISCSI åŒå±¬éŒ¯èª¤ç¢¼" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "ISCSI 階段作業登入失敗" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "ISCSI 指令無效引數" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." 
msgstr "嘗試連接時,ISCSI é€£ç·šè¨ˆæ™‚å™¨éŽæœŸã€‚" #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "ISCSI 指令無法查閱主機" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "ISCSI 指令傳回éžé æœŸçš„狀態 = %s" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "ISCSI 指令在ä¸å—支æ´çš„ VIOS 上執行" #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "清除 LPAR ID %s 的陳舊儲存體,然後é‡è©¦ iSCSI 探索。" #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." msgstr "" "無法使用度é‡è³‡æ–™ã€‚這å¯èƒ½æ˜¯å› ç‚ºæœ€è¿‘正在" "起始設定度é‡ã€‚" #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "這是測試" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "一則訊æ¯ï¼Œé‡å°æ­¤è¨Šæ¯ä¸å­˜åœ¨è½‰æ›" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "處ç†å™¨è£ç½®å› å­å¿…須介於 0.05 到 1.0 之間。值:%s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "é‚輯分割å€å稱的長度無效。å稱:%s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "æ¬„ä½ '%(field)s' 具有無效的值:'%(value)s'" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "None 值無效。" #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "值 '%(value)s' å°æ–¼æ¬„ä½ '%(field)s' è€Œè¨€ç„¡æ•ˆï¼Œå¯æŽ¥å—çš„" "鏿“‡ï¼š%(choices)s" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. 
Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "æ¬„ä½ '%(field)s' 的值低於下é™ã€‚值:%(value)sï¼›" "下é™ï¼š%(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "æ¬„ä½ '%(field)s' 的值高於上é™ã€‚值:%(value)sï¼›" "上é™ï¼š%(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. " "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "'%(desired_field)s' 具有高於 '%(max_field)s' 值的值。" "期望:%(desired)s 上é™ï¼š%(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "'%(desired_field)s' 具有低於 '%(min_field)s' 值的值。" "期望:%(desired)s 下é™ï¼š%(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "è¨˜æ†¶é«”å€¼ä¸æ˜¯ä¸»æ©Ÿé‚輯記憶體å€å¡Šå¤§å°" "主機的 (%(lmb_size)s)。值:%(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "å—管ç†ç³»çµ±ä¸æ”¯æ´ä½œç”¨ä¸­è¨˜æ†¶é«”擴充。「VIOS" "擴充因數值 '%(value)s' 無效。" #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "作用中記憶體擴充值必須大於或等於 1.0,且" "å°æ–¼æˆ–等於 10.0。值 0 也有效,用來指示" " AME 關閉。'%(value)s' 無效。" #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. 
Error was a known " "retry response code: %(resp_code)s" msgstr "" "å° URI %(uri)s 嘗試第 %(retry)d 次(總計 %(total)d 次)。錯誤是一個已知的" "é‡è©¦å›žæ‡‰ç¢¼ï¼š%(resp_code)s" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." msgstr "" "第 %(retry)d 次(總共 %(total)d 次)嘗試失敗。將é‡è©¦ã€‚異常狀æ³ç‚ºï¼š\n" " %(except)s." #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "å¿…é ˆæä¾› EntryWrapper 或 EntryWrapperGetter。" #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "å¿…é ˆæä¾›æœ‰æ•ˆçš„å­ä½œæ¥­ã€‚" #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "é‡è¤‡çš„「æä¾›é …ç›®ã€å稱 %s。" #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s 沒有å­ä½œæ¥­ï¼›æ²’有作業執行。" #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "å¿…é ˆæä¾› EntryWrapper 的清單,或者æä¾› FeedGetter。" #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s 沒有å­ä½œæ¥­ï¼›æ²’有作業執行。" #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." 
msgstr "" "FeedTask %s é‡åˆ°å¤šå€‹ç•°å¸¸ç‹€æ³ã€‚下é¢é€å€‹è¨˜è¼‰äº†" " 這些異常狀æ³ã€‚" #: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "é‡å°ä¸‹åˆ—虛擬機器,主機上å¯ç”¨çš„ %(res_name)s ä¸è¶³ï¼š" "'%(instance_name)s'ï¼ˆè¦æ±‚ %(requested)s å€‹ï¼Œä½†åªæœ‰ %(avail)s 個å¯ç”¨ï¼‰" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "記憶體" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œæ‰èƒ½è®Šæ›´" "è¨˜æ†¶é«”ä¸‹é™æˆ–上é™ã€‚請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œå†è®Šæ›´" "擴充因數。請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPU 數目" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "處ç†è£ç½®" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œæ‰èƒ½è®Šæ›´" "處ç†å™¨æ•¸ç›®ä¸‹é™æˆ–上é™ã€‚請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." 
msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œæ‰èƒ½è®Šæ›´" "處ç†å™¨è£ç½®æ•¸ç›®ä¸‹é™æˆ–上é™ã€‚請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œæ‰èƒ½è®Šæ›´" "處ç†å™¨ç›¸å®¹æ¨¡å¼ã€‚請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œæ‰èƒ½è®Šæ›´è™•ç†" "模å¼ã€‚請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "想è¦çš„處ç†å™¨æ•¸ç›® (%(vcpus)d) ä¸èƒ½è¶…éŽ" "下列虛擬機器之æ¯å€‹åˆ†å‰²å€å®¹è¨±çš„處ç†å™¨æ•¸ç›®ä¸Šé™ (%(max_allowed)d):" "'%(instance_name)s'." #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "處ç†å™¨æ•¸ç›®ä¸Šé™ (%(vcpus)d) ä¸å¾—è¶…éŽ" "下列虛擬機器的系統容é‡è™•ç†å™¨ä¸Šé™ (%(max_allowed)d):" "'%(instance_name)s'." #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "必須先關閉虛擬機器電æºï¼Œç„¶å¾Œæ‰èƒ½è®Šæ›´" "已簡化的é ç«¯é‡æ–°å•Ÿå‹•功能。請關閉虛擬機器 %s 的電æºï¼Œç„¶å¾Œå†è©¦ä¸€æ¬¡ã€‚" #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." 
msgstr "åˆ†å‰²å€æ²’有作用中的 RMC 連線。" #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "åˆ†å‰²å€æ²’有 %s 的作用中 DLPAR 功能。" #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "I/O" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "記憶體" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "處ç†å™¨æ•¸ç›®" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." msgstr "KeylockPos '%s' 無效。" #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "BootMode '%s' 無效。" #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapter 已淘汰ï¼è«‹æ”¹ç‚ºä½¿ç”¨ IOSlot.io_adapter。" #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "無法根據 %(identifier)s 判定主è¦ç®¡ç†ä¸»æŽ§å° MTMSï¼ˆæ©Ÿåž‹ã€æ¨¡åž‹ã€" "åºè™Ÿï¼‰ï¼Œå› ç‚ºæœªå°‡ä»»ä½• %(param)s 標示為" " 儲存å€çš„主è¦ä¸»æŽ§å°ã€‚" #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "無法設定 UUID。" #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "在物件 %(pvmobject)s ä¸­ï¼Œç„¡æ³•è½‰æ› %(property_name)s='%(value)s'" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "拒絕é€éŽå¤šé‡éˆçµè¨­å®š href。\n" "路徑:%{path}s\n" "找到的éˆçµæ•¸ç›®ï¼š%{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." 
msgstr "拒絕建構åŠè¦†è“‹ä¸å«æ¨™è¨˜çš„元素。" #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "å›žæ‡‰éºæ¼äº† 'entry' 內容。" #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. Got %s" msgstr "å¿…é ˆæä¾› Response 或 Entry æ‰èƒ½è¦†è“‹ã€‚å–å¾— %s" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "開發者錯誤:指定 'parent' 或 ('parent_type' and 'parent_uuid') 以" " æ“·å– CHILD 物件。" #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "åœ¨è¦æ±‚根物件時,指定 'uuid' 或 'root_id'。" #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "æ“·å–å­é …è³‡è¨Šä¾†æºæˆ–é …ç›®æ™‚ï¼Œéœ€è¦ parent_type åŠ" "parent_uuid。" #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "é€éŽ parent_uuid åƒæ•¸æŒ‡å®šæ¯é …çš„ UUID。" #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "åœ¨è¦æ±‚å­ç‰©ä»¶æ™‚,指定 'uuid' 或 'child_id'。" #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "指定的æ¯é … UUID ä¸å«è¦ªé …類型。" #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "search() 方法åªéœ€è¦ä¸€å€‹ key=value 引數。" #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "å°å¥—類別 %(class)s 䏿”¯æ´æœå°‹é—œéµå­— '%(key)s'。" #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. 
At worst, it will give you incurable etag " "mismatch errors." msgstr "" "EntryWrapper.update çš„ 'xag' åƒæ•¸å·²æ·˜æ±°ï¼ä½¿ç”¨æ­¤åƒæ•¸çš„" " çš„æœ€å¥½çµæžœæ˜¯ç”¢ç”Ÿç©ºä½œæ¥­ã€‚æœ€å·®çµæžœæ˜¯é€ æˆç„¡æ³•更正的 etag" "ä¸ç¬¦éŒ¯èª¤ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "沒有此類å­å…ƒç´ ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "無法在沒有 Meta 資料的å°å¥—上設定 UUID。" #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "UUID 值無效:%s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "必須指定å°å¥—å­é¡žåˆ¥ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "å¿…é ˆåŒæ™‚指定æ¯é¡žåˆ¥åŠæ¯é … UUIDï¼Œæˆ–è€…å…©è€…çš†ä¸æŒ‡å®šã€‚" #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "正在監視工作 %(job_id)s %(time)i 秒。" #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "正在å°å·¥ä½œ %(job_id)s ç™¼å‡ºå–æ¶ˆè¦æ±‚ã€‚å°‡ç„¡é™æœŸåœ°è¼ªè©¢å·¥ä½œ" " 以進行終止。" #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "未刪除工作 %s。工作處於執行中狀態。" #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPAR 未處於作用中狀態。" #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "目標系統沒有 IBM i LPAR 行動性功能。" #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." msgstr "IBM i LPAR 沒有å—é™çš„ I/O。" #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." 
msgstr "來æºç³»çµ±æ²’有 IBM i LPAR 行動性功能。" #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "LPAR 沒有作用中的 RMC 連線。" #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR 是管ç†åˆ†å‰²å€" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "ç”±æ–¼éºæ¼ DLPAR 功能,LPM 無法使用 LPAR。" #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "這䏿˜¯æ‚¨è¦å°‹æ‰¾çš„內容。請在 NovaLink 環境中" "使用 srr_enabled。" #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "無效的 IPLSrc '%s'。" #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "此內容已淘汰ï¼è«‹æ”¹ç‚ºä½¿ç”¨ pci_subsys_dev_id。" #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "此內容已淘汰ï¼è«‹æ”¹ç‚ºä½¿ç”¨ pci_rev_id。" #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "此內容已淘汰ï¼è«‹æ”¹ç‚ºä½¿ç”¨ pci_subsys_vendor_id。" #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." msgstr "此內容已淘汰ï¼è«‹æ”¹ç‚ºä½¿ç”¨ drc_index。" #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "此內容已淘汰ï¼è«‹æ”¹ç‚ºä½¿ç”¨ drc_name。" #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "CNA.create çš„æ¯é …è¦æ ¼ç„¡æ•ˆã€‚" #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." 
msgstr "" "PV 具有已編碼的 pg83 æè¿°å­ \"%(pg83_raw)s\",但無法解碼 " "(%(type_error)s)." #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" "VIOS EntryWrapper 類別的 'xags' 內容已淘汰ï¼è«‹" " 改為使用 pypowervm.const.XAG 中的值。" #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "VIOS 類型的分割å€ä¸æ”¯æ´ LPM" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "在沒有支æ´å„²å­˜è£ç½®çš„æƒ…æ³ä¸‹ï¼Œç„¡æ³•指定目標è£ç½® LUAï¼" # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/locale/ja/0000775000175000017500000000000013571367172017152 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/locale/ja/pypowervm.po0000664000175000017500000021365413571367171021574 0ustar neoneo00000000000000# English translations for pypowervm. # Copyright (C) 2018 ORGANIZATION # This file is distributed under the same license as the pypowervm project. # FIRST AUTHOR , 2018. # msgid "" msgstr "" "Project-Id-Version: pypowervm 1.1.11.dev47\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2018-10-02 09:44-0400\n" "PO-Revision-Date: 2018-10-02 11:01-0400\n" "Last-Translator: FULL NAME \n" "Language: en\n" "Language-Team: en \n" "Plural-Forms: nplurals=1; plural=0;" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.5.3\n" #: pypowervm/adapter.py:122 #, python-format msgid "Invalid protocol \"%s\"" msgstr "プロトコル「%sã€ã¯ç„¡åйã§ã™" #: pypowervm/adapter.py:128 msgid "Unencrypted communication with PowerVM! Revert configuration to https." 
msgstr "PowerVM ã¨ã®é€šä¿¡ãŒæš—å·åŒ–ã•れã¦ã„ã¾ã›ã‚“。構æˆã‚’ https ã«æˆ»ã—ã¦ãã ã•ã„。" #: pypowervm/adapter.py:142 msgid "Calculating default audit memento failed, using 'default'." msgstr "デフォルト監査記録を計算ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚「defaultã€ã‚’使用ã—ã¾ã™ã€‚" #: pypowervm/adapter.py:183 msgid "Local authentication not supported on HMC." msgstr "ローカルèªè¨¼ã¯ HMC ã§ã‚µãƒãƒ¼ãƒˆã•れã¦ã„ã¾ã›ã‚“。" #: pypowervm/adapter.py:202 #, python-format msgid "Setting up event listener for %s" msgstr "%s ã®ã‚¤ãƒ™ãƒ³ãƒˆãƒ»ãƒªã‚¹ãƒŠãƒ¼ã‚’セットアップã—ã¦ã„ã¾ã™" #: pypowervm/adapter.py:272 #, python-format msgid "Unexpected filehandle on %s request" msgstr "%s è¦æ±‚ã«å¯¾ã—ã¦äºˆæœŸã—ãªã„ファイル・ãƒãƒ³ãƒ‰ãƒ«ã§ã™" #: pypowervm/adapter.py:316 #, python-format msgid "Unexpected error for %(meth)s %(url)s" msgstr "%(meth)s %(url)s ã«é–¢ã—ã¦äºˆæœŸã—ãªã„エラーãŒç™ºç”Ÿã—ã¾ã—ãŸ" #: pypowervm/adapter.py:318 #, python-format msgid "Unexpected error: %(class)s for %(method)s %(url)s: %(excp)s" msgstr "%(method)s %(url)s ã«é–¢ã—ã¦äºˆæœŸã—ãªã„エラー %(class)s ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(excp)s" #: pypowervm/adapter.py:355 msgid "" "Re-login has been deemed unsafe. This Session instance should no longer " "be used." 
msgstr "" "å†ãƒ­ã‚°ã‚¤ãƒ³ã¯å®‰å…¨ã§ã¯ãªã„ã¨ã¿ãªã•れã¦ã„ã¾ã™ã€‚ ã“ã®ã‚»ãƒƒã‚·ãƒ§ãƒ³ãƒ»ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã¯" "今後使用ã™ã¹ãã§ã¯ã‚りã¾ã›ã‚“。" #: pypowervm/adapter.py:363 #, python-format msgid "Attempting re-login %s" msgstr "%s ã¸ã®å†ãƒ­ã‚°ã‚¤ãƒ³ã‚’試ã¿ã¦ã„ã¾ã™" #: pypowervm/adapter.py:374 #, python-format msgid "" "Re-login 401, response body:\n" "%s" msgstr "" "å†ãƒ­ã‚°ã‚¤ãƒ³ 401ã€å¿œç­”本体:\n" "%s" #: pypowervm/adapter.py:379 #, python-format msgid "" "Re-login failed, resp body:\n" "%s" msgstr "" "å†ãƒ­ã‚°ã‚¤ãƒ³ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚応答本体:\n" "%s" #: pypowervm/adapter.py:383 #, python-format msgid "" "Re-login failed:\n" "%s" msgstr "" "å†ãƒ­ã‚°ã‚¤ãƒ³ãŒå¤±æ•—ã—ã¾ã—ãŸ:\n" "%s" #: pypowervm/adapter.py:401 #, python-format msgid "" "Re-attempt failed with another 401, response body:\n" "%s" msgstr "" "別㮠401 ã§å†è©¦è¡ŒãŒå¤±æ•—ã—ã¾ã—ãŸã€‚応答本体:\n" "%s" #: pypowervm/adapter.py:404 #, python-format msgid "suspicious HTTP 401 response for %(method)s %(path)s: token is brand new" msgstr "%(method)s %(path)s ã«ã¤ã„ã¦ç–‘ã‚ã—ã„ HTTP 401 応答ã§ã™: ãƒˆãƒ¼ã‚¯ãƒ³ã¯æ–°è¦ã®ã‚‚ã®ã§ã™" #: pypowervm/adapter.py:456 #, python-format msgid "" "Failed to connect to REST server - is the pvm-rest service started? " "Retrying %(try_num)d of %(max_tries)d after %(delay)d seconds." msgstr "" "REST サーãƒãƒ¼ã«æŽ¥ç¶šã§ãã¾ã›ã‚“ã§ã—㟠- pvm-rest サービスã¯é–‹å§‹ã•れã¦ã„ã¾ã™ã‹? " "%(delay)d 秒後ã«å†è©¦è¡Œã—ã¾ã™ (%(try_num)d / %(max_tries)d)。" #: pypowervm/adapter.py:463 #, python-format msgid "Session logging on %s" msgstr "セッション・ログオン %s" #: pypowervm/adapter.py:526 msgid "Failed to parse a session token from the PowerVM response." msgstr "PowerVM 応答ã‹ã‚‰ã®ã‚»ãƒƒã‚·ãƒ§ãƒ³ãƒ»ãƒˆãƒ¼ã‚¯ãƒ³ã‚’è§£æžã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/adapter.py:528 pypowervm/adapter.py:544 #, python-format msgid " Body= %s" msgstr " 本体= %s" #: pypowervm/adapter.py:542 msgid "Failed to parse a session file path from the PowerVM response." 
msgstr "PowerVM 応答ã‹ã‚‰ã®ã‚»ãƒƒã‚·ãƒ§ãƒ³ãƒ»ãƒ•ァイル・パスを解æžã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/adapter.py:556 #, python-format msgid "Token file %s didn't contain a readable session token." msgstr "トークン・ファイル %s ã«ã¯èª­ã¿å–りå¯èƒ½ãªã‚»ãƒƒã‚·ãƒ§ãƒ³ãƒ»ãƒˆãƒ¼ã‚¯ãƒ³ãŒå«ã¾ã‚Œã¦ã„ã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/adapter.py:566 #, python-format msgid "Session logging off %s" msgstr "セッション・ログオフ %s" #: pypowervm/adapter.py:571 msgid "Problem logging off. Ignoring." msgstr "ログオフã™ã‚‹ã¨ãã«å•題ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚ 無視ã—ã¾ã™ã€‚" #: pypowervm/adapter.py:673 msgid "job must be a JobRequest element" msgstr "ジョブ㯠JobRequest エレメントã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“" #: pypowervm/adapter.py:677 msgid "JobRequest is missing OperationName" msgstr "JobRequest ã« OperationName ãŒã‚りã¾ã›ã‚“" #: pypowervm/adapter.py:691 pypowervm/adapter.py:847 pypowervm/adapter.py:900 #, python-format msgid "path=%s is not a PowerVM API reference" msgstr "path=%s 㯠PowerVM API å‚ç…§ã§ã¯ã‚りã¾ã›ã‚“" #: pypowervm/adapter.py:809 #, python-format msgid "path=%s not a PowerVM API reference" msgstr "path=%s 㯠PowerVM API å‚ç…§ã§ã¯ã‚りã¾ã›ã‚“" #: pypowervm/adapter.py:915 pypowervm/adapter.py:932 msgid "Invalid file descriptor" msgstr "ファイル記述å­ãŒç„¡åйã§ã™" #: pypowervm/adapter.py:1018 pypowervm/adapter.py:1046 #: pypowervm/adapter.py:1061 msgid "Expected root_id" msgstr "root_id ãŒäºˆæœŸã•れã¾ã—ãŸ" #: pypowervm/adapter.py:1020 msgid "Expected child_type" msgstr "child_type ãŒäºˆæœŸã•れã¾ã—ãŸ" #: pypowervm/adapter.py:1024 pypowervm/adapter.py:1050 #: pypowervm/adapter.py:1055 #, python-format msgid "Unexpected suffix_type=%s" msgstr "予期ã—ãªã„ suffix_type=%s ã§ã™" #: pypowervm/adapter.py:1027 pypowervm/adapter.py:1058 msgid "Expected suffix_parm" msgstr "suffix_parm ãŒäºˆæœŸã•れã¾ã—ãŸ" #: pypowervm/adapter.py:1029 pypowervm/adapter.py:1048 #: pypowervm/adapter.py:1063 msgid "Expected child_id" msgstr "child_id ãŒäºˆæœŸã•れã¾ã—ãŸ" #: pypowervm/adapter.py:1032 pypowervm/adapter.py:1041 msgid "Unexpected child_id" msgstr "予期ã—ãªã„ child_id ã§ã™" #: 
pypowervm/adapter.py:1034 pypowervm/adapter.py:1043 msgid "Unexpected root_id" msgstr "予期ã—ãªã„ root_id ã§ã™" #: pypowervm/adapter.py:1065 #, python-format msgid "Unexpected req_method=%s" msgstr "予期ã—ãªã„ req_method=%s ã§ã™" #: pypowervm/adapter.py:1133 #, python-format msgid "Error parsing XML response from PowerVM: %s" msgstr "PowerVM ã‹ã‚‰ã® XML 応答を解æžã—ã¦ã„ã‚‹ã¨ãã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %s" #: pypowervm/adapter.py:1146 msgid "Response is not an Atom feed/entry" msgstr "応答㯠Atom フィード/エントリーã§ã¯ã‚りã¾ã›ã‚“" #: pypowervm/adapter.py:1157 msgid "Unexpected HTTP 204 for request" msgstr "è¦æ±‚ã«å¯¾ã™ã‚‹äºˆæœŸã—ãªã„ HTTP 204" #: pypowervm/adapter.py:1165 msgid "Unexpectedly empty response body" msgstr "応答本体ãŒäºˆæœŸã›ãšç©ºã§ã™" #: pypowervm/adapter.py:1168 #, python-format msgid "" "%(err_reason)s:\n" "request headers: %(reqheaders)s\n" "\n" "request body: %(reqbody)s\n" "\n" "response headers: %(respheaders)s\n" "\n" "response body: %(respbody)s" msgstr "" "%(err_reason)s:\n" "è¦æ±‚ヘッダー: %(reqheaders)s\n" "\n" "è¦æ±‚本体: %(reqbody)s\n" "\n" "応答ヘッダー: %(respheaders)s\n" "\n" "応答本体: %(respbody)s" #: pypowervm/adapter.py:1176 #, python-format msgid "Atom error for %(method)s %(path)s: %(reason)s" msgstr "%(method)s %(path)s ã«é–¢ã—㦠Atom エラーãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(reason)s" #: pypowervm/adapter.py:1217 msgid "Session must not be None" msgstr "「セッションã€ã¯ã€Œãªã—ã€ã§ã‚ã£ã¦ã¯ãªã‚Šã¾ã›ã‚“" #: pypowervm/adapter.py:1219 msgid "An event listener is already active on the session." 
msgstr "ã“ã®ã‚»ãƒƒã‚·ãƒ§ãƒ³ã§ã¯ã‚¤ãƒ™ãƒ³ãƒˆãƒ»ãƒªã‚¹ãƒŠãƒ¼ãŒæ—¢ã«ã‚¢ã‚¯ãƒ†ã‚£ãƒ–ã«ãªã£ã¦ã„ã¾ã™ã€‚" #: pypowervm/adapter.py:1238 #, python-format msgid "Failed to initialize event feed listener: %s" msgstr "ã‚¤ãƒ™ãƒ³ãƒˆãƒ»ãƒ•ã‚£ãƒ¼ãƒ‰ãƒ»ãƒªã‚¹ãƒŠãƒ¼ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“ã§ã—ãŸ: %s" #: pypowervm/adapter.py:1242 #, python-format msgid "Application id \"%s\" not unique" msgstr "アプリケーション ID「%sã€ã¯å›ºæœ‰ã§ã¯ã‚りã¾ã›ã‚“" #: pypowervm/adapter.py:1250 msgid "Shutting down" msgstr "シャットダウン中" #: pypowervm/adapter.py:1253 msgid "This handler is already subscribed" msgstr "ã“ã®ãƒãƒ³ãƒ‰ãƒ©ãƒ¼ã¯æ—¢ã«ã‚µãƒ–スクライブã•れã¦ã„ã¾ã™" #: pypowervm/adapter.py:1261 msgid "Handler must be an EventHandler" msgstr "ãƒãƒ³ãƒ‰ãƒ©ãƒ¼ã¯ã‚¤ãƒ™ãƒ³ãƒˆãƒ»ãƒãƒ³ãƒ‰ãƒ©ãƒ¼ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“" #: pypowervm/adapter.py:1264 msgid "Handler not found in subscriber list" msgstr "ãƒãƒ³ãƒ‰ãƒ©ãƒ¼ãŒã‚µãƒ–スクライãƒãƒ¼ãƒ»ãƒªã‚¹ãƒˆã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: pypowervm/adapter.py:1271 #, python-format msgid "Shutting down EventListener for %s" msgstr "%s ã®ã‚¤ãƒ™ãƒ³ãƒˆãƒ»ãƒªã‚¹ãƒŠãƒ¼ã‚’シャットダウンã—ã¦ã„ã¾ã™" #: pypowervm/adapter.py:1275 #, python-format msgid "EventListener shutdown complete for %s" msgstr "%s ã®ã‚¤ãƒ™ãƒ³ãƒˆãƒ»ãƒªã‚¹ãƒŠãƒ¼ã®ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ãŒå®Œäº†ã—ã¾ã—ãŸ" #: pypowervm/adapter.py:1295 #, python-format msgid "Error while getting PowerVM events: %s. (Is the pvm-rest service down?)" msgstr "PowerVM イベントをå–å¾—ã™ã‚‹ã¨ãã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %s。 (pvm-rest サービスãŒãƒ€ã‚¦ãƒ³ã—ã¦ã„ã¾ã™ã‹?)" #: pypowervm/adapter.py:1338 #, python-format msgid "Unexpected EventType=%s" msgstr "予期ã—ãªã„イベント・タイプ (%s) ã§ã™" #: pypowervm/adapter.py:1365 msgid "Error while processing PowerVM events" msgstr "PowerVM イベントを処ç†ã—ã¦ã„ã‚‹ã¨ãã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #: pypowervm/exceptions.py:127 #, python-format msgid "" "Unable to derive the appropriate physical FC port for WWPN %(wwpn)s. The" " VIOS Extended Attribute Groups may have been insufficient. The VIOS URI" " for the query was %(vio_uri)s." 
msgstr "" "WWPN %(wwpn)s ã«é©ã—ãŸç‰©ç† FC ãƒãƒ¼ãƒˆã‚’å¾—ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“。 VIOS 拡張属性グループãŒå分ã§ã¯ãªã„å¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ 照会㮠VIOS URI 㯠%(vio_uri)s ã§ã—ãŸã€‚" #: pypowervm/exceptions.py:133 #, python-format msgid "Element not found: %(element_type)s %(element)s" msgstr "エレメントãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“: %(element_type)s %(element)s" #: pypowervm/exceptions.py:137 #, python-format msgid "LPAR not found: %(lpar_name)s" msgstr "LPAR ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“: %(lpar_name)s" #: pypowervm/exceptions.py:141 msgid "Adapter not found" msgstr "アダプターãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: pypowervm/exceptions.py:145 #, python-format msgid "The '%(operation_name)s' operation failed. %(error)s" msgstr "「%(operation_name)sã€æ“作ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚%(error)s" #: pypowervm/exceptions.py:149 #, python-format msgid "" "The '%(operation_name)s' operation failed. Failed to complete the task in" " %(seconds)d seconds." msgstr "" "「%(operation_name)sã€æ“作ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚ %(seconds)d 秒以内ã«ã‚¿ã‚¹ã‚¯ã‚’完了ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/exceptions.py:154 #, python-format msgid "" "Can not perform OS shutdown on Virtual Machine %(lpar_nm)s because its " "RMC connection is not active." msgstr "" "RMC 接続ãŒã‚¢ã‚¯ãƒ†ã‚£ãƒ–ã§ãªã„ãŸã‚ã€ä»®æƒ³ãƒžã‚·ãƒ³ %(lpar_nm)s ã§ OS ã®" "シャットダウンを実行ã§ãã¾ã›ã‚“。" #: pypowervm/exceptions.py:159 #, python-format msgid "Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "仮想マシン %(lpar_nm)s ã®é›»æºã‚’オフã«ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s" #: pypowervm/exceptions.py:163 #, python-format msgid "" "Power off of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." 
msgstr "" "仮想マシン %(lpar_nm)s ã®é›»æºã‚ªãƒ•㯠%(timeout)d 秒後ã«ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«" "ãªã‚Šã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:168 #, python-format msgid "Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s" msgstr "仮想マシン %(lpar_nm)s ã®é›»æºã‚’オンã«ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s" #: pypowervm/exceptions.py:172 #, python-format msgid "" "Power on of Virtual Machine %(lpar_nm)s timed out after %(timeout)d " "seconds." msgstr "" "仮想マシン %(lpar_nm)s ã®é›»æºã‚ªãƒ³ã¯ %(timeout)d 秒後ã«ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«" "ãªã‚Šã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:177 #, python-format msgid "" "Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN Identifier on" " a different Network Bridge." msgstr "" "VLAN %(vlan_id)d ã¯åˆ¥ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ»ãƒ–リッジ上㮠1 次 " "VLAN ID ã§ã‚ã‚‹ãŸã‚削除ã§ãã¾ã›ã‚“。" #: pypowervm/exceptions.py:182 #, python-format msgid "" "Unable to provision VLAN %(vlan_id)d. It appears to be contained on " "device '%(dev_name)s' on Virtual I/O Server %(vios)s. That device is not" " connected to any Network Bridge (Shared Ethernet Adapter). Please " "manually remove the device or add it to the Network Bridge before " "continuing." msgstr "" "VLAN %(vlan_id)d をプロビジョンã§ãã¾ã›ã‚“。 ã“れ㯠Virtual I/O Server " "%(vios)s 上ã®ãƒ‡ãƒã‚¤ã‚¹ã€Œ%(dev_name)sã€ã«å«ã¾ã‚Œã¦ã„ã‚‹å¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ " "ã“ã®ãƒ‡ãƒã‚¤ã‚¹ã¯ã„ãšã‚Œã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ»ãƒ–リッジ (共用イーサãƒãƒƒãƒˆãƒ»ã‚¢ãƒ€ãƒ—ター) " "ã«ã‚‚接続ã•れã¦ã„ã¾ã›ã‚“。 作業を続行ã™ã‚‹å‰ã«ã€æ‰‹å‹•ã§ã“ã®ãƒ‡ãƒã‚¤ã‚¹ã‚’" "削除ã™ã‚‹ã‹ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ»ãƒ–リッジã«è¿½åŠ ã—ã¦ãã ã•ã„。" #: pypowervm/exceptions.py:191 #, python-format msgid "" "A Logical Unit with name %(lu_name)s already exists on Shared Storage " "Pool %(ssp_name)s." msgstr "" "%(lu_name)s ã¨ã„ã†åå‰ã®è«–ç†è£…ç½®ã¯ã€å…±ç”¨ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ãƒ»ãƒ—ール " "%(ssp_name)s ä¸Šã«æ—¢ã«å­˜åœ¨ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/exceptions.py:196 msgid "" "Unable to find a physical port to map a virtual Fibre Channel port to. 
" "This is due to either a Virtual I/O Server being unavailable, or improper" " port specification for the physical Fibre Channel ports." msgstr "" "仮想ファイãƒãƒ¼ãƒ»ãƒãƒ£ãƒãƒ«ãƒ»ãƒãƒ¼ãƒˆã®ãƒžãƒƒãƒ—å…ˆã¨ãªã‚‹ç‰©ç†ãƒãƒ¼ãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。 " "ãã®åŽŸå› ã¯ã€Virtual I/O Server ãŒä½¿ç”¨ä¸å¯ã«ãªã£ã¦ã„ã‚‹ã“ã¨ã€ã¾ãŸã¯ç‰©ç†" "ファイãƒãƒ¼ãƒ»ãƒãƒ£ãƒãƒ«ãƒ»ãƒãƒ¼ãƒˆã«å¯¾ã—ã¦æŒ‡å®šã•れãŸãƒãƒ¼ãƒˆãŒæ­£ã—ããªã„ã“ã¨ã§ã™ã€‚" #: pypowervm/exceptions.py:203 msgid "" "Unable to start the console to the Virtual Machine. The pypowervm API is" " running in a non-local mode. The console can only be deployed when " "pypowervm is co-located with the PowerVM API." msgstr "" "仮想マシンã«å¯¾ã—ã¦ã‚³ãƒ³ã‚½ãƒ¼ãƒ«ã‚’é–‹å§‹ã§ãã¾ã›ã‚“。pypowervm API ãŒéžãƒ­ãƒ¼ã‚«ãƒ«ãƒ»" "モードã§å®Ÿè¡Œã•れã¦ã„ã¾ã™ã€‚コンソールã¯ã€pypowervm ㌠PowerVM API ã¨" "åŒä¸€å ´æ‰€ã«è¨­ç½®ã•れã¦ã„ã‚‹å ´åˆã«ã®ã¿ãƒ‡ãƒ—ロイå¯èƒ½ã§ã™ã€‚" #: pypowervm/exceptions.py:210 #, python-format msgid "WrapperTask %(name)s has no subtasks!" msgstr "WrapperTask %(name)s ã«ã‚µãƒ–タスクãŒã‚りã¾ã›ã‚“!" #: pypowervm/exceptions.py:214 msgid "FeedTask can't have an empty feed." msgstr "FeedTask ã§ç©ºã®ãƒ•ィードã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。" #: pypowervm/exceptions.py:218 #, python-format msgid "OS denied access to file %(access_file)s." msgstr "ファイル %(access_file)s ã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ãŒ OS ã§æ‹’å¦ã•れã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:222 #, python-format msgid "" "OS encountered an I/O error attempting to read file %(access_file)s: " "%(error)s" msgstr "" "ファイル %(access_file)s を読ã¿å–ã‚‹ã¨ãã«å…¥å‡ºåŠ›ã‚¨ãƒ©ãƒ¼ãŒ OS ã§æ¤œå‡ºã•れã¾ã—ãŸ: " "%(error)s" #: pypowervm/exceptions.py:227 #, python-format msgid "The migration task failed. %(error)s" msgstr "マイグレーション・タスクãŒå¤±æ•—ã—ã¾ã—ãŸã€‚%(error)s" #: pypowervm/exceptions.py:231 #, python-format msgid "No load source found for VM %(vm_name)s" msgstr "VM %(vm_name)s ã®ãƒ­ãƒ¼ãƒ‰ãƒ»ã‚½ãƒ¼ã‚¹ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: pypowervm/exceptions.py:235 #, python-format msgid "" "Unable to derive the pg83 encoding for hdisk %(dev_name)s. 
The " "parent_entry attribute is not set. This may be due to using a PV " "obtained through an unsupported property chain. The PV must be accessed " "via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage." msgstr "" "hdisk %(dev_name)s ã«ã¤ã„㦠pg83 エンコードをå–å¾—ã§ãã¾ã›ã‚“。 " "parent_entry 属性ãŒè¨­å®šã•れã¦ã„ã¾ã›ã‚“。 ã“ã®åŽŸå› ã¨ã—ã¦ã€ã‚µãƒãƒ¼ãƒˆã•れã¦ã„ãªã„" "プロパティー・ãƒã‚§ãƒ¼ãƒ³ã‹ã‚‰å–å¾—ã•れ㟠PV ãŒä½¿ç”¨ã•れã¦ã„ã‚‹ã“ã¨ãŒè€ƒãˆã‚‰ã‚Œã¾ã™ã€‚ PV ã«ã¯ã€" "VIOS.phys_volsã€VG.phys_volsã€ã¾ãŸã¯ VIOS.scsi_mappings[n].backing_storage ã‚’" "使用ã—ã¦ã‚¢ã‚¯ã‚»ã‚¹ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/exceptions.py:243 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. Expected to find " "exactly one matching mapping, found %(num_mappings)d." msgstr "" "vSCSI マッピングã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ãƒ»ã‚¨ãƒ¬ãƒ¡ãƒ³ãƒˆã‚’å†ãƒžãƒƒãƒ—ã§ãã¾ã›ã‚“。一致ã™ã‚‹ãƒžãƒƒãƒ”ングã¯" " 1 ã¤ã ã‘予期ã•れã¦ã„ã¾ã—ãŸãŒã€è¦‹ã¤ã‹ã£ãŸã®ã¯ %(num_mappings)d 個ã§ã—ãŸã€‚" #: pypowervm/exceptions.py:249 #, python-format msgid "" "Unable to remap storage element of vSCSI mapping. A mapping for storage " "element %(stg_name)s already exists to client LPAR %(lpar_uuid)s." msgstr "" "vSCSI マッピングã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ãƒ»ã‚¨ãƒ¬ãƒ¡ãƒ³ãƒˆã‚’å†ãƒžãƒƒãƒ—ã§ãã¾ã›ã‚“。" "ストレージ・エレメント %(stg_name)s ã®ãƒžãƒƒãƒ”ングã¯ã€æ—¢ã«ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆ LPAR %(lpar_uuid)s を対象ã«å­˜åœ¨ã—ã¾ã™ã€‚" #: pypowervm/exceptions.py:255 #, python-format msgid "" "Found device %(devname)s %(count)d times; expected to find it at most " "once." msgstr "" "デãƒã‚¤ã‚¹ %(devname)s ㌠%(count)d 回検出ã•れã¾ã—ãŸã€‚ã“ã®ãƒ‡ãƒã‚¤ã‚¹ã¯å¤šãã¦ã‚‚ " "1 回検出ã•れるã“ã¨ãŒäºˆæœŸã•れã¦ã„ã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:270 #, python-format msgid "" "FeedTask %(ft_name)s experienced multiple exceptions:\n" "\t%(concat_msgs)s" msgstr "" "FeedTask %(ft_name)s ã§è¤‡æ•°ã®ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ:\n" "\t%(concat_msgs)s" #: pypowervm/exceptions.py:280 #, python-format msgid "Expected to find exactly one management partition; found %(count)d." 
msgstr "1 ã¤ã®ç®¡ç†åŒºç”»ã®ã¿ãŒè¦‹ã¤ã‹ã‚‹ã¨äºˆæœŸã•れã¦ã„ã¾ã—ãŸãŒã€%(count)d 個ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:286 #, python-format msgid "" "Expected to find exactly one partition with ID %(lpar_id)d; found " "%(count)d." msgstr "" "ID %(lpar_id)d ã®ãƒ‘ーティション㯠1 ã¤ã®ã¿æ¤œå‡ºã•れるã“ã¨ãŒ" "予期ã•れã¦ã„ã¾ã—ãŸãŒã€%(count)d å€‹ãŒæ¤œå‡ºã•れã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:292 #, python-format msgid "Couldn't find the default Tier on Shared Storage Pool %(ssp_name)s." msgstr "共用ストレージ・プール %(ssp_name)s ã«ãƒ‡ãƒ•ォルト層ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/exceptions.py:301 #, python-format msgid "" "The device with UDID %(udid)s was not found on any of the Virtual I/O " "Servers." msgstr "" "UDID %(udid)s ã®ãƒ‡ãƒã‚¤ã‚¹ã¯ã„ãšã‚Œã® Virtual I/O Server ã«ã‚‚見ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/exceptions.py:306 #, python-format msgid "" "There are not enough Virtual I/O Servers to support the virtual machine's" " device with UDID %(udid)s." msgstr "" "仮想マシンã®å½“該デãƒã‚¤ã‚¹ (UDID: %(udid)s) をサãƒãƒ¼ãƒˆã§ãã‚‹ã ã‘ã®å分㪠Virtual I/O Server ãŒã‚りã¾ã›ã‚“。" #: pypowervm/exceptions.py:311 #, python-format msgid "" "The expected fabrics (%(fabrics)s) were not found on any of the Virtual " "I/O Servers." msgstr "" "予期ã•れãŸãƒ•ァブリック (%(fabrics)s) ã¯ã„ãšã‚Œã® Virtual I/O Server ã«ã‚‚見ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/exceptions.py:316 #, python-format msgid "" "Can not rebuild the virtual machine. It is using an I/O type of " "%(io_type)s which is not supported for VM rebuild." msgstr "" "ä»®æƒ³ãƒžã‚·ãƒ³ã‚’å†æ§‹ç¯‰ã§ãã¾ã›ã‚“。 VM 冿§‹ç¯‰ã«å¯¾ã—ã¦ã‚µãƒãƒ¼ãƒˆã•れã¦ã„ãªã„" "入出力タイプ %(io_type)s ãŒä½¿ç”¨ã•れã¦ã„ã¾ã™ã€‚" #: pypowervm/exceptions.py:321 #, python-format msgid "" "The number of VFC slots on the target system (%(rebuild_slots)d) does not" " match the number of slots on the client system (%(original_slots)d). " "Unable to rebuild this virtual machine on this system." 
msgstr "" "ターゲット・システム上㮠VFC ã‚¹ãƒ­ãƒƒãƒˆã®æ•° (%(rebuild_slots)d) ãŒã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãƒ»" "システム上ã®ã‚¹ãƒ­ãƒƒãƒˆã®æ•° (%(original_slots)d) ã¨ä¸€è‡´ã—ã¾ã›ã‚“。 " "ã“ã®ã‚·ã‚¹ãƒ†ãƒ ã§å½“該仮想マシンã¯å†æ§‹ç¯‰ã§ãã¾ã›ã‚“。" #: pypowervm/exceptions.py:328 #, python-format msgid "" "To register the slot information of the network device a CNA or VNIC " "adapter is needed. Instead the following was given: %(wrapper)s." msgstr "" "ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãƒ»ãƒ‡ãƒã‚¤ã‚¹ã®ã‚¹ãƒ­ãƒƒãƒˆæƒ…報を登録ã™ã‚‹ã«ã¯ã€CNA ã¾ãŸã¯ VNIC " "アダプターãŒå¿…è¦ã«ãªã‚Šã¾ã™ã€‚ 代ã‚り㫠%(wrapper)s ãŒæŒ‡å®šã•れã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:334 #, python-format msgid "" "There are not enough active Virtual I/O Servers available. Expected " "%(exp)d; found %(act)d." msgstr "" "使用å¯èƒ½ãªã‚¢ã‚¯ãƒ†ã‚£ãƒ– Virtual I/O Server ãŒå分ã«ã‚りã¾ã›ã‚“。 予期ã•れãŸ" "数㯠%(exp)d; ã§ã™ã€‚検出ã•ã‚ŒãŸæ•°ã¯ %(act)d ã§ã™ã€‚" #: pypowervm/exceptions.py:339 #, python-format msgid "" "No Virtual I/O Servers are available. Attempted to wait for a VIOS to " "become active for %(wait_time)d seconds. Please check the RMC " "connectivity between the PowerVM NovaLink and the Virtual I/O Servers." msgstr "" "使用å¯èƒ½ãª Virtual I/O Server ãŒã‚りã¾ã›ã‚“。 VIOS ãŒã‚¢ã‚¯ãƒ†ã‚£ãƒ–ã«ãªã‚‹ã¾ã§ " "%(wait_time)d 秒間待機ã—よã†ã¨ã—ã¾ã—ãŸã€‚ PowerVM NovaLink 㨠" "Virtual I/O Server ã®é–“ã® RMC 接続を調ã¹ã¦ãã ã•ã„。" #: pypowervm/exceptions.py:349 #, python-format msgid "" "Could not find any SR-IOV adapters in Sriov mode and Running state.\n" "Location | Mode | State\n" "%(sriov_loc_mode_state)s" msgstr "" "Sriov モードãŠã‚ˆã³ã€Œå®Ÿè¡Œä¸­ã€çŠ¶æ…‹ã® SR-IOV アダプターãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚\n" "ロケーション | モード | 状態\n" "%(sriov_loc_mode_state)s" #: pypowervm/exceptions.py:354 #, python-format msgid "" "Unable to fulfill redundancy requirement of %(red)d. Found %(found_vfs)d" " viable backing device(s)." 
msgstr "" "冗長度è¦ä»¶ %(red)d を満ãŸã™ã“ã¨ãŒã§ãã¾ã›ã‚“。 %(found_vfs)d 個ã®å®Ÿè¡Œå¯èƒ½ãªãƒãƒƒã‚­ãƒ³ã‚°ãƒ»ãƒ‡ãƒã‚¤ã‚¹ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:359 msgid "The Managed System is not vNIC capable." msgstr "管ç†å¯¾è±¡ã‚·ã‚¹ãƒ†ãƒ ãŒ vNIC 対応ã§ã¯ã‚りã¾ã›ã‚“。" #: pypowervm/exceptions.py:363 msgid "There are no active vNIC-capable VIOSes." msgstr "アクティブ㪠vNIC 対応 VIOS ãŒã‚りã¾ã›ã‚“。" #: pypowervm/exceptions.py:367 #, python-format msgid "" "A redundancy of %(red)d was specified, but the Managed System is not vNIC" " failover capable." msgstr "" "冗長度 %(red)d ãŒæŒ‡å®šã•れã¾ã—ãŸãŒã€ç®¡ç†å¯¾è±¡ã‚·ã‚¹ãƒ†ãƒ ãŒ vNIC フェイルオーãƒãƒ¼å¯¾å¿œã§ã¯ã‚りã¾ã›ã‚“。" #: pypowervm/exceptions.py:372 #, python-format msgid "" "A redundancy of %(red)d was specified, but there are no active vNIC " "failover-capable VIOSes." msgstr "" "冗長度 %(red)d ãŒæŒ‡å®šã•れã¾ã—ãŸãŒã€ã‚¢ã‚¯ãƒ†ã‚£ãƒ–㪠vNIC " "フェイルオーãƒãƒ¼å¯¾å¿œ VIOS ãŒã‚りã¾ã›ã‚“。" #: pypowervm/exceptions.py:377 #, python-format msgid "" "Unable to locate the volume group %(vol_grp)s to store the virtual " "optical media within. Unable to create the media repository." msgstr "" "仮想光メディアã®ä¿ç®¡å ´æ‰€ã¨ãªã‚‹ãƒœãƒªãƒ¥ãƒ¼ãƒ ãƒ»ã‚°ãƒ«ãƒ¼ãƒ— %(vol_grp)s ㌠" "ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。 メディア・リãƒã‚¸ãƒˆãƒªãƒ¼ã‚’作æˆã§ãã¾ã›ã‚“。" #: pypowervm/exceptions.py:383 #, python-format msgid "" "The ManagedSystem update was not attempted because changes were requested" " to one or more SR-IOV physical ports which are in use by vNICs.\n" "%(warnings)s" msgstr "" "vNIC ã§ä½¿ç”¨ã•れã¦ã„ã‚‹ 1 ã¤ä»¥ä¸Šã® SR-IOV 物ç†ãƒãƒ¼ãƒˆã«å¯¾ã—ã¦å¤‰æ›´ãŒè¦æ±‚ã•れãŸãŸã‚ã€" "ManagedSystem ã®æ›´æ–°ã¯è©¦ã¿ã‚‰ã‚Œã¾ã›ã‚“ã§ã—ãŸã€‚\n" "%(warnings)s" #: pypowervm/exceptions.py:389 #, python-format msgid "Unable to create VNC based virtual terminal: %(err)s" msgstr "VNC ベースã®ä»®æƒ³ç«¯æœ«ã‚’作æˆã§ãã¾ã›ã‚“: %(err)s" #: pypowervm/exceptions.py:393 msgid "The Adapter cache is not supported." 
msgstr "アダプター・キャッシュãŒã‚µãƒãƒ¼ãƒˆã•れã¦ã„ã¾ã›ã‚“。" #: pypowervm/exceptions.py:397 #, python-format msgid "" "Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s" msgstr "" "「%(enum)sã€ã®å€¤ã€Œ%(value)sã€ãŒç„¡åйã§ã™ã€‚有効ãªå€¤: " "%(valid_values)s" #: pypowervm/exceptions.py:402 #, python-format msgid "No VIOS found with name %(vios_name)s." msgstr "%(vios_name)s ã¨ã„ã†åå‰ã® VIOS ã¯è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/exceptions.py:406 #, python-format msgid "No volume group found with name %(vg_name)s." msgstr "%(vg_name)s ã¨ã„ã†åå‰ã®ãƒœãƒªãƒ¥ãƒ¼ãƒ ãƒ»ã‚°ãƒ«ãƒ¼ãƒ—ã¯è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/exceptions.py:410 #, python-format msgid "Partition with name %(part_name)s is not an IBMi partition." msgstr "åå‰ %(part_name)s ã®ãƒ‘ーティションã¯ã€IBMi パーティションã§ã¯ã‚りã¾ã›ã‚“。" #: pypowervm/exceptions.py:414 msgid "PanelJob function partition argument is empty." msgstr "PanelJob 関数パーティション引数ãŒç©ºã§ã™ã€‚" #: pypowervm/exceptions.py:418 #, python-format msgid "" "Panel function operation %(op_name)s is invalid. One of %(valid_ops)s " "expected." msgstr "" "Panel 関数æ“作 %(op_name)s ã¯ç„¡åйã§ã™ã€‚%(valid_ops)s ã®ã„ãšã‚Œã‹ãŒäºˆæœŸã•れã¦ã„ã¾ã—ãŸã€‚" #: pypowervm/exceptions.py:423 #, python-format msgid "ISCSI discovery failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "VIOS %(vios_uuid)s ã® ISCSI ディスカãƒãƒªãƒ¼ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚戻りコード: %(status)s" #: pypowervm/exceptions.py:429 #, python-format msgid "ISCSI Logout failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "VIOS %(vios_uuid)s ã® ISCSI ログアウトãŒå¤±æ•—ã—ã¾ã—ãŸã€‚戻りコード: %(status)s" #: pypowervm/exceptions.py:434 #, python-format msgid "ISCSI Remove failed for VIOS %(vios_uuid)s. Return code: %(status)s" msgstr "VIOS %(vios_uuid)s ã® ISCSI 削除ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚戻りコード: %(status)s" #: pypowervm/exceptions.py:439 #, python-format msgid "Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s." 
msgstr "VIOS %(vios_uuid)s ã® Vstor %(stor_udid)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/util.py:125 #, python-format msgid "" "Proposed extended attribute group '%(arg_xag)s' doesn't match existing " "extended attribute group '%(path_xag)s'" msgstr "" "ææ¡ˆã•ã‚ŒãŸæ‹¡å¼µå±žæ€§ã‚°ãƒ«ãƒ¼ãƒ—「%(arg_xag)sã€ã¯ã€æ—¢å­˜ã®æ‹¡å¼µå±žæ€§ã‚°ãƒ«ãƒ¼ãƒ—「%(path_xag)sã€ã¨ä¸€è‡´ã—ã¾ã›ã‚“" #: pypowervm/util.py:221 msgid "Certificate has expired." msgstr "è¨¼æ˜Žæ›¸ã®æœ‰åŠ¹æœŸé™ãŒåˆ‡ã‚Œã¾ã—ãŸã€‚" #: pypowervm/util.py:371 #, python-format msgid "Prefix and suffix together may not be more than %d characters." msgstr "プレフィックスãŠã‚ˆã³ã‚µãƒ•ィックスã¯åˆã‚ã›ã¦ %d 文字を超ãˆã¦ã¯ãªã‚Šã¾ã›ã‚“。" #: pypowervm/util.py:376 msgid "Total length must be at least 1 character." msgstr "åˆè¨ˆã®é•·ã•㯠1 文字以上ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" #: pypowervm/util.py:399 msgid "The name parameter must be at least one character long." msgstr "åå‰ãƒ‘ラメーターã®é•·ã•㯠1 文字以上ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" #: pypowervm/util.py:402 #, python-format msgid "The name parameter must not exceed %d characters when trunk_ok is False." msgstr "trunk_ok ㌠False ã®å ´åˆã€åå‰ãƒ‘ラメーター㯠%d 文字を超ãˆã¦ã¯ãªã‚Šã¾ã›ã‚“。" #: pypowervm/util.py:500 msgid "Developer error: partial parent specification." msgstr "開発者エラー: 部分的ãªè¦ªã®æŒ‡å®šã€‚" #: pypowervm/util.py:505 msgid "" "Developer error: parent_type must be either a string schema type or a " "Wrapper subclass." msgstr "" "開発者エラー: parent_type ã¯ã€ã‚¹ãƒˆãƒªãƒ³ã‚°ãƒ»ã‚¹ã‚­ãƒ¼ãƒžãƒ»ã‚¿ã‚¤ãƒ—ã‹ãƒ©ãƒƒãƒ‘ー・サブクラスã®ã©ã¡ã‚‰ã‹ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" #: pypowervm/util.py:602 #, python-format msgid "Invalid value '%(bad_val)s'. Expected one of %(good_vals)s, or a list." 
msgstr "値「%(bad_val)sã€ãŒç„¡åйã§ã™ã€‚予期ã•れãŸã‚‚ã®ã¯ %(good_vals)s ã®ã„ãšã‚Œã‹ã€ã¾ãŸã¯ãƒªã‚¹ãƒˆã§ã™ã€‚" #: pypowervm/helpers/log_helper.py:83 #, python-format msgid "REQUEST: %s" msgstr "è¦æ±‚: %s" #: pypowervm/helpers/log_helper.py:103 #, python-format msgid "RESPONSE: %s" msgstr "応答: %s" #: pypowervm/tasks/cluster_ssp.py:108 #, python-format msgid "Waiting for in-progress upload(s) to complete. Marker LU(s): %s" msgstr "進行中ã®ã‚¢ãƒƒãƒ—ロードãŒå®Œäº†ã™ã‚‹ã¾ã§å¾…機ã—ã¦ã„ã¾ã™ã€‚マーカー LU: %s" #: pypowervm/tasks/cluster_ssp.py:137 msgid "Abdicating in favor of in-progress upload." msgstr "å¯¾è±¡ãŒæ”¾æ£„ã•れã¦ã€é€²è¡Œä¸­ã®ã‚¢ãƒƒãƒ—ロードãŒé¸æŠžã•れã¾ã™ã€‚" #: pypowervm/tasks/cluster_ssp.py:147 #, python-format msgid "Abdicating upload in favor of marker %s." msgstr "ã‚¢ãƒƒãƒ—ãƒ­ãƒ¼ãƒ‰ãŒæ”¾æ£„ã•れã¦ã€ãƒžãƒ¼ã‚«ãƒ¼ %s ãŒé¸æŠžã•れã¾ã™ã€‚" #: pypowervm/tasks/cluster_ssp.py:202 #, python-format msgid "Using already-uploaded image LU %s." msgstr "アップロード済ã¿ã‚¤ãƒ¡ãƒ¼ã‚¸ LU %s を使用ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/cluster_ssp.py:212 #, python-format msgid "Creating marker LU %s" msgstr "マーカー LU %s を作æˆã—ã¦ã„ã¾ã™" #: pypowervm/tasks/cluster_ssp.py:228 #, python-format msgid "Uploading to image LU %(lu)s (marker %(mkr)s)." msgstr "イメージ LU %(lu)s (マーカー %(mkr)s) ã«ã‚¢ãƒƒãƒ—ロードã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/cluster_ssp.py:240 #, python-format msgid "Removing failed LU %s." msgstr "障害ãŒç™ºç”Ÿã—㟠LU %s を削除ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/cna.py:119 #, python-format msgid "Unable to find the Virtual Switch %s on the system." msgstr "システム上ã«ä»®æƒ³ã‚¹ã‚¤ãƒƒãƒ %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/tasks/cna.py:143 #, python-format msgid "Unable to find a valid VLAN for Virtual Switch %s." msgstr "仮想スイッム%s ã«æœ‰åŠ¹ãª VLAN ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/tasks/memory.py:102 #, python-format msgid "" "Error obtaining host memory overhead for host with UUID '%(host)s': " "%(error)s." msgstr "" "UUID ãŒã€Œ%(host)sã€ã®ãƒ›ã‚¹ãƒˆã®ãƒ›ã‚¹ãƒˆãƒ»ãƒ¡ãƒ¢ãƒªãƒ¼ãƒ»ã‚ªãƒ¼ãƒãƒ¼ãƒ˜ãƒƒãƒ‰ã®å–得エラーã§ã™: " "%(error)s." 
#: pypowervm/tasks/partition.py:333 #, python-format msgid "" "Timed out waiting for the RMC state of all the powered on Virtual I/O " "Servers to be active. Wait time was: %(time)d seconds. VIOSes that did " "not go active were: %(vioses)s." msgstr "" "é›»æºãŒã‚ªãƒ³ã«ãªã£ã¦ã„ã‚‹ã™ã¹ã¦ã® Virtual I/O Server ã® RMC 状態ãŒã‚¢ã‚¯ãƒ†ã‚£ãƒ–ã«ãªã‚‹ã¾ã§å¾…機ã—ã¦ã„ã‚‹ã¨ãã«ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸã€‚ 待機時間㯠%(time)d ç§’ã§ã—ãŸã€‚ アクティブã«ãªã‚‰ãªã‹ã£ãŸ VIOS 㯠%(vioses)s ã§ã—ãŸã€‚" #: pypowervm/tasks/partition.py:376 #, python-format msgid "Assuming description-less slot is physical I/O: %s" msgstr "記述ãªã—ã®ã‚¹ãƒ­ãƒƒãƒˆã¯ç‰©ç†å…¥å‡ºåŠ›ã§ã‚ã‚‹ã¨æƒ³å®šã•れã¾ã™: %s" #: pypowervm/tasks/power.py:88 #, python-format msgid "Partition %s already powered on." msgstr "パーティション %s ã®é›»æºã¯æ—¢ã«ã‚ªãƒ³ã«ãªã£ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/power.py:136 #, python-format msgid "Partition %s already powered off." msgstr "パーティション %s ã®é›»æºã¯æ—¢ã«ã‚ªãƒ•ã«ãªã£ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/power.py:199 #, python-format msgid "" "Specifying add_parms as a dict is deprecated. Please specify a %s " "instance instead." msgstr "" "add_parms をディクショナリーã¨ã—ã¦æŒ‡å®šã™ã‚‹ã“ã¨ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 代ã‚り㫠%s " "インスタンスを指定ã—ã¦ãã ã•ã„。" #: pypowervm/tasks/power.py:265 #, python-format msgid "" "IBMi OS normal shutdown failed. Trying OS immediate shutdown. " "Partition: %s" msgstr "" "IBMi OS ã®é€šå¸¸ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ã«å¤±æ•—ã—ã¾ã—ãŸã€‚ OS ã®å³æ™‚シャットダウンを試行ã—ã¦ã„ã¾ã™ã€‚ " "パーティション: %s" #: pypowervm/tasks/power.py:275 #, python-format msgid "" "IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "IBMi OS ã®å³æ™‚シャットダウンã«å¤±æ•—ã—ã¾ã—ãŸã€‚ VSP ã®é€šå¸¸ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ã‚’試行ã—ã¦ã„ã¾ã™ã€‚ " "パーティション: %s" #: pypowervm/tasks/power.py:323 #, python-format msgid "" "Non-IBMi OS immediate shutdown timed out. Trying VSP hard shutdown. 
" "Partition: %s" msgstr "" "éž IBMi OS ã®å³æ™‚シャットダウンãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸã€‚ VSP ã®ãƒãƒ¼ãƒ‰ãƒ»ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ã‚’試行ã—ã¦ã„ã¾ã™ã€‚ " "パーティション: %s" #: pypowervm/tasks/power.py:327 #, python-format msgid "" "Non-IBMi OS immediate shutdown failed. Trying VSP normal shutdown. " "Partition: %s" msgstr "" "éž IBMi OS ã®å³æ™‚シャットダウンã«å¤±æ•—ã—ã¾ã—ãŸã€‚ VSP ã®é€šå¸¸ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ã‚’試行ã—ã¦ã„ã¾ã™ã€‚ " "パーティション: %s" #: pypowervm/tasks/power.py:363 #, python-format msgid "VSP hard shutdown with default timeout. Partition: %s" msgstr "VSP ãŒãƒ‡ãƒ•ォルト・タイムアウトã§ãƒãƒ¼ãƒ‰ãƒ»ã‚·ãƒ£ãƒƒãƒˆãƒ€ã‚¦ãƒ³ã•れã¾ã—ãŸã€‚ パーティション: %s" #: pypowervm/tasks/scsi_mapper.py:39 msgid "Retrying modification of SCSI Mapping." msgstr "SCSI マッピングã®å¤‰æ›´ã‚’å†è©¦è¡Œã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/scsi_mapper.py:99 #, python-format msgid "" "Found existing mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Virtual I/O Server %(vios_name)s ã‹ã‚‰ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆ LPAR %(lpar_uuid)s ã¸ã® " "%(stg_type)s ストレージ・エレメント %(stg_name)s ã®æ—¢å­˜ã®ãƒžãƒƒãƒ”ングãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" #: pypowervm/tasks/scsi_mapper.py:118 #, python-format msgid "" "Creating mapping of %(stg_type)s storage element %(stg_name)s from " "Virtual I/O Server %(vios_name)s to client LPAR %(lpar_uuid)s." msgstr "" "Virtual I/O Server %(vios_name)s ã‹ã‚‰ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆ LPAR %(lpar_uuid)s ã¸ã® " "%(stg_type)s ストレージ・エレメント %(stg_name)s ã®ãƒžãƒƒãƒ”ングを作æˆã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/scsi_mapper.py:489 msgid "Must not specify both match_func and stg_elem." msgstr "match_func 㨠stg_elem ã‚’ä¸¡æ–¹ä¸€ç·’ã«æŒ‡å®šã—ã¦ã¯ãªã‚Šã¾ã›ã‚“。" #: pypowervm/tasks/slot_map.py:194 msgid "" "The register_cna method is deprecated! Please use the register_vnet " "method." msgstr "" "register_cna ãƒ¡ã‚½ãƒƒãƒ‰ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 register_vnet メソッドを" "使用ã—ã¦ãã ã•ã„。" #: pypowervm/tasks/slot_map.py:204 msgid "The drop_cna method is deprecated! 
Please use the drop_vnet method." msgstr "drop_cna ãƒ¡ã‚½ãƒƒãƒ‰ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。drop_vnet メソッドを使用ã—ã¦ãã ã•ã„。" #: pypowervm/tasks/sriov.py:398 #, python-format msgid "" "SR-IOV Physical Port at location %(loc_code)s is backing a vNIC belonging" " to LPAR %(lpar_name)s (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)." msgstr "" "ロケーション %(loc_code)s ã«ã‚ã‚‹ SR-IOV 物ç†ãƒãƒ¼ãƒˆã¯ã€" "LPAR %(lpar_name)s ã«å±žã™ã‚‹ vNIC を支æ´ã—ã¦ã„ã¾ã™ (LPAR UUID: %(lpar_uuid)s; vNIC UUID: " "%(vnic_uuid)s)。" #: pypowervm/tasks/sriov.py:515 msgid "" "Making changes to the following SR-IOV physical port labels even though " "they are in use by vNICs:" msgstr "" "次㮠SR-IOV 物ç†ãƒãƒ¼ãƒˆãƒ»ãƒ©ãƒ™ãƒ«ã¯ vNIC ã§ä½¿ç”¨ã•れã¦ã„ã¾ã™ã€‚ãれã«ã‚‚ã‹ã‹ã‚らãšã€" "ãã®ãƒ©ãƒ™ãƒ«ã‚’変更ã—よã†ã¨ã—ã¦ã„ã¾ã™:" #: pypowervm/tasks/storage.py:100 #, python-format msgid "Failed to delete vio_file with UUID %s. It must be manually deleted." msgstr "UUID %s ã® vio_file を削除ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚ ã“ã‚Œã¯æ‰‹å‹•ã§å‰Šé™¤ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/tasks/storage.py:356 msgid "Encountered an issue while uploading. Will retry." msgstr "アップロード時ã«å•題ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚ å†è©¦è¡Œã—ã¾ã™ã€‚" #: pypowervm/tasks/storage.py:530 msgid "" "The crt_lu_linked_clone method is deprecated! Please use the crt_lu " "method (clone=src_lu, size=lu_size_gb)." msgstr "" "crt_lu_linked_clone ãƒ¡ã‚½ãƒƒãƒ‰ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 crt_lu メソッドを使用ã—ã¦" "ãã ã•ã„ (clone=src_luã€size=lu_size_gb)。" #: pypowervm/tasks/storage.py:581 #, python-format msgid "Disk Logical Unit %(luname)s has no backing image LU. (UDID: %(udid)s) " msgstr "ディスク論ç†è£…ç½® %(luname)s ã«ã¯ã€ãƒãƒƒã‚­ãƒ³ã‚°ãƒ»ã‚¤ãƒ¡ãƒ¼ã‚¸ LU ãŒã‚りã¾ã›ã‚“。 (UDID: %(udid)s) " #: pypowervm/tasks/storage.py:665 msgid "Unable to locate new vDisk on file upload." 
msgstr "ãƒ•ã‚¡ã‚¤ãƒ«ãƒ»ã‚¢ãƒƒãƒ—ãƒ­ãƒ¼ãƒ‰æ™‚ã«æ–°è¦ä»®æƒ³ãƒ‡ã‚£ã‚¹ã‚¯ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/tasks/storage.py:752 #, python-format msgid "" "Ignoring device because it lacks a UDID:\n" "%s" msgstr "" "UDID ãŒãªã„ãŸã‚ã€ãƒ‡ãƒã‚¤ã‚¹ã‚’無視ã—ã¾ã™:\n" "%s" #: pypowervm/tasks/storage.py:758 #, python-format msgid "Device %s not found in list." msgstr "デãƒã‚¤ã‚¹ %s ãŒãƒªã‚¹ãƒˆã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" #: pypowervm/tasks/storage.py:787 #, python-format msgid "Deleting virtual disk %(vdisk)s from volume group %(vg)s" msgstr "ボリューム・グループ %(vg)s ã‹ã‚‰ä»®æƒ³ãƒ‡ã‚£ã‚¹ã‚¯ %(vdisk)s を削除ã—ã¦ã„ã¾ã™" #: pypowervm/tasks/storage.py:810 #, python-format msgid "Deleting virtual optical device %(vopt)s from volume group %(vg)s" msgstr "ボリューム・グループ %(vg)s ã‹ã‚‰ä»®æƒ³å…‰ãƒ‡ã‚£ã‚¹ã‚¯ãƒ»ãƒ‡ãƒã‚¤ã‚¹ %(vopt)s を削除ã—ã¦ã„ã¾ã™" #: pypowervm/tasks/storage.py:866 #, python-format msgid "Removing LU %(lu_name)s (UDID %(lu_udid)s)" msgstr "LU %(lu_name)s (UDID %(lu_udid)s) を削除ã—ã¦ã„ã¾ã™" #: pypowervm/tasks/storage.py:870 #, python-format msgid "" "LU %(lu_name)s was not found - it may have been deleted out of band. " "(UDID: %(lu_udid)s)" msgstr "" "LU %(lu_name)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—㟠- 帯域外ã§å‰Šé™¤ã•れãŸå¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ " "(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:888 #, python-format msgid "" "Removing Image LU %(lu_name)s because it is no longer in use. (UDID: " "%(lu_udid)s)" msgstr "" "イメージ LU %(lu_name)s ã¯ä½¿ç”¨ã•れã¦ã„ãªã„ãŸã‚ã€å‰Šé™¤ã—ã¾ã™ã€‚(UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:893 #, python-format msgid "Backing LU %(lu_name)s was not found. (UDID: %(lu_udid)s)" msgstr "ãƒãƒƒã‚­ãƒ³ã‚° LU %(lu_name)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚ (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:919 msgid "Developer error: Either tier or lufeed is required." msgstr "開発者エラー: tier ã¾ãŸã¯ lufeed ãŒå¿…è¦ã§ã™ã€‚" #: pypowervm/tasks/storage.py:924 msgid "Developer error: The lufeed parameter must comprise LUEnt EntryWrappers." 
msgstr "開発者エラー: lufeed パラメーターã¯ã€è¤‡æ•°ã® LUEnt EntryWrapper ã‹ã‚‰æ§‹æˆã•れã¦ã„ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" #: pypowervm/tasks/storage.py:931 #, python-format msgid "Deleting LU %(lu_name)s (UDID: %(lu_udid)s)" msgstr "LU %(lu_name)s (UDID: %(lu_udid)s) を削除ã—ã¦ã„ã¾ã™" #: pypowervm/tasks/storage.py:936 #, python-format msgid "" "Ignoring HttpError for LU %(lu_name)s may have been deleted out of band." " (UDID: %(lu_udid)s)" msgstr "" "LU %(lu_name)s ã® HttpError を無視ã—ã¾ã™ - 帯域外ã§å‰Šé™¤ã•れãŸå¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚" " (UDID: %(lu_udid)s)" #: pypowervm/tasks/storage.py:992 #, python-format msgid "" "Removing %(num_maps)d orphan %(stg_type)s mappings from VIOS " "%(vios_name)s." msgstr "" "%(num_maps)d 個ã®å­¤ç«‹ %(stg_type)s マッピングを VIOS %(vios_name)s ã‹ã‚‰" "削除ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/storage.py:1018 #, python-format msgid "Removing %(num_maps)d port-less VFC mappings from VIOS %(vios_name)s." msgstr "%(num_maps)d 個ã®ãƒãƒ¼ãƒˆãªã— VFC マッピングを VIOS %(vios_name)s ã‹ã‚‰å‰Šé™¤ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/storage.py:1045 #, python-format msgid "" "Removing %(num_maps)d %(stg_type)s mappings associated with LPAR ID " "%(lpar_id)d from VIOS %(vios_name)s." msgstr "" "LPAR ID %(lpar_id)d ã«é–¢é€£ä»˜ã‘られ㟠%(num_maps)d 個㮠%(stg_type)s マッピングを VIOS %(vios_name)s ã‹ã‚‰å‰Šé™¤ä¸­ã§ã™ã€‚" #: pypowervm/tasks/storage.py:1112 #, python-format msgid "" "Not removing storage %(stg_name)s of type %(stg_type)s because it cannot " "be determined whether it is still in use. Manual verification and " "cleanup may be necessary." msgstr "" "タイプ %(stg_type)s ã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ %(stg_name)s ã¯ã€ã¾ã ä½¿ç”¨ã•れã¦ã„ã‚‹ã‹ã©ã†ã‹ã‚’" "判別ã§ããªã„ãŸã‚削除ã•れã¾ã›ã‚“。手動ã§ã®ç¢ºèªã¨ã‚¯ãƒªãƒ¼ãƒ³ã‚¢ãƒƒãƒ—ãŒå¿…è¦ã¨ãªã‚‹" "å¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚" #: pypowervm/tasks/storage.py:1123 #, python-format msgid "" "Storage scrub ignoring storage element %(stg_name)s because it is of " "unexpected type %(stg_type)s." 
msgstr "" "ストレージ・エレメント %(stg_name)s ã¯äºˆæœŸã—ãªã„タイプ %(stg_type)s ã§ã‚ã‚‹ãŸã‚ã€ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ä¿®æ­£ã§ç„¡è¦–ã•れã¾ã™ã€‚" #: pypowervm/tasks/storage.py:1141 #, python-format msgid "" "Scrubbing the following %(vdcount)d Virtual Disks from VIOS %(vios)s: " "%(vdlist)s" msgstr "" "VIOS %(vios)s ã«ã‚る次㮠%(vdcount)d 個ã®ä»®æƒ³ãƒ‡ã‚£ã‚¹ã‚¯ã‚’修正ã—ã¦ã„ã¾ã™: " "%(vdlist)s" #: pypowervm/tasks/storage.py:1149 #, python-format msgid "" "Scrubbing the following %(vocount)d Virtual Opticals from VIOS %(vios)s: " "%(volist)s" msgstr "" "VIOS %(vios)s ã«ã‚る次㮠%(vocount)d 個ã®ä»®æƒ³å…‰ãƒ‡ã‚£ã‚¹ã‚¯ã‚’修正ã—ã¦ã„ã¾ã™: " "%(volist)s" #: pypowervm/tasks/storage.py:1220 #, python-format msgid "" "Skipping scrub of %(stg_type)s mappings from VIOS %(vios_name)s for the " "following LPAR IDs because those LPARs exist: %(lpar_ids)s" msgstr "" "次㮠LPAR ID ã¯å­˜åœ¨ã™ã‚‹ãŸã‚ã€ã“ã® ID ã® VIOS %(vios_name)s ã‹ã‚‰ã®%(stg_type)s マッピングã®ä¿®æ­£ã‚’スキップ中ã§ã™: %(lpar_ids)s" #: pypowervm/tasks/vfc_mapper.py:543 #, python-format msgid "" "Unable to find appropriate VIOS. The payload provided was likely " "insufficient. The payload data is:\n" " %s)" msgstr "" "é©åˆ‡ãª VIOS ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。指定ã•れãŸãƒšã‚¤ãƒ­ãƒ¼ãƒ‰ã¯ä¸å分ã§ã‚ã£ãŸå¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ペイロード・データ:\n" " %s)" #: pypowervm/tasks/vfc_mapper.py:572 #, python-format msgid "" "The matched VFC port map has no backing port set. Adding %(port)s to " "mapping for client wwpns: %(wwpns)s" msgstr "" "一致ã—㟠VFC ãƒãƒ¼ãƒˆãƒ»ãƒžãƒƒãƒ—ã§ã¯ãƒãƒƒã‚­ãƒ³ã‚°ãƒ»ãƒãƒ¼ãƒˆãŒè¨­å®šã•れã¦ã„ã¾ã›ã‚“。 " "%(port)s をクライアント wwpn %(wwpns)s ã®ãƒžãƒƒãƒ”ングã«è¿½åŠ ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/vopt.py:75 msgid "" "An error occurred querying the virtual optical media repository. " "Attempting to re-establish connection with a virtual optical media " "repository." msgstr "" "仮想光メディア・リãƒã‚¸ãƒˆãƒªãƒ¼ã‚’照会ã™ã‚‹ã¨ãã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚ " "仮想光メディア・リãƒã‚¸ãƒˆãƒªãƒ¼ã¨ã®æŽ¥ç¶šã‚’å†ç¢ºç«‹ã—よã†ã¨" "ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/tasks/vterm.py:86 msgid "Unable to close vterm." 
msgstr "vterm ã‚’é–‰ã˜ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“。" #: pypowervm/tasks/vterm.py:138 #, python-format msgid "Invalid output on vterm open. Trying to reset the vterm. Error was %s" msgstr "vterm ãŒé–‹ã„ãŸã¨ãã®å‡ºåŠ›ãŒç„¡åйã§ã™ã€‚vterm ã®ãƒªã‚»ãƒƒãƒˆã‚’試ã¿ã¦ã„ã¾ã™ã€‚エラー㯠%s ã§ã—ãŸã€‚" #: pypowervm/tasks/vterm.py:351 #, python-format msgid "VNCSocket Listener Listening on ip=%(ip)s port=%(port)s" msgstr "VNCSocket リスナー㌠ip=%(ip)s ãƒãƒ¼ãƒˆ=%(port)s ã§ listen ã—ã¦ã„ã¾ã™" #: pypowervm/tasks/vterm.py:480 #, python-format msgid "Error negotiating SSL for VNC Repeater: %s" msgstr "VNC リピーターã«å¯¾ã—㦠SSL を折è¡ã—ã¦ã„ã‚‹ã¨ãã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %s" #: pypowervm/tasks/hdisk/_fc.py:197 #, python-format msgid "" "hdisk discovery failed; will scrub stale storage for LPAR IDs %s and " "retry." msgstr "" "hdisk ディスカãƒãƒªãƒ¼ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚LPAR ID %s ã«ã¤ã„ã¦ã€ä¸æ•´åˆãª" "ストレージを修正ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/tasks/hdisk/_fc.py:339 #, python-format msgid "LUA Recovery Successful. Device Found: %s" msgstr "LUA リカãƒãƒªãƒ¼ãŒæ­£å¸¸ã«è¡Œã‚れã¾ã—ãŸã€‚見ã¤ã‹ã£ãŸãƒ‡ãƒã‚¤ã‚¹: %s" #: pypowervm/tasks/hdisk/_fc.py:343 #, python-format msgid "ITL Error encountered: %s" msgstr "ITL エラーãŒç™ºç”Ÿã—ã¾ã—ãŸ: %s" #: pypowervm/tasks/hdisk/_fc.py:345 #, python-format msgid "%s Device is currently in use." msgstr "%s デãƒã‚¤ã‚¹ã¯ç¾åœ¨ä½¿ç”¨ä¸­ã§ã™ã€‚" #: pypowervm/tasks/hdisk/_fc.py:347 #, python-format msgid "%s Device discovered with unknown UDID." msgstr "䏿˜Žãª UDID ã‚’æŒã¤ %s デãƒã‚¤ã‚¹ãŒãƒ‡ã‚£ã‚¹ã‚«ãƒãƒ¼ã•れã¾ã—ãŸã€‚" #: pypowervm/tasks/hdisk/_fc.py:349 #, python-format msgid "Failed to Discover the Device : %s" msgstr "デãƒã‚¤ã‚¹ã‚’ディスカãƒãƒ¼ã§ãã¾ã›ã‚“ã§ã—ãŸ: %s" #: pypowervm/tasks/hdisk/_fc.py:419 #, python-format msgid "CLIRunner Error: %s" msgstr "CLIRunner エラー: %s" #: pypowervm/tasks/hdisk/_fc.py:451 msgid "" "QUERY_INVENTORY LUARecovery Job succeeded, but result contained neither " "OutputXML nor StdOut." 
msgstr "" "QUERY_INVENTORY LUARecovery ã‚¸ãƒ§ãƒ–ã¯æ­£å¸¸ã«çµ‚了ã—ã¾ã—ãŸãŒã€çµæžœã«ã¯ OutputXML ã‚‚ StdOut ã‚‚å«ã¾ã‚Œã¦ã„ã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/tasks/hdisk/_fc.py:475 #, python-format msgid "QUERY_INVENTORY produced invalid chunk of XML (%(chunk)s). Error: %(err)s" msgstr "QUERY_INVENTORY ã‹ã‚‰ç„¡åŠ¹ãª XML ãƒãƒ£ãƒ³ã‚¯ (%(chunk)s) ãŒç”Ÿæˆã•れã¾ã—ãŸã€‚ エラー: %(err)s" #: pypowervm/tasks/hdisk/_fc.py:483 #, python-format msgid "" "Failed to find pg83 descriptor in XML output:\n" "%s" msgstr "" "XML 出力㫠pg83 ディスクリプターãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ:\n" "%s" #: pypowervm/tasks/hdisk/_iscsi.py:64 msgid "ISCSI command completed successfully" msgstr "ISCSI ã‚³ãƒžãƒ³ãƒ‰ãŒæ­£å¸¸ã«å®Œäº†ã—ã¾ã—ãŸ" #: pypowervm/tasks/hdisk/_iscsi.py:66 msgid "ISCSI session already exists and logged in" msgstr "ISCSI ã‚»ãƒƒã‚·ãƒ§ãƒ³ã¯æ—¢ã«å­˜åœ¨ã—ã€ãƒ­ã‚°ã‚¤ãƒ³ã—ã¦ã„ã¾ã™" #: pypowervm/tasks/hdisk/_iscsi.py:68 msgid "ISCSI command performed on unsupported VIOS, host." msgstr "ISCSI コマンドãŒã€ã‚µãƒãƒ¼ãƒˆã•れã¦ã„ãªã„ VIOS (ホスト) ã§å®Ÿè¡Œã•れã¾ã—ãŸ" #: pypowervm/tasks/hdisk/_iscsi.py:71 msgid "ISCSI discovery found stale entries in the ODM database." 
msgstr "ISCSI ディスカãƒãƒªãƒ¼ã«ã‚ˆã£ã¦ã€ä¸æ•´åˆãªé …目㌠ODM データベースã«è¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚" #: pypowervm/tasks/hdisk/_iscsi.py:74 msgid "ISCSI session could not be found " msgstr "ISCSI セッションãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ" #: pypowervm/tasks/hdisk/_iscsi.py:76 msgid "No records/targets/sessions/portals found to execute operation on" msgstr "æ“作を実行ã™ã‚‹å¯¾è±¡ã®ãƒ¬ã‚³ãƒ¼ãƒ‰/ターゲット/セッション/ãƒãƒ¼ã‚¿ãƒ«ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #: pypowervm/tasks/hdisk/_iscsi.py:79 #, python-format msgid "ISCSI command failed with internal error status = %s" msgstr "ISCSI コマンドãŒå¤±æ•—ã—ã¾ã—ãŸã€‚å†…éƒ¨ã‚¨ãƒ©ãƒ¼çŠ¶æ³ = %s" #: pypowervm/tasks/hdisk/_iscsi.py:82 msgid "ISCSI generic error code" msgstr "ISCSI ã®ä¸€èˆ¬çš„ãªã‚¨ãƒ©ãƒ¼ãƒ»ã‚³ãƒ¼ãƒ‰" #: pypowervm/tasks/hdisk/_iscsi.py:84 msgid "ISCSI session login failure" msgstr "ISCSI セッション・ログイン失敗" #: pypowervm/tasks/hdisk/_iscsi.py:86 msgid "ISCSI command invalid arguments" msgstr "ISCSI コマンドã®å¼•æ•°ãŒç„¡åй" #: pypowervm/tasks/hdisk/_iscsi.py:88 msgid "ISCSI connection timer exired while trying to connect." msgstr "接続を試ã¿ã¦ã„ã‚‹ã¨ãã«ã€ISCSI æŽ¥ç¶šã‚¿ã‚¤ãƒžãƒ¼ã®æœŸé™ãŒåˆ‡ã‚Œã¾ã—ãŸã€‚" #: pypowervm/tasks/hdisk/_iscsi.py:90 msgid "ISCSI command could not lookup host" msgstr "ISCSI コマンドã¯ãƒ›ã‚¹ãƒˆã‚’å‚ç…§ã§ãã¾ã›ã‚“ã§ã—ãŸ" #: pypowervm/tasks/hdisk/_iscsi.py:92 #, python-format msgid "ISCSI command returned unexpected status = %s" msgstr "ISCSI コマンドãŒäºˆæœŸã—ãªã„çŠ¶æ³ %s ã‚’è¿”ã—ã¾ã—ãŸ" #: pypowervm/tasks/hdisk/_iscsi.py:151 msgid "ISCSI command performed on unsupported VIOS " msgstr "ISCSI コマンドãŒã€ã‚µãƒãƒ¼ãƒˆã•れã¦ã„ãªã„ VIOS ã§å®Ÿè¡Œã•れã¾ã—ãŸ" #: pypowervm/tasks/hdisk/_iscsi.py:287 #, python-format msgid "Scrub stale storage for LPAR IDs %s and retry iSCSI discovery." msgstr "LPAR ID %s ã®ä¸æ•´åˆãªã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚’修正ã—ã€iSCSI ディスカãƒãƒªãƒ¼ã‚’å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/tasks/monitor/util.py:452 msgid "" "Metric data is not available. This may be due to the metrics being " "recently initialized." 
msgstr "" "メトリック・データãŒã‚りã¾ã›ã‚“。 ã“ã®åŽŸå› ã¨ã—ã¦ã€ãƒ¡ãƒˆãƒªãƒƒã‚¯ãŒæœ€è¿‘åˆæœŸåŒ–ã•れãŸã“ã¨ãŒè€ƒãˆã‚‰ã‚Œã¾ã™ã€‚" #: pypowervm/tests/test_i18n.py:34 msgid "This is a test" msgstr "ã“れã¯ãƒ†ã‚¹ãƒˆã§ã™" #: pypowervm/tests/test_i18n.py:37 msgid "This is a message for which a translation doesn't exist" msgstr "ã“ã®ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã«ã¯ç¿»è¨³ãŒå­˜åœ¨ã—ã¾ã›ã‚“" #: pypowervm/utils/lpar_builder.py:216 #, python-format msgid "Processor units factor must be between 0.05 and 1.0. Value: %s" msgstr "処ç†è£…置係数㯠0.05 ã‹ã‚‰ 1.0 ã®ç¯„囲内ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。値: %s" #: pypowervm/utils/lpar_builder.py:244 #, python-format msgid "Logical partition name has invalid length. Name: %s" msgstr "è«–ç†åŒºç”»åã®é•·ã•ãŒç„¡åйã§ã™ã€‚ åå‰: %s" #: pypowervm/utils/lpar_builder.py:448 #, python-format msgid "Field '%(field)s' has invalid value: '%(value)s'" msgstr "フィールド「%(field)sã€ã«ç„¡åйãªå€¤ã€Œ%(value)sã€ãŒã‚りã¾ã™" #: pypowervm/utils/lpar_builder.py:503 msgid "None value is not valid." msgstr "値ãªã—ã¯ç„¡åйã§ã™ã€‚" #: pypowervm/utils/lpar_builder.py:510 #, python-format msgid "" "Value '%(value)s' is not valid for field '%(field)s' with acceptable " "choices: %(choices)s" msgstr "" "値「%(value)sã€ã¯ã€è¨±å®¹ã•ã‚Œã‚‹é¸æŠžé …ç›® %(choices)s ã®ã‚るフィールド" "「%(field)sã€ã«ã¯ç„¡åйã§ã™" #: pypowervm/utils/lpar_builder.py:535 #, python-format msgid "" "Field '%(field)s' has a value below the minimum. Value: %(value)s; " "Minimum: %(minimum)s" msgstr "" "フィールド「%(field)sã€ã®å€¤ãŒæœ€å°å€¤ã‚’下回ã£ã¦ã„ã¾ã™ã€‚ 値: %(value)s " "最å°: %(minimum)s" #: pypowervm/utils/lpar_builder.py:544 #, python-format msgid "" "Field '%(field)s' has a value above the maximum. Value: %(value)s; " "Maximum: %(maximum)s" msgstr "" "フィールド「%(field)sã€ã®å€¤ãŒæœ€å¤§å€¤ã‚’上回ã£ã¦ã„ã¾ã™ã€‚ 値: %(value)s " "最大: %(maximum)s" #: pypowervm/utils/lpar_builder.py:598 #, python-format msgid "" "The '%(desired_field)s' has a value above the '%(max_field)s' value. 
" "Desired: %(desired)s Maximum: %(maximum)s" msgstr "" "「%(desired_field)sã€ã®å€¤ãŒã€Œ%(max_field)sã€å€¤ã‚’上回ã£ã¦ã„ã¾ã™ã€‚ " "推奨: %(desired)s 最大: %(maximum)s" #: pypowervm/utils/lpar_builder.py:611 #, python-format msgid "" "The '%(desired_field)s' has a value below the '%(min_field)s' value. " "Desired: %(desired)s Minimum: %(minimum)s" msgstr "" "「%(desired_field)sã€ã®å€¤ãŒã€Œ%(min_field)sã€å€¤ã‚’下回ã£ã¦ã„ã¾ã™ã€‚ " "推奨: %(desired)s 最å°: %(minimum)s" #: pypowervm/utils/lpar_builder.py:655 #, python-format msgid "" "Memory value is not a multiple of the logical memory block size " "(%(lmb_size)s) of the host. Value: %(value)s" msgstr "" "メモリー値ãŒãƒ›ã‚¹ãƒˆã®è«–ç†ãƒ¡ãƒ¢ãƒªãƒ¼ãƒ»ãƒ–ロック・サイズ (%(lmb_size)s) ã®" "倿•°ã§ã¯ã‚りã¾ã›ã‚“。値: %(value)s" #: pypowervm/utils/lpar_builder.py:666 #, python-format msgid "" "The managed system does not support active memory expansion. The " "expansion factor value '%(value)s' is not valid." msgstr "" "ã“ã®ç®¡ç†å¯¾è±¡ã‚·ã‚¹ãƒ†ãƒ ã§ã¯ Active Memory Expansion ã¯ã‚µãƒãƒ¼ãƒˆã•れã¦ã„ã¾ã›ã‚“。 " "拡張係数値「%(value)sã€ãŒç„¡åйã§ã™ã€‚" #: pypowervm/utils/lpar_builder.py:672 #, python-format msgid "" "Active memory expansion value must be greater than or equal to 1.0 and " "less than or equal to 10.0. A value of 0 is also valid and indicates that" " AME is off. '%(value)s' is not valid." msgstr "" "Active Memory Expansion 値 㯠1.0 以上 10.0 以下ã§ãªã‘れ㰠" "ãªã‚Šã¾ã›ã‚“。 値 0 も有効ã§ã™ã€‚値 0 ã¯ã€AME ãŒã‚ªãƒ•ã§ã‚ã‚‹ã“ã¨ã‚’" "示ã—ã¾ã™ã€‚" "「%(value)sã€ã¯ç„¡åйã§ã™ã€‚" #: pypowervm/utils/retry.py:203 #, python-format msgid "" "Attempt %(retry)d of total %(total)d for URI %(uri)s. Error was a known " "retry response code: %(resp_code)s" msgstr "" "URI %(uri)s ã«ã¤ã„ã¦åˆè¨ˆ %(total)d 回ã®ã†ã¡ %(retry)d 回目ã®è©¦è¡Œã§ã™ã€‚ エラーã¯" "既知ã®å†è©¦è¡Œå¿œç­”コード %(resp_code)s ã§ã—ãŸ" #: pypowervm/utils/retry.py:210 #, python-format msgid "" "Attempt %(retry)d of %(total)d failed. Will retry. The exception was:\n" " %(except)s." 
msgstr "" "試行 %(retry)d / %(total)d ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚ å†è©¦è¡Œã—ã¾ã™ã€‚ 例外:\n" " %(except)s。" #: pypowervm/utils/transaction.py:348 msgid "Must supply either EntryWrapper or EntryWrapperGetter." msgstr "EntryWrapper ã¾ãŸã¯ EntryWrapperGetter を指定ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/utils/transaction.py:374 msgid "Must supply a valid Subtask." msgstr "有効ãªã‚µãƒ–タスクを指定ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/utils/transaction.py:378 #, python-format msgid "Duplicate 'provides' name %s." msgstr "é‡è¤‡ã«ã‚ˆã‚Šåå‰ %s ãŒã€Œæä¾›ã€ã•れã¾ã™ã€‚" #: pypowervm/utils/transaction.py:453 #, python-format msgid "WrapperTask %s has no Subtasks; no-op execution." msgstr "WrapperTask %s ã«ã¯ã‚µãƒ–タスクãŒã‚りã¾ã›ã‚“。æ“作ã¯å®Ÿè¡Œã•れã¾ã›ã‚“。" #: pypowervm/utils/transaction.py:585 msgid "Must supply either a list of EntryWrappers or a FeedGetter." msgstr "EntryWrapper ã®ãƒªã‚¹ãƒˆã‚’指定ã™ã‚‹ã‹ã€ã¾ãŸã¯ FeedGetter を指定ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/utils/transaction.py:764 #, python-format msgid "FeedTask %s has no Subtasks; no-op execution." msgstr "FeedTask %s ã«ã¯ã‚µãƒ–タスクãŒã‚りã¾ã›ã‚“。æ“作ã¯å®Ÿè¡Œã•れã¾ã›ã‚“。" #: pypowervm/utils/transaction.py:789 #, python-format msgid "" "FeedTask %s experienced multiple exceptions. They are logged individually" " below." msgstr "" "FeedTask %s ã§è¤‡æ•°ã®ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚ ãれらã®ä¾‹å¤–ã¯å€‹ã€…ã«è¨˜éŒ²ã•れã¦" " ã„ã¾ã™ã€‚以下をå‚ç…§ã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:174 #, python-format msgid "" "Insufficient available %(res_name)s on host for virtual machine " "'%(instance_name)s' (%(requested)s requested, %(avail)s available)" msgstr "" "仮想マシン「%(instance_name)sã€ã«ã¤ã„ã¦ãƒ›ã‚¹ãƒˆã§ä½¿ç”¨ã§ãã‚‹ %(res_name)s ãŒ" "ä¸è¶³ã—ã¦ã„ã¾ã™ (è¦æ±‚: %(requested)sã€ä½¿ç”¨å¯èƒ½: %(avail)s)" #: pypowervm/utils/validation.py:209 msgid "memory" msgstr "メモリー" #: pypowervm/utils/validation.py:230 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum memory. Power off virtual machine %s and try again." 
msgstr "" "最å°ãƒ¡ãƒ¢ãƒªãƒ¼ã¾ãŸã¯æœ€å¤§ãƒ¡ãƒ¢ãƒªãƒ¼ã‚’変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’オフã«ã™ã‚‹" "å¿…è¦ãŒã‚りã¾ã™ã€‚ 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:235 #, python-format msgid "" "The virtual machine must be powered off before changing the expansion " "factor. Power off virtual machine %s and try again." msgstr "" "拡張係数を変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’オフã«ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" " 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:328 msgid "CPUs" msgstr "CPU" #: pypowervm/utils/validation.py:344 msgid "processing units" msgstr "処ç†è£…ç½®" #: pypowervm/utils/validation.py:385 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processors. Power off virtual machine %s and try again." msgstr "" "最å°ãƒ—ロセッサー数ã¾ãŸã¯æœ€å¤§ãƒ—ロセッサー数を変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’" "オフã«ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚ 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:395 #, python-format msgid "" "The virtual machine must be powered off before changing the minimum or " "maximum processor units. Power off virtual machine %s and try again." msgstr "" "最å°å‡¦ç†è£…置数ã¾ãŸã¯æœ€å¤§å‡¦ç†è£…置数を変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’オフã«ã™ã‚‹" "å¿…è¦ãŒã‚りã¾ã™ã€‚ 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:411 #, python-format msgid "" "The virtual machine must be powered off before changing the processor " "compatibility mode. Power off virtual machine %s and try again." msgstr "" "プロセッサー互æ›ãƒ¢ãƒ¼ãƒ‰ã‚’変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’オフã«ã™ã‚‹å¿…è¦ãŒ" "ã‚りã¾ã™ã€‚ 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:419 #, python-format msgid "" "The virtual machine must be powered off before changing the processing " "mode. Power off virtual machine %s and try again." 
msgstr "" "処ç†ãƒ¢ãƒ¼ãƒ‰ã‚’変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’オフã«ã™ã‚‹å¿…è¦ãŒ" "ã‚りã¾ã™ã€‚ 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/utils/validation.py:448 #, python-format msgid "" "The desired processors (%(vcpus)d) cannot be above the maximum allowed " "processors per partition (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "推奨プロセッサー数 (%(vcpus)d) ã¯ä»®æƒ³ãƒžã‚·ãƒ³ã€Œ%(instance_name)sã€ã®åŒºç”»ã‚ãŸã‚Šã«" "許å¯ã•れる最大数 (%(max_allowed)d) ã‚’è¶…ãˆã¦ã¯ãªã‚Šã¾ã›ã‚“。" #: pypowervm/utils/validation.py:460 #, python-format msgid "" "The maximum processors (%(vcpus)d) cannot be above the maximum system " "capacity processor limit (%(max_allowed)d) for virtual machine " "'%(instance_name)s'." msgstr "" "最大プロセッサー数 (%(vcpus)d) ã¯ä»®æƒ³ãƒžã‚·ãƒ³ã€Œ%(instance_name)sã€ã®æœ€å¤§" "システム・キャパシティー・プロセッサーé™åº¦ (%(max_allowed)d) ã‚’è¶…ãˆã¦ã¯" "ãªã‚Šã¾ã›ã‚“。" #: pypowervm/utils/validation.py:533 #, python-format msgid "" "The virtual machine must be powered off before changing the simplified " "remote restart capability. Power off virtual machine %s and try again." msgstr "" "簡易リモートå†å§‹å‹•機能を変更ã™ã‚‹å‰ã«ä»®æƒ³ãƒžã‚·ãƒ³ã®é›»æºã‚’オフã«ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚ 仮想マシン %s ã®é›»æºã‚’オフã«ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/base_partition.py:434 msgid "Partition does not have an active RMC connection." msgstr "アクティブ㪠RMC 接続ãŒãƒ‘ーティションã«ã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/base_partition.py:437 #, python-format msgid "Partition does not have an active DLPAR capability for %s." msgstr "%s 用ã®ã‚¢ã‚¯ãƒ†ã‚£ãƒ–㪠DLPAR 機能ãŒãƒ‘ーティションã«ã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/base_partition.py:449 msgid "I/O" msgstr "入出力" #: pypowervm/wrappers/base_partition.py:459 msgid "Memory" msgstr "メモリー" #: pypowervm/wrappers/base_partition.py:469 msgid "Processors" msgstr "プロセッサー" #: pypowervm/wrappers/base_partition.py:649 #, python-format msgid "Invalid KeylockPos '%s'." 
msgstr "KeylockPos「%sã€ãŒç„¡åйã§ã™ã€‚" #: pypowervm/wrappers/base_partition.py:660 #, python-format msgid "Invalid BootMode '%s'." msgstr "BootMode「%sã€ãŒç„¡åйã§ã™ã€‚" #: pypowervm/wrappers/base_partition.py:1501 msgid "IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead." msgstr "IOSlot.adapter ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。代ã‚り㫠IOSlot.io_adapter を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/enterprise_pool.py:143 #, python-format msgid "" "Unable to determine master management console MTMS (machine type, model, " "serial number) from %(identifier)s because no %(param)s was marked as the" " master console for the pool." msgstr "" "%(param)s ãŒãƒ—ールã®ãƒžã‚¹ã‚¿ãƒ¼ãƒ»ã‚³ãƒ³ã‚½ãƒ¼ãƒ«ã¨ã—ã¦ãƒžãƒ¼ã‚¯ã•れã¦ã„ãªã‹ã£ãŸãŸã‚ã€" "%(identifier)s ã‹ã‚‰ã®ãƒžã‚¹ã‚¿ãƒ¼ç®¡ç†ã‚³ãƒ³ã‚½ãƒ¼ãƒ« MTMS (マシン・タイプã€ãƒ¢ãƒ‡ãƒ«ã€" "シリアル番å·) を判別ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/wrappers/entry_wrapper.py:251 msgid "Cannot set uuid." msgstr "UUID を設定ã§ãã¾ã›ã‚“。" #: pypowervm/wrappers/entry_wrapper.py:443 #, python-format msgid "Cannot convert %(property_name)s='%(value)s' in object %(pvmobject)s" msgstr "オブジェクト %(pvmobject)s ã§ %(property_name)s='%(value)s' を変æ›ã§ãã¾ã›ã‚“" #: pypowervm/wrappers/entry_wrapper.py:586 msgid "" "Refusing set href over multiple links.\n" "Path: %{path}s\n" "Number of links found: %{nlinks}d" msgstr "" "複数ã®ãƒªãƒ³ã‚¯ã«ã‚ãŸã£ã¦ href を設定ã™ã‚‹ã“ã¨ã‚’æ‹’å¦ã—ã¦ã„ã¾ã™ã€‚\n" "パス: %{path}s\n" "見ã¤ã‹ã£ãŸãƒªãƒ³ã‚¯ã®æ•°: %{nlinks}d" #: pypowervm/wrappers/entry_wrapper.py:635 msgid "Refusing to construct and wrap an Element without a tag." msgstr "ã‚¿ã‚°ãªã—ã§ã‚¨ãƒ¬ãƒ¡ãƒ³ãƒˆã‚’æ§‹æˆãŠã‚ˆã³ãƒ©ãƒƒãƒ—ã™ã‚‹ã“ã¨ã‚’æ‹’å¦ã—ã¦ã„ã¾ã™ã€‚" #: pypowervm/wrappers/entry_wrapper.py:760 msgid "Response is missing 'entry' property." msgstr "応答ã«ã€Œentryã€ãƒ—ロパティーãŒã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/entry_wrapper.py:774 #, python-format msgid "Must supply a Response or Entry to wrap. 
Got %s" msgstr "ラップã™ã‚‹å¿œç­”ã¾ãŸã¯é …目を指定ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚%s ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #: pypowervm/wrappers/entry_wrapper.py:861 msgid "" "Developer error: specify 'parent' or ('parent_type' and 'parent_uuid') to" " retrieve a CHILD object." msgstr "" "開発者エラー: CHILD オブジェクトをå–å¾—ã™ã‚‹ã«ã¯ã€ã€Œparentã€ã¾ãŸã¯ (「parent_typeã€ã¨ã€Œparent_uuidã€) を指定ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/entry_wrapper.py:866 msgid "Specify either 'uuid' or 'root_id' when requesting a ROOT object." msgstr "ãƒ«ãƒ¼ãƒˆãƒ»ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆã‚’è¦æ±‚ã™ã‚‹ã¨ãã¯ã€ã€Œuuidã€ã¾ãŸã¯ã€Œroot_idã€ã‚’指定ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/entry_wrapper.py:879 msgid "" "Both parent_type and parent_uuid are required when retrieving a CHILD " "feed or entry." msgstr "" "å­ãƒ•ィードã¾ãŸã¯å­é …目をå–å¾—ã™ã‚‹ã¨ãã¯ã€parent_type 㨠parent_uuid ã®ä¸¡æ–¹ãŒ" "å¿…è¦ã§ã™ã€‚" #: pypowervm/wrappers/entry_wrapper.py:882 msgid "Specify the parent's UUID via the parent_uuid parameter." msgstr "parent_uuid パラメーターを使用ã—ã¦è¦ªã® UUID を指定ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/entry_wrapper.py:886 msgid "Specify either 'uuid' or 'child_id' when requesting a CHILD object." msgstr "å­ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆã‚’è¦æ±‚ã™ã‚‹ã¨ãã¯ã€ã€Œuuidã€ã¾ãŸã¯ã€Œchild_idã€ã‚’指定ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/entry_wrapper.py:1001 msgid "Parent UUID specified without parent type." msgstr "親 UUID ãŒè¦ªã‚¿ã‚¤ãƒ—ãªã—ã§æŒ‡å®šã•れã¾ã—ãŸã€‚" #: pypowervm/wrappers/entry_wrapper.py:1004 msgid "The search() method requires exactly one key=value argument." msgstr "search() メソッドã«ã¯ã€key=value 引数㌠1 ã¤ã®ã¿å¿…è¦ã§ã™ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1035 #, python-format msgid "Wrapper class %(class)s does not support search key '%(key)s'." msgstr "ラッパー・クラス %(class)s ã¯ã€æ¤œç´¢ã‚­ãƒ¼ã€Œ%(key)sã€ã‚’サãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“。" #: pypowervm/wrappers/entry_wrapper.py:1136 msgid "" "The 'xag' parameter to EntryWrapper.update is deprecated! At best, using" " it will result in a no-op. At worst, it will give you incurable etag " "mismatch errors." 
msgstr "" "EntryWrapper.update ã«å¯¾ã™ã‚‹ã€Œxagã€ãƒ‘ãƒ©ãƒ¡ãƒ¼ã‚¿ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 ã“れを" "使用ã—ãŸå ´åˆã¯ã€ã‚ˆãã¦ã‚‚何もæ“作ãŒè¡Œã‚れã¾ã›ã‚“。 最悪ã®å ´åˆã¯ã€ä¿®æ­£ä¸å¯ã® etag " "ä¸ä¸€è‡´ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã™ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1406 msgid "No such child element." msgstr "ãã®ã‚ˆã†ãªå­ã‚¨ãƒ¬ãƒ¡ãƒ³ãƒˆã¯ã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/entry_wrapper.py:1467 msgid "Cannot set UUID on Wrapper with no Metadata." msgstr "メタデータãªã—ã«ãƒ©ãƒƒãƒ‘ー㧠UUID を設定ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“。" #: pypowervm/wrappers/entry_wrapper.py:1472 #, python-format msgid "uuid value not valid: %s" msgstr "UUID 値ãŒç„¡åйã§ã™: %s" #: pypowervm/wrappers/entry_wrapper.py:1527 msgid "Must specify a Wrapper subclass." msgstr "ラッパー・サブクラスを指定ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/wrappers/entry_wrapper.py:1536 msgid "Must specify both parent class and parent UUID, or neither." msgstr "親クラスã¨è¦ª UUID ã®ä¸¡æ–¹ã‚’指定ã™ã‚‹ã‹ã€ã©ã¡ã‚‰ã‚‚指定ã—ãªã„よã†ã«ã™ã‚‹" "å¿…è¦ãŒã‚りã¾ã™ã€‚" #: pypowervm/wrappers/job.py:276 #, python-format msgid "Job %(job_id)s monitoring for %(time)i seconds." msgstr "ジョブ %(job_id)s 㯠%(time)i 秒間モニターã•れã¦ã„ã¾ã™ã€‚" #: pypowervm/wrappers/job.py:327 #, python-format msgid "" "Issuing cancel request for job %(job_id)s. Will poll the job indefinitely" " for termination." msgstr "" "ジョブ %(job_id)s ã®ã‚­ãƒ£ãƒ³ã‚»ãƒ«è¦æ±‚を発行ã—ã¦ã„ã¾ã™ã€‚ ã“ã®ã‚¸ãƒ§ãƒ–ã¯çµ‚了ã«ã¤ã„ã¦ãƒãƒ¼ãƒªãƒ³ã‚°ã•れã¾ã™ã€‚" #: pypowervm/wrappers/job.py:343 #, python-format msgid "Job %s not deleted. Job is in running state." msgstr "ジョブ %s ã¯å‰Šé™¤ã•れã¾ã›ã‚“。ジョブã¯å®Ÿè¡Œä¸­çŠ¶æ…‹ã§ã™ã€‚" #: pypowervm/wrappers/logical_partition.py:169 msgid "LPAR is not in an active state." msgstr "LPAR ã¯ã‚¢ã‚¯ãƒ†ã‚£ãƒ–状態ã§ã¯ã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/logical_partition.py:179 msgid "Target system does not have the IBM i LPAR Mobility Capability." msgstr "ターゲット・システム㫠IBM i LPAR モビリティー機能ãŒã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/logical_partition.py:183 msgid "IBM i LPAR does not have restricted I/O." 
msgstr "IBM i LPAR ã«åˆ¶é™ä»˜ã入出力ãŒã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/logical_partition.py:186 msgid "Source system does not have the IBM i LPAR Mobility Capability." msgstr "ソース・システム㫠IBM i LPAR モビリティー機能ãŒã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/logical_partition.py:190 msgid "LPAR does not have an active RMC connection." msgstr "アクティブ㪠RMC 接続㌠LPAR ã«ã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/logical_partition.py:193 msgid "LPAR is the management partition" msgstr "LPAR ã¯ç®¡ç†åŒºç”»ã§ã™ã€‚" #: pypowervm/wrappers/logical_partition.py:197 msgid "LPAR is not available for LPM due to missing DLPAR capabilities." msgstr "DLPAR 機能ãŒãªã„ãŸã‚ã€LPAR 㯠LPM ã«ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。" #: pypowervm/wrappers/logical_partition.py:214 #: pypowervm/wrappers/logical_partition.py:223 #: pypowervm/wrappers/logical_partition.py:231 msgid "" "This is not the property you are looking for. Use srr_enabled in a " "NovaLink environment." msgstr "" "ã“れã¯ã€è¦‹ã¤ã‘よã†ã¨ã—ã¦ã„るプロパティーã§ã¯ã‚りã¾ã›ã‚“。 NovaLink 環境ã§ã¯" "srr_enabled を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/logical_partition.py:267 #, python-format msgid "Invalid IPLSrc '%s'." msgstr "IPLSrc「%sã€ãŒç„¡åйã§ã™ã€‚" #: pypowervm/wrappers/managed_system.py:506 msgid "This property is deprecated! Use pci_subsys_dev_id instead." msgstr "ã“ã®ãƒ—ãƒ­ãƒ‘ãƒ†ã‚£ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 代ã‚り㫠pci_subsys_dev_id を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/managed_system.py:518 msgid "This property is deprecated! Use pci_rev_id instead." msgstr "ã“ã®ãƒ—ãƒ­ãƒ‘ãƒ†ã‚£ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 代ã‚り㫠pci_rev_id を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/managed_system.py:534 msgid "This property is deprecated! Use pci_subsys_vendor_id instead." msgstr "ã“ã®ãƒ—ãƒ­ãƒ‘ãƒ†ã‚£ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 代ã‚り㫠pci_subsys_vendor_id を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/managed_system.py:546 msgid "This property is deprecated! Use drc_index instead." 
msgstr "ã“ã®ãƒ—ãƒ­ãƒ‘ãƒ†ã‚£ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 代ã‚り㫠drc_index を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/managed_system.py:558 msgid "This property is deprecated! Use drc_name instead." msgstr "ã“ã®ãƒ—ãƒ­ãƒ‘ãƒ†ã‚£ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。 代ã‚り㫠drc_name を使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/network.py:1160 msgid "Invalid parent spec for CNA.create." msgstr "CNA.create ã®è¦ªã‚¹ãƒšãƒƒã‚¯ãŒç„¡åйã§ã™ã€‚" #: pypowervm/wrappers/storage.py:886 #, python-format msgid "" "PV had encoded pg83 descriptor \"%(pg83_raw)s\", but it failed to decode " "(%(type_error)s)." msgstr "" "PV ã§ pg83 ディスクリプター「%(pg83_raw)sã€ãŒã‚¨ãƒ³ã‚³ãƒ¼ãƒ‰ã•れã¾ã—ãŸãŒã€(%(type_error)s) をデコードã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" #: pypowervm/wrappers/virtual_io_server.py:163 msgid "" "The 'xags' property of the VIOS EntryWrapper class is deprecated! Please" " use values from pypowervm.const.XAG instead." msgstr "" "VIOS EntryWrapper クラスã®ã€Œxagsã€ãƒ—ãƒ­ãƒ‘ãƒ†ã‚£ãƒ¼ã¯æŽ¨å¥¨ã•れã¾ã›ã‚“。代ã‚り㫠" "pypowervm.const.XAG ã®å€¤ã‚’使用ã—ã¦ãã ã•ã„。" #: pypowervm/wrappers/virtual_io_server.py:400 msgid "Partition of VIOS type is not LPM capable" msgstr "VIOS タイプã®åŒºç”»ã¯ LPM 対応ã§ã¯ã‚りã¾ã›ã‚“。" #: pypowervm/wrappers/virtual_io_server.py:670 msgid "Can't specify target device LUA without a backing storage device!" msgstr "ãƒãƒƒã‚­ãƒ³ã‚°ãƒ»ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ãƒ»ãƒ‡ãƒã‚¤ã‚¹ãŒãªã‘れã°ã€ã‚¿ãƒ¼ã‚²ãƒƒãƒˆãƒ»ãƒ‡ãƒã‚¤ã‚¹ LUA を指定ã§ãã¾ã›ã‚“。" # ENGL1SH_VERS10N 29539_40 DO NOT REMOVE OR CHANGE THIS LINE # T9N_SRC_ID 23 # T9N_SH1P_STR1NG VC142AAP001 2 pypowervm-1.1.24/pypowervm/util.py0000664000175000017500000006072613571367171016662 0ustar neoneo00000000000000# Copyright 2014, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Pervasive/commonly-used utilities.""" import abc import datetime as dt import errno import hashlib import math import re import six import socket import ssl try: import urlparse except ImportError: import urllib.parse as urlparse from oslo_log import log as logging from oslo_utils import units from pyasn1.codec.der import decoder as der_decoder from pyasn1_modules import rfc2459 from pypowervm import const from pypowervm.i18n import _ # Set up logging LOG = logging.getLogger(__name__) XPATH_DELIM = '/' def dice_href(href, include_scheme_netloc=False, include_query=True, include_fragment=True): """Parse, sanitize, and reassemble an href. :param href: A full link string of the form ':///;?#'. This method also works if the :// is omitted, (but obviously include_scheme_netloc has no effect). :param include_scheme_netloc: If True, the :// portion is included in the returned string. If False, it is stripped. :param include_query: If True, any ? portion of the link will be included in the return value. :param include_fragment: If True, any # portion of the link will be included in the return value. :return: A string representing the specified portion of the input link. """ parsed = urlparse.urlparse(href) ret = '' if include_scheme_netloc: ret += parsed.scheme + '://' + parsed.netloc ret += parsed.path # trim trailing '/'s from path, if present while ret.endswith('/'): ret = ret[:-1] if include_query and parsed.query: ret += '?' 
+ parsed.query if include_fragment and parsed.fragment: ret += '#' + parsed.fragment return ret def check_and_apply_xag(path, xag): """Validate extended attribute groups and produce the correct path. If the existing path already has a group=* other than None, we use it. However, if there is a proposed xag - including [] - it must match the existing xag, or ValueError is raised. Otherwise, we construct the group=* query param according to the proposed xag list, as follows: If xag is None, use group=None. If xag is [] (the empty list), omit the group= query param entirely. Otherwise the group= value is a sorted, comma-separated string of the xag list. E.g. for xag=['b', 'c', 'a'], produce 'group=a,b,c'. :param path: Input path or href, which may or may not contain a query string, which may or may not contain a group=*. (Multiple group=* not handled.) Values in the group=* must be alpha sorted. :param xag: Iterable of proposed extended attribute values to be included in the query string of the resulting path. :return: path, with at most one group=* in the query string. That group= query param's value will be alpha sorted. """ parsed = urlparse.urlsplit(path) # parse_qs returns { 'key': ['value'], ... } qparms = urlparse.parse_qs(parsed.query) if parsed.query else {} path_xag = qparms.pop('group', ['None'])[0] if xag is None: arg_xag = 'None' else: # Ensure we have a mutable copy to sort xag = list(xag) xag.sort() arg_xag = ','.join(map(str, xag)) # may be '' if path_xag == 'None': # No existing xag. (Treat existing 'group=None' as if not there.) # Use whatever was proposed (which may be implicit group=None or # may be nothing). path_xag = arg_xag elif arg_xag != 'None': # There was xag in the path already, as well as proposed xag (maybe # empty). Previous xag must match proposed xag if specified # (including empty). 
def extend_basepath(href, add):
    """Extend the base path of an href, keeping querystring/fragment intact.

    For example, extend_basepath('http://server:1234/foo?a=b&c=d#frag',
    '/bar') => 'http://server:1234/foo/bar?a=b&c=d#frag'

    :param href: Path or href to augment.  Scheme, netloc, query string,
                 and fragment are allowed but not required.
    :param add: String to add onto the base path of the href.  Must not
                contain unescaped special characters such as '?', '&', '#'.
    :return: The augmented href.
    """
    pieces = urlparse.urlsplit(href)
    return urlparse.urlunsplit((pieces.scheme, pieces.netloc,
                                pieces.path + add, pieces.query,
                                pieces.fragment))


def is_instance_path(href):
    """Does the path or href represent an instance (end with UUID)?

    :param href: Path or href to check.  Scheme, netloc, query string, and
                 fragment are allowed but not required.
    :return: Truthy (a regex match object) if href's path ends with a UUID,
             indicating that it represents an instance (as opposed to a
             Feed or some special URI such as quick or search).
    """
    path = dice_href(href, include_scheme_netloc=False, include_query=False,
                     include_fragment=False)
    last_segment = path.rsplit('/', 1)[1]
    return re.match(const.UUID_REGEX_WORD, last_segment)
""" path = dice_href(href, include_scheme_netloc=False, include_query=False, include_fragment=False) return re.match(const.UUID_REGEX_WORD, path.rsplit('/', 1)[1]) # TODO(IBM): fix (for MITM attacks) or remove (if using loopback only) def validate_certificate(host, port, certpath, certext): hostname = re.sub('[:.]', '_', host) cert_file = '%s%s%s' % (certpath, hostname, certext) try: with open(cert_file, 'r') as f: # Retrieve previously trusted certificate trusted_cert = ssl.PEM_cert_to_DER_cert(f.read()) except Exception: # found no trusted certificate return False # Read current certificate from host conn = None try: # workaround for http://bugs.python.org/issue11811 # should go back to using get_server_certificate when fixed # (Issue is resolved as of python 3.3. Workaround still needed for # python 2.7 support.) # rawcert = ssl.get_server_certificate((host, port)) # current_cert = ssl.PEM_cert_to_DER_cert(rawcert) conn = socket.create_connection((host, port)) sock = ssl.wrap_socket(conn) current_cert = sock.getpeercert(True) except Exception: # couldn't get certificate from host return False finally: if conn is not None: conn.shutdown(socket.SHUT_RDWR) conn.close() # Verify certificate finger prints are the same if not (hashlib.sha1(trusted_cert).digest() == hashlib.sha1(current_cert).digest()): return False # check certificate expiration try: cert = der_decoder.decode(current_cert, asn1Spec=rfc2459.Certificate())[0] tbs = cert.getComponentByName('tbsCertificate') validity = tbs.getComponentByName('validity') not_after = validity.getComponentByName('notAfter').getComponent() not_after = dt.datetime.strptime(str(not_after), '%y%m%d%H%M%SZ') if dt.datetime.utcnow() >= not_after: LOG.warning(_('Certificate has expired.')) return False except Exception: LOG.exception('error parsing cert for expiration check') return False return True def get_req_path_uuid(path, preserve_case=False, root=False): """Extract request target uuid of sanitized path. 
def get_uuid_xag_from_path(path):
    """Return (lowercased target UUID, xag string or None) for a path."""
    uuid = get_req_path_uuid(path)
    parts = urlparse.urlsplit(path)
    # parse_qs yields {'key': ['value', ...], ...}
    params = urlparse.parse_qs(parts.query) if parts.query else {}
    return uuid.lower(), params.get('group', [None])[0]


def convert_bytes_to_gb(bytes_, low_value=.0001, dp=None):
    """Convert an integer of bytes to a decimal representation of gigabytes.

    If the value is too low, the 'low_value' is returned instead, which is
    useful when converting a small number of bytes (ex. 50) into gigabytes.
    Rounding may be required.

    :param bytes_: The integer number of bytes.
    :param low_value: The minimum value that should be returned.  (Note: if
                      dp is also specified, the value returned may be
                      rounded up and thus be higher than low_value.)
    :param dp: If specified, the value is rounded up to the specified
               number of decimal places by round_gb_size_up.  (Note: None
               and zero are very different.)
    :returns: The decimal value.
    """
    gb_size = bytes_ / float(units.Gi)
    # Clamp tiny inputs up to low_value so the result is never effectively
    # zero.
    if gb_size < low_value:
        gb_size = low_value
    return round_gb_size_up(gb_size, dp=dp) if dp is not None else gb_size


def round_gb_size_up(gb_size, dp=2):
    """Round a GB disk size (as a decimal float) up to suit the platform.

    Use this method to ensure that new vdisks, LUs, etc. are big enough, as
    the platform generally rounds inputs to the nearest [whatever].  For
    example, a disk of size 4.321GB may wind up at 4.32GB after rounding,
    possibly leaving insufficient space for the image.

    :param gb_size: A decimal float representing the GB size to be rounded.
    :param dp: The number of decimal places to round (up) to.  May be zero
               (round to next highest integer) or negative (e.g. -1 rounds
               to the next highest ten).
    :return: A new decimal float which is greater than or equal to the
             input.
    """
    factor = 10.0 ** dp
    return float(math.ceil(gb_size * factor)) / factor
def sanitize_mac_for_api(mac):
    """Convert a generalized mac address to one for the API.

    Takes any standard mac (case-insensitive, with or without colons) and
    formats it to uppercase with colons removed, as the API expects.

    :param mac: The input mac.
    :returns: The sanitized mac.
    """
    return mac.replace(':', '').upper()


def sanitize_bool_for_api(bool_val):
    """Sanitize a boolean value for use in the API."""
    return str(bool_val).lower()


def sanitize_float_for_api(float_val, precision=2):
    """Sanitize a float value for use in the API."""
    return ('%.' + str(precision) + 'f') % float(float_val)


def sanitize_percent_for_api(float_val, precision=2):
    """Sanitize a percent value for use in the API.

    :param float_val: A float where valid values are 0.0 <= x <= 1.0.  For
                      example the input 0.02 will produce output '2%'.
    :return: A string representation of the passed percentage.
    """
    fraction = float(float_val)
    # Comparison written this way deliberately; it also lets NaN through,
    # matching the historical behavior.
    if fraction < 0 or fraction > 1:
        raise ValueError('A float value 0 <= x <= 1.0 must be provided.')
    return sanitize_float_for_api(fraction * 100, precision) + '%'


def sanitize_wwpn_for_api(wwpn):
    """Update the format of the WWPN to match the expected PowerVM format.

    :param wwpn: The original WWPN.
    :return: A WWPN of the format expected by the API (uppercase, no
             colons).
    """
    return wwpn.upper().replace(':', '')
""" return wwpn.upper().replace(':', '') def sanitize_file_name_for_api(name, prefix='', suffix='', max_len=const.MaxLen.FILENAME_DEFAULT): """Generate a sanitized file name based on PowerVM's FileName.Pattern. :param name: The base name to sanitize. :param prefix: (Optional) A prefix to prepend to the 'name'. No delimiter is added. :param suffix: (Optional) A suffix to append to the 'name'. No delimiter is added. :param max_len: (Optional) The maximum allowable length of the final sanitized string. Defaults to the API's defined length for FileName.Pattern. :return: A string scrubbed of all forbidden characters and trimmed for length as necessary. """ def _scrub(in_name): """Returns in_name with illegal characters replaced with '_'.""" return re.sub(r'[^.0-9A-Z_a-z]', '_', in_name) name, prefix, suffix = (_scrub(val) for val in (name, prefix, suffix)) base_len = max_len - len(prefix) - len(suffix) if base_len <= 0: raise ValueError(_("Prefix and suffix together may not be more than " "%d characters."), max_len - 1) name = name[:base_len] ret = prefix + name + suffix if not len(ret): raise ValueError(_("Total length must be at least 1 character.")) return ret def sanitize_partition_name_for_api(name, trunc_ok=True): """Sanitize a string to be suitable for use as a partition name. PowerVM's partition name restrictions are: - Between 1 and 31 characters, inclusive; - Containing ASCII characters between 0x20 (space) and 0x7E (~), inclusive, except ()\<>*$&?|[]'"` :param name: The name to scrub. Invalid characters will be replaced with '_'. :param trunc_ok: If True, and name exceeds 31 characters, it is truncated. If False, and name exceeds 31 characters, ValueError is raised. :return: The scrubbed string. :raise ValueError: If name is None or zero length; or if it exceeds length 31 and trunk_ok=False. 
""" max_len = 31 if not name: raise ValueError(_("The name parameter must be at least one character " "long.")) if not trunc_ok and len(name) > max_len: raise ValueError(_("The name parameter must not exceed %d characters " "when trunk_ok is False."), max_len) return re.sub(r'[^- !#%+,./0-9:;=@A-Z^_a-z{}]', '_', name)[:max_len] def find_equivalent(elem, find_list): """Returns the element from the list that is equal to the one passed in. For remove operations and what not, the exact object may need to be provided. This method will find the functionally equivalent element from the list. :param elem: The original element. :param find_list: The list to search through. :returns: An element from the that is functionally equivalent (based on __eq__). If it does not exist, None is returned. """ for find_elem in find_list: if find_elem == elem: return find_elem return None def find_wrapper(haystack, needle_uuid): """Finds the corresponding wrapper from a list given the UUID. :param haystack: A list of wrappers. Usually generated from a 'feed' that has been loaded via the wrapper's wrap(response) method. :param needle_uuid: The UUID of the object to find in the list. :return: The corresponding wrapper for that UUID. If not found, None. """ for wrapper in haystack: if wrapper.uuid == needle_uuid: return wrapper return None def xpath(*toks): """Constructs an XPath out of the passed-in string components.""" return XPATH_DELIM.join(toks) def part_id_by_loc_code(loc_code): """Get a partition short ID for a provided virtual device location code. All location codes on a virtual device are of the form: ..-V-C :return: An int of the associated partition short ID. """ id_match = re.search('.*-V(.+?)-.*', loc_code) return int(id_match.group(1)) if id_match else None def xag_attrs(xagstr, base=const.DEFAULT_SCHEMA_ATTR): """Produce XML attributes for a property using extended attribute groups. :param xagstr: Extended attribute group name (from pypowervm.const.XAG). 
def my_partition_id():
    """Return the short ID (not UUID) of the current partition, as an int.

    Parses /proc/ppc64/lparcfg looking for the 'partition_id=' line.
    Returns None (implicitly) if no such line is present.
    """
    with open('/proc/ppc64/lparcfg') as cfg:
        for cfg_line in cfg:
            if cfg_line.startswith('partition_id='):
                return int(cfg_line.split('=')[1].rstrip())


def parent_spec(parent, parent_type, parent_uuid):
    """Produce a canonical parent type and UUID suitable for read().

    :param parent: EntryWrapper representing the parent.  If specified,
                   parent_type and parent_uuid are ignored.
    :param parent_type: EntryWrapper class or schema_type string
                        representing the schema type of the parent.
    :param parent_uuid: String UUID of the parent.
    :return parent_type: String schema type of the parent.  The parent_type
                         and parent_uuid returns are both None or both
                         valid strings.
    :return parent_uuid: String UUID of the parent.  The parent_type and
                         parent_uuid returns are both None or both valid
                         strings.
    :raise ValueError: If parent is None and parent_type xor parent_uuid is
                       specified.
    """
    if parent is None and parent_type is None and parent_uuid is None:
        return None, None
    if parent is not None:
        # The wrapper wins; the other params are ignored.
        return parent.schema_type, parent.uuid
    if parent_type is None or parent_uuid is None:
        # parent_type xor parent_uuid specified
        raise ValueError(_("Developer error: partial parent specification."))
    # Allow either string or class for parent_type.
    if hasattr(parent_type, 'schema_type'):
        parent_type = parent_type.schema_type
    elif type(parent_type) is not str:
        raise ValueError(_("Developer error: parent_type must be either a "
                           "string schema type or a Wrapper subclass."))
    return parent_type, parent_uuid
""" if all(param is None for param in (parent, parent_type, parent_uuid)): return None, None if parent is not None: return parent.schema_type, parent.uuid if any(param is None for param in (parent_type, parent_uuid)): # parent_type xor parent_uuid specified raise ValueError(_("Developer error: partial parent specification.")) # Allow either string or class for parent_type if hasattr(parent_type, 'schema_type'): parent_type = parent_type.schema_type elif type(parent_type) is not str: raise ValueError(_("Developer error: parent_type must be either a " "string schema type or a Wrapper subclass.")) return parent_type, parent_uuid def retry_io_command(base_cmd, *argv): """PEP475: Retry syscalls if EINTR signal received. https://www.python.org/dev/peps/pep-0475/ Certain system calls can be interrupted by signal 4 (EINTR) for no good reason. Per PEP475, these signals should be ignored. This is implemented by default at the lowest level in py3, but we have to account for it in py2. :param base_cmd: The syscall to wrap. :param argv: Arguments to the syscall. :return: The return value from invoking the syscall. """ while True: try: return base_cmd(*argv) except EnvironmentError as enve: if enve.errno != errno.EINTR: raise @six.add_metaclass(abc.ABCMeta) class _AllowedList(object): """For REST fields taking 'ALL', 'NONE', or [list of values]. Subclasses should override parse_val and sanitize_for_api. """ ALL = 'ALL' NONE = 'NONE' _GOOD_STRINGS = (ALL, NONE) @staticmethod def parse_val(val): """Parse a single list value from string to appropriate native type. :param val: A single value to parse. :return: The converted value. """ # Default native type: str return val @staticmethod def sanitize_for_api(val): """Convert a native value to the expected string format for REST. :param val: The native value to convert. :return: Sanitized string value suitable for the REST API. :raise ValueError: If the string can't be converted. 
""" # Default: Just string-convert return str(val) @classmethod def unmarshal(cls, rest_val): """Convert value from REST to a list of vals or an accepted string.""" rest_val = rest_val.strip() if rest_val in cls._GOOD_STRINGS: return rest_val return [cls.parse_val(val) for val in rest_val.split()] @classmethod def const_or_list(cls, val): """Return one of the _GOOD_STRINGS, or the (sanitized) original list. :param val: One of: - A string representing one of the _GOOD_STRINGS (case- insensitive. - A list containing a single value as above. - A list containing values appropriate to the subclass. :return: One of: - A string representing one of the _GOOD_STRINGS (in the appropriate case). - A list of the original values, validated and sanitized for the REST API. The objective is to be able to pass the return value directly into a setter or bld method expecting the relevant type. :raise ValueError: If the input could not be interpreted/sanitized as appropriate to the subclass. """ ret = None if isinstance(val, str) and val.upper() in cls._GOOD_STRINGS: ret = val.upper() elif isinstance(val, list): if (len(val) == 1 and isinstance(val[0], str) and val[0].upper() in cls._GOOD_STRINGS): ret = val[0].upper() else: ret = [cls.sanitize_for_api(ival) for ival in val] if ret is not None: return ret # Not a list, not a good value raise ValueError(_("Invalid value '%(bad_val)s'. 
Expected one of " "%(good_vals)s, or a list.") % {'bad_val': val, 'good_vals': str(cls._GOOD_STRINGS)}) @classmethod def marshal(cls, val): """Produce a string suitable for the REST API.""" val = cls.const_or_list(val) return (' '.join([str(ival) for ival in val]) if isinstance(val, list) else val) class VLANList(_AllowedList): """For REST fields of type AllowedVLANIDs.Union.""" @staticmethod def parse_val(val): return int(val) @staticmethod def sanitize_for_api(val): try: return int(val) except (ValueError, TypeError): raise ValueError("Specify a list of VLAN integers or integer " "strings; or 'ALL' for all VLANS or 'NONE' for " "no VLANS.") class MACList(_AllowedList): """For REST fields of type AllowedMACAddresses.Union.""" # Default parse_val is fine @staticmethod def sanitize_for_api(val): return sanitize_mac_for_api(val) pypowervm-1.1.24/pypowervm/const.py0000664000175000017500000001055513571367171017026 0ustar neoneo00000000000000# Copyright 2014, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Pervasive, widely-used constants.""" import six _DEFAULT_SCHEMA_VERSION = 'V1_0' _SCHEMA_VER120 = 'V1_2_0' _SCHEMA_VER130 = 'V1_3_0' _SCHEMA_VER140 = 'V1_4_0' _SCHEMA_VER150 = 'V1_5_0' _SCHEMA_VER160 = 'V1_6_0' _SCHEMA_VER170 = 'V1_7_0' _SCHEMA_VER180 = 'V1_8_0' _SCHEMA_VER190 = 'V1_9_0' _SCHEMA_VER1100 = 'V1_10_0' _ATTR_SCHEMA_VER = 'schemaVersion' _ATTR_KSV = 'ksv' DEFAULT_SCHEMA_ATTR = {_ATTR_SCHEMA_VER: _DEFAULT_SCHEMA_VERSION} ATTR_KSV120 = {_ATTR_KSV: _SCHEMA_VER120} ATTR_KSV130 = {_ATTR_KSV: _SCHEMA_VER130} ATTR_KSV140 = {_ATTR_KSV: _SCHEMA_VER140} ATTR_KSV150 = {_ATTR_KSV: _SCHEMA_VER150} ATTR_KSV160 = {_ATTR_KSV: _SCHEMA_VER160} ATTR_KSV170 = {_ATTR_KSV: _SCHEMA_VER170} ATTR_KSV180 = {_ATTR_KSV: _SCHEMA_VER180} ATTR_KSV190 = {_ATTR_KSV: _SCHEMA_VER190} ATTR_KSV1100 = {_ATTR_KSV: _SCHEMA_VER1100} ATTR_SCHEMA_KSV130 = {_ATTR_KSV: _SCHEMA_VER130, _ATTR_SCHEMA_VER: _SCHEMA_VER130} API_BASE_PATH = '/rest/api/' LOGON_PATH = API_BASE_PATH + 'web/Logon' TYPE_TEMPLATE = 'application/vnd.ibm.powervm.%s+xml; type=%s' # The following is interpolated *twice*. The first time, we insert either the # Password element or the GenerateX-API-SessionFile element after the UserID. # We don't want to interpolate 'userid' until the second interpolation, which # happens at runtime in the Session's login routine. 
_LOGONREQUEST_TEMPLATE_TEMPLATE = six.u( '\n' + '\n' + ' %%(userid)s\n' + ' %(pass_or_file)s\n' + '') _PASS_TEMPLATE = '%(passwd)s' _SESS_FILE = 'true' # LogonRequest template to be used for password-based authentication LOGONREQUEST_TEMPLATE_PASS = _LOGONREQUEST_TEMPLATE_TEMPLATE % dict( pass_or_file=_PASS_TEMPLATE) # LogonRequest template to be used for file-based authentication LOGONREQUEST_TEMPLATE_FILE = _LOGONREQUEST_TEMPLATE_TEMPLATE % dict( pass_or_file=_SESS_FILE) ATOM_NS = 'http://www.w3.org/2005/Atom' XSI_NS = 'http://www.w3.org/2001/XMLSchema-instance' WEB_NS = 'http://www.ibm.com/xmlns/systems/power/firmware/web/mc/2012_10/' PCM_NS = 'http://www.ibm.com/xmlns/systems/power/firmware/pcm/mc/2012_10/' UOM_BASE_NS = 'http://www.ibm.com/xmlns/systems/power/firmware/uom/mc' UOM_NS = UOM_BASE_NS + '/2012_10/' # Match a UUID anywhere in the search string UUID_REGEX = '%(x)s{8}-%(x)s{4}-%(x)s{4}-%(x)s{4}-%(x)s{12}' % { 'x': '[A-Fa-f0-9]'} # Entire search string must be a UUID and nothing more UUID_REGEX_WORD = '^%s$' % UUID_REGEX # XPath to the UUID of a metadata-having XML object UUID_XPATH = 'Metadata/Atom/AtomID' SUFFIX_TYPE_DO = 'do' LINK = 'link' PORT_DEFAULT_BY_PROTO = { 'http': 12080, 'https': 12443 } SERVICE_BY_NS = { WEB_NS: 'web', UOM_NS: 'uom', PCM_NS: 'pcm' } class HTTPStatus(object): """Small subset of HTTP status codes as used by PowerVM.""" OK_NO_CONTENT = 204 NO_CHANGE = 304 UNAUTHORIZED = 401 NOT_FOUND = 404 ETAG_MISMATCH = 412 INTERNAL_ERROR = 500 SERVICE_UNAVAILABLE = 503 class MaxLen(object): """Maximum lengths for various PowerVM entities.""" # FileName.Pattern FILENAME_DEFAULT = 79 VOPT_NAME = 37 VDISK_NAME = 15 class XAG(object): """Enumeration of all extended attribute group strings.""" ALL = 'All' NONE = 'None' ADV = 'Advanced' ENERGY = 'EnergyManagement' HYP = 'Hypervisor' NVRAM = 'NVRAM' SYS_NET = 'SystemNetwork' TIER_THRESH = 'TierThreshold' VIO_FMAP = 'ViosFCMapping' VIO_NET = 'ViosNetwork' VIO_SMAP = 'ViosSCSIMapping' VIO_STOR = 
'ViosStorage' VNIC = 'VNIC' pypowervm-1.1.24/pypowervm/log.py0000664000175000017500000000365113571367171016460 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Logging helpers.""" import functools import logging from oslo_log import log as oslo_logging LOG = oslo_logging.getLogger('pypowervm') def _logcall(filter_=None, dump_parms=False): def func_parms(f): @functools.wraps(f) def wrapper(*args, **kwds): logging_dbg = LOG.isEnabledFor(logging.DEBUG) if logging_dbg: if dump_parms: d_args, d_kwds = ((args, kwds) if filter_ is None else filter_(*args, **kwds)) LOG.debug("Entering args:%s kwds:%s '%s' %s" % (d_args, d_kwds, f.__name__, f.__module__)) else: LOG.debug("Entering '%s' %s" % (f.__name__, f.__module__)) r = f(*args, **kwds) if logging_dbg: if dump_parms: LOG.debug("Exiting: return '%s' '%s' %s" % (r, f.__name__, f.__module__)) else: LOG.debug("Exiting: return '%s' %s" % (f.__name__, f.__module__)) return r return wrapper return func_parms logcall = _logcall() logcall_args = _logcall(dump_parms=True) pypowervm-1.1.24/pypowervm/utils/0000775000175000017500000000000013571367172016461 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/utils/validation.py0000664000175000017500000005537513571367171021203 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Extended validation utilities.""" import abc import six from oslo_log import log as logging from pypowervm.i18n import _ from pypowervm.wrappers import base_partition as bp LOG = logging.getLogger(__name__) class ValidatorException(Exception): """Exceptions thrown from the validators.""" pass class LPARWrapperValidator(object): """LPAR Validator. This class implements additional validation for LPARs for use during resize or deployment. It is meant to catch any violations that would cause errors at the PowerVM management interface. """ def __init__(self, lpar_w, host_w, cur_lpar_w=None): """Initialize the validator :param lpar_w: LPAR wrapper intended to be validated :param host_w: managed_system wrapper intended to be validated against such as making sure the host has the desired resources of the LPAR available. :param cur_lpar_w: (Optional) current LPAR wrapper used to validate deltas during a resize operation. If this is passed in then it assumes resize validation. """ self.lpar_w = lpar_w self.host_w = host_w self.cur_lpar_w = cur_lpar_w def validate_all(self, check_dlpar=True): """Invoke attribute validation classes to perform validation :param check_dlpar: indicates the need to validate the dlpar capability. It is False when update is requested with force option. 
""" ProcValidator(self.lpar_w, self.host_w, cur_lpar_w=self.cur_lpar_w).validate( check_dlpar=check_dlpar) MemValidator(self.lpar_w, self.host_w, cur_lpar_w=self.cur_lpar_w).validate( check_dlpar=check_dlpar) CapabilitiesValidator(self.lpar_w, self.host_w, cur_lpar_w=self.cur_lpar_w).validate( check_dlpar=check_dlpar) @six.add_metaclass(abc.ABCMeta) class BaseValidator(object): """Base Validator. This class is responsible for delegating validation depending on if it's a deploy, active resize, or inactive resize. If the caller intends to perform resize validation then they must pass the cur_lpar_w argument. The cur_lpar_w is the current LPAR wrapper describing the LPAR before any resizing has taken place, while lpar_w represents the LPAR with new (resized) values. If cur_lpar_w is None then deploy validation logic will ensue. """ def __init__(self, lpar_w, host_w, cur_lpar_w=None): """Initialize LPAR and System Wrappers.""" self.lpar_w = lpar_w self.host_w = host_w self.cur_lpar_w = cur_lpar_w def validate(self, check_dlpar=True): """Determines what validation is requested and invokes it. :param check_dlpar: flag indicating if we need to validate dlpar capability. """ # Deploy self._populate_new_values() if self.cur_lpar_w is None: self._validate_deploy() # Resize else: if check_dlpar: self._can_modify() self._populate_resize_diffs() # Inactive Resize if self.cur_lpar_w.state == bp.LPARState.NOT_ACTIVATED: self._validate_inactive_resize() # Active Resize else: self._validate_active_resize() self._validate_common() @abc.abstractmethod def _populate_new_values(self): """Abstract method for populating deploy values This method will always be called in validate() and should populate instance attributes with the new LPARWrapper values. 
""" @abc.abstractmethod def _populate_resize_diffs(self): """Abstract method for populating resize values This method will only be called in validate() for resize operations and should populate instance attributes with the differences between the old and new LPARWrapper values. """ @abc.abstractmethod def _validate_deploy(self): """Abstract method for deploy validation only.""" @abc.abstractmethod def _validate_active_resize(self): """Abstract method for active resize validation only.""" @abc.abstractmethod def _validate_inactive_resize(self): """Abstract method for inactive resize validation only.""" @abc.abstractmethod def _validate_common(self): """Abstract method for common validation This method should be agnostic to the operation being validated (deploy or resize) because the instance attributes will be populated accordingly in validate(). """ @abc.abstractmethod def _can_modify(self): """Abstract method to check if resource may be modified This method should invoke the corresponding can_modify method in the LPAR class for the resource and raise an exception if it returns False. Should only be called for resize validation when cur_lpar_w is passed in. """ def _validate_host_has_available_res(self, des, avail, res_name): if round(des, 2) > round(avail, 2): ex_args = {'requested': '%.2f' % des, 'avail': '%.2f' % avail, 'instance_name': self.lpar_w.name, 'res_name': res_name} msg = _("Insufficient available %(res_name)s on host for virtual " "machine '%(instance_name)s' (%(requested)s " "requested, %(avail)s available)") % ex_args LOG.error(msg) raise ValidatorException(msg) class MemValidator(BaseValidator): """Memory Validator. This class implements memory validation for lpars in the case of deploy, inactive resize, and active resize. 
Instance attributes populated by _populate_new_values :attr des_mem: desired memory of the new lpar :attr max_mem: maximum memory of the new lpar :attr min_mem: minimum memory of the new lpar :attr avail_mem: available memory on the host :attr ppt_ratio: desired ppt ratio of the new lpar :attr res_name: name of the resource """ def __init__(self, lpar_w, host_w, cur_lpar_w=None): super(MemValidator, self).__init__(lpar_w, host_w, cur_lpar_w=cur_lpar_w) self.ppt_ratio = None def _populate_new_values(self): """Set newly desired LPAR attributes as instance attributes.""" mem_cfg = self.lpar_w.mem_config self.des_mem = mem_cfg.desired self.max_mem = mem_cfg.max self.min_mem = mem_cfg.min self.exp_fact = mem_cfg.exp_factor self.avail_mem = self.host_w.memory_free self.ppt_ratio = mem_cfg.ppt_ratio self.res_name = _('memory') def _populate_resize_diffs(self): """Calculate lpar_w vs cur_lpar_w diffs and set as attributes.""" deltas = self._calculate_resize_deltas() self.delta_des_mem = deltas['delta_mem'] self.delta_max_mem = deltas['delta_max_mem'] self.delta_exp_fact = deltas['delta_exp_factor'] def _validate_deploy(self): """Enforce validation rules specific to LPAR deployment.""" self._validate_host_has_available_res( self.des_mem, self.avail_mem, self.res_name) def _validate_active_resize(self): """Enforce validation rules specific to active resize.""" curr_mem_cfg = self.cur_lpar_w.mem_config curr_min_mem = curr_mem_cfg.min curr_max_mem = curr_mem_cfg.max # min/max values cannot be changed when lpar is not powered off. if self.max_mem != curr_max_mem or self.min_mem != curr_min_mem: msg = (_("The virtual machine must be powered off before changing " "the minimum or maximum memory. Power off virtual " "machine %s and try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) if self.delta_exp_fact != 0: msg = (_("The virtual machine must be powered off before changing " "the expansion factor. 
Power off virtual machine %s and " "try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) if (self.ppt_ratio is not None and self.ppt_ratio != curr_mem_cfg.ppt_ratio): msg = ("The virtual machine must be powered off before changing " "the physical page table ratio. Power off virtual " "machine %s and try again.") % self.cur_lpar_w.name raise ValidatorException(msg) # Common validations for both active & inactive resizes. self._validate_resize_common() def _validate_inactive_resize(self): """Enforce validation rules specific to inactive resize.""" self._validate_resize_common() def _validate_common(self): """Enforce operation agnostic validation rules.""" # TODO(IBM): pass def _can_modify(self): """Checks mem dlpar and rmc state if LPAR not activated.""" modifiable, reason = self.cur_lpar_w.can_modify_mem() if not modifiable: LOG.error(reason) raise ValidatorException(reason) def _validate_resize_common(self): """Validation rules common for both active and inactive resizes. Helper method to enforce validation rules that are common for both active and inactive resizes. """ self._validate_host_has_available_res(self.delta_des_mem, self.avail_mem, self.res_name) def _calculate_resize_deltas(self): """Helper method to calculate the memory deltas for resize operation. :return dict of memory deltas. """ deltas = {} # Current LPAR values curr_mem_cfg = self.cur_lpar_w.mem_config curr_des_mem = curr_mem_cfg.desired curr_max_mem = curr_mem_cfg.max curr_exp_fact = curr_mem_cfg.exp_factor # Calculate memory deltas deltas['delta_mem'] = self.des_mem - curr_des_mem deltas['delta_max_mem'] = self.max_mem - curr_max_mem deltas['delta_exp_factor'] = self.exp_fact - curr_exp_fact return deltas class ProcValidator(BaseValidator): """Processor Validator. This class implements processor validation for LPARs in the case of deploy, inactive resize, and active resize. 
Instance attributes populated by _populate_new_values :attr has_dedicated: LPAR has dedicated processors boolean :attr procs_avail: available procs on host :attr des_procs: desired processors from new LPAR :attr res_name: name of the resource :attr max_procs_per_aix_linux_lpar: max procs per LPAR on host :attr max_sys_procs_limit: LPAR max procs limit on host :attr des_vcpus: LPAR desired vcpus :attr max_vcpus: LPAR max vcpus :attr min_vcpus: LPAR min vcpus :attr proc_compat_mode: Processor compatibility mode :attr pool_id: LPAR shared processor pool ID (only for shared proc mode) :attr max_proc_units: LPAR max proc units (only for shared processor mode) :attr min_proc_units: LPAR min proc units (only for shared processor mode) """ def _populate_new_values(self): """Set newly desired LPAR values as instance attributes.""" self.has_dedicated = self.lpar_w.proc_config.has_dedicated self.procs_avail = self.host_w.proc_units_avail self.proc_compat_mode = self.lpar_w.proc_compat_mode if self.has_dedicated: self._populate_dedicated_proc_values() else: self._populate_shared_proc_values() def _populate_dedicated_proc_values(self): """Set dedicated proc values as instance attributes.""" ded_proc_cfg = self.lpar_w.proc_config.dedicated_proc_cfg self.des_procs = ded_proc_cfg.desired self.res_name = _('CPUs') # Proc host limits for dedicated proc self.max_procs_per_aix_linux_lpar = ( self.host_w.max_procs_per_aix_linux_lpar) self.max_sys_procs_limit = self.host_w.max_sys_procs_limit # VCPUs doesn't mean anything in dedicated proc cfg # FAIP in dedicated proc cfg vcpus == procs for naming convention self.des_vcpus = self.des_procs self.max_vcpus = ded_proc_cfg.max self.min_vcpus = ded_proc_cfg.min def _populate_shared_proc_values(self): """Set shared proc values as instance attributes.""" shr_proc_cfg = self.lpar_w.proc_config.shared_proc_cfg self.des_procs = shr_proc_cfg.desired_units self.res_name = _('processing units') # VCPU host limits for shared proc 
self.max_procs_per_aix_linux_lpar = ( self.host_w.max_vcpus_per_aix_linux_lpar) self.max_sys_procs_limit = self.host_w.max_sys_vcpus_limit self.des_vcpus = shr_proc_cfg.desired_virtual self.max_vcpus = shr_proc_cfg.max_virtual self.min_vcpus = shr_proc_cfg.min_virtual self.max_proc_units = shr_proc_cfg.max_units self.min_proc_units = shr_proc_cfg.min_units self.pool_id = shr_proc_cfg.pool_id def _populate_resize_diffs(self): """Calculate lpar_w vs cur_lpar_w diffs and set as attributes.""" deltas = self._calculate_resize_deltas() self.delta_des_vcpus = deltas['delta_vcpu'] def _validate_deploy(self): """Enforce validation rules specific to LPAR deployment.""" self._validate_host_has_available_res( self.des_procs, self.procs_avail, self.res_name) def _validate_active_resize(self): """Enforce validation rules specific to active resize.""" # Extract current values from existing LPAR. curr_has_dedicated = self.cur_lpar_w.proc_config.has_dedicated if curr_has_dedicated: lpar_proc_config = self.cur_lpar_w.proc_config.dedicated_proc_cfg curr_max_vcpus = lpar_proc_config.max curr_min_vcpus = lpar_proc_config.min else: lpar_proc_config = self.cur_lpar_w.proc_config.shared_proc_cfg curr_max_vcpus = lpar_proc_config.max_virtual curr_min_vcpus = lpar_proc_config.min_virtual curr_max_proc_units = lpar_proc_config.max_units curr_min_proc_units = lpar_proc_config.min_units # min/max cannot be changed when lpar is not powered off. if (self.max_vcpus != curr_max_vcpus or self.min_vcpus != curr_min_vcpus): msg = (_("The virtual machine must be powered off before changing " "the minimum or maximum processors. 
Power off virtual " "machine %s and try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) if not self.has_dedicated and not curr_has_dedicated: curr_min_proc_units = round(float(curr_min_proc_units), 2) curr_max_proc_units = round(float(curr_max_proc_units), 2) if (round(self.max_proc_units, 2) != curr_max_proc_units or round(self.min_proc_units, 2) != curr_min_proc_units): msg = (_("The virtual machine must be powered off before " "changing the minimum or maximum processor units. " "Power off virtual machine %s and try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) # Processor compatibility mode cannot be changed when lpar is not # powered off. curr_proc_compat = self.cur_lpar_w.proc_compat_mode curr_pend_proc_compat = self.cur_lpar_w.pending_proc_compat_mode if self.proc_compat_mode is not None: proc_compat = self.proc_compat_mode.lower() if (proc_compat != curr_proc_compat.lower() and (proc_compat != curr_pend_proc_compat.lower())): # If requested was not the same as current, this is # not supported when instance is not powered off. msg = (_("The virtual machine must be powered off before " "changing the processor compatibility mode. " "Power off virtual machine %s and try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) # Processing mode cannot be changed when lpar is not powered off. if self.has_dedicated != curr_has_dedicated: msg = (_("The virtual machine must be powered off before changing " "the processing mode. Power off virtual machine %s and " "try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) # Validations common for both active & inactive resizes. 
self._validate_resize_common() def _validate_inactive_resize(self): """Enforce validation rules specific to inactive resize.""" self._validate_resize_common() def _validate_common(self): """Enforce operation agnostic validation rules.""" self._validate_host_max_allowed_procs_per_lpar() self._validate_host_max_sys_procs_limit() def _can_modify(self): """Checks proc dlpar and rmc state if LPAR not activated.""" modifiable, reason = self.cur_lpar_w.can_modify_proc() if not modifiable: LOG.error(reason) raise ValidatorException(reason) def _validate_host_max_allowed_procs_per_lpar(self): if self.des_vcpus > self.max_procs_per_aix_linux_lpar: ex_args = {'vcpus': self.des_vcpus, 'max_allowed': self.max_procs_per_aix_linux_lpar, 'instance_name': self.lpar_w.name} msg = _("The desired processors (%(vcpus)d) cannot be above " "the maximum allowed processors per partition " "(%(max_allowed)d) for virtual machine " "'%(instance_name)s'.") % ex_args LOG.error(msg) raise ValidatorException(msg) def _validate_host_max_sys_procs_limit(self): if self.max_vcpus > self.max_sys_procs_limit: ex_args = {'vcpus': self.max_vcpus, 'max_allowed': self.max_sys_procs_limit, 'instance_name': self.lpar_w.name} msg = _("The maximum processors (%(vcpus)d) cannot be above " "the maximum system capacity processor limit " "(%(max_allowed)d) for virtual machine " "'%(instance_name)s'.") % ex_args LOG.error(msg) raise ValidatorException(msg) def _validate_resize_common(self): """Validation rules common for both active and inactive resizes. Helper method to enforce validation rules that are common for both active and inactive resizes. """ self._validate_host_has_available_res(self.delta_des_vcpus, self.procs_avail, self.res_name) def _calculate_resize_deltas(self): """Helper method to calculate the procs deltas for resize operation. :return dict of processor deltas. """ deltas = {} # Extract current values from existing LPAR. 
curr_has_dedicated = self.cur_lpar_w.proc_config.has_dedicated if curr_has_dedicated: lpar_proc_config = self.cur_lpar_w.proc_config.dedicated_proc_cfg curr_des_vcpus = lpar_proc_config.desired else: lpar_proc_config = self.cur_lpar_w.proc_config.shared_proc_cfg curr_des_vcpus = lpar_proc_config.desired_virtual curr_proc_units = lpar_proc_config.desired_units # Calculate VCPU deltas deltas['delta_vcpu'] = self.des_vcpus - curr_des_vcpus # If this is dedicated processor mode, there are no proc_units. if self.has_dedicated: if not curr_has_dedicated and curr_proc_units is not None: # Resize from Shared to Dedicated mode deltas['delta_vcpu'] = ( round(self.des_vcpus - curr_proc_units, 2)) else: if curr_has_dedicated: # Resize from Dedicated to Shared mode deltas['delta_vcpu'] = ( round(self.des_procs - curr_des_vcpus, 2)) else: deltas['delta_vcpu'] = ( round(self.des_procs - curr_proc_units, 2)) return deltas class CapabilitiesValidator(BaseValidator): """Capabilities Validator. This class implements capabilities validation for lpars in the case of deploy, inactive resize, and active resize. Instance attributes populated by _populate_new_values :attr srr_enabled: srr capability of the lpar """ def _populate_new_values(self): """Set newly desired resize attributes as instance attributes.""" self.srr_enabled = self.lpar_w.srr_enabled def _validate_active_resize(self): """Enforce validation rules specific to active resize.""" # If not dynamic SRR toggle capable, Simplified Remote Restart # capability cannot be changed unless lpar is powered off. if self.cur_lpar_w.srr_enabled != self.srr_enabled: dyn_srr_cap = self.host_w.get_capability('dynamic_srr_capable') if not dyn_srr_cap: msg = (_("The virtual machine must be powered off before " "changing the simplified remote restart capability. 
" "Power off virtual machine %s and try again.") % self.cur_lpar_w.name) raise ValidatorException(msg) def _populate_resize_diffs(self): """Calculate lpar_w vs cur_lpar_w diffs and set as attributes.""" pass def _validate_deploy(self): """Enforce validation rules specific to LPAR deployment.""" pass def _validate_inactive_resize(self): """Enforce validation rules specific to inactive resize.""" pass def _validate_common(self): """Enforce operation agnostic validation rules.""" pass def _can_modify(self): """Check if capabilities may be modified.""" pass pypowervm-1.1.24/pypowervm/utils/__init__.py0000664000175000017500000000000013571367171020557 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/utils/lpar_builder.py0000664000175000017500000011221513571367171021500 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Construction and basic validation of an LPAR or VIOS EntryWrapper.""" import abc import six from oslo_log import log as logging from pypowervm.i18n import _ from pypowervm.wrappers import base_partition as bp from pypowervm.wrappers import logical_partition as lpar from pypowervm.wrappers import virtual_io_server as vios # Dict keys used for input to the builder NAME = 'name' ENV = 'env' UUID = 'uuid' ID = 'id' MEM = 'memory' MAX_MEM = 'max_mem' MIN_MEM = 'min_mem' AME_FACTOR = 'ame_factor' PPT_RATIO = 'ppt_ratio' # Contains the mapping of the ratio to REST accepted values. 
REST does # not take the actual ratio value as argument, instead it takes an # enumeration of accepted values starting from 0. ALLOWED_PPT_RATIOS = {'1:64': 0, '1:128': 1, '1:256': 2, '1:512': 3, '1:1024': 4, '1:2048': 5, '1:4096': 6} ENFORCE_AFFINITY_CHECK = 'enforce_affinity_check' DED_PROCS = 'dedicated_proc' VCPU = 'vcpu' MAX_VCPU = 'max_vcpu' MIN_VCPU = 'min_vcpu' DED_PROC_KEYS = () PROC_UNITS = 'proc_units' MAX_PROC_U = 'max_proc_units' MIN_PROC_U = 'min_proc_units' PROC_UNITS_KEYS = (PROC_UNITS, MAX_PROC_U, MIN_PROC_U) SHARING_MODE = 'sharing_mode' UNCAPPED_WEIGHT = 'uncapped_weight' SPP = 'proc_pool' AVAIL_PRIORITY = 'avail_priority' SRR_CAPABLE = 'srr_capability' PROC_COMPAT = 'processor_compatibility' ENABLE_LPAR_METRIC = 'enable_lpar_metric' SECURE_BOOT = 'secure_boot' # I/O configuration # Sure would love to change this to MAX_VIRT_IO_SLOTS or similar, but compat... MAX_IO_SLOTS = 'max_io_slots' PHYS_IO_SLOTS = 'phys_io_slots' # IBMi specific keys ALT_LOAD_SRC = 'alt_load_src' CONSOLE = 'console' LOAD_SRC = 'load_src' RESTRICTED_IO = 'restricted_io' # The minimum attributes that must be supplied to create an LPAR MINIMUM_ATTRS = (NAME, MEM, VCPU) # Keys that indicate that shared processors are being configured SHARED_PROC_KEYS = (PROC_UNITS_KEYS + (UNCAPPED_WEIGHT,)) MEM_LOW_BOUND = 128 VCPU_LOW_BOUND = 1 PROC_UNITS_LOW_BOUND = 0.05 MAX_LPAR_NAME_LEN = 31 # Defaults DEF_PROC_UNIT_FACT = 0.5 DEF_MAX_SLOT = 64 DEF_UNCAPPED_WT = 64 DEF_SPP = 0 DEF_AVAIL_PRI = 127 DEF_SRR = 'false' DEF_LPAR_METRIC = False DEF_SECURE_BOOT = 0 DEF_PHYS_IO_SLOTS = None LOG = logging.getLogger(__name__) class LPARBuilderException(Exception): """Exceptions thrown from the lpar builder.""" pass @six.add_metaclass(abc.ABCMeta) class Standardize(object): """Interface class to standardize the LPAR definition A standardizer is responsible for validating the LPAR attributes that are presented and augmenting those which are required to create the LPAR. 
""" def __init__(self): self.attr = None def set_attr(self, attr): """Set the attributes to be validated and standardized. :param attr: dict of lpar attributes provided by the user """ self.attr = attr def general(self): """Validates and standardizes the general LPAR attributes. :returns: dict of attributes. Expected: NAME, ENV, MAX_IO_SLOTS, AVAIL_PRIORITY, PROC_COMPAT Optional: SRR_CAPABLE, UUID, ID IBMi value: CONSOLE, LOAD_SRC, ALT_LOAD_SRC, RESTRICTED_IO """ pass def memory(self): """Validates and standardizes the memory LPAR attributes. :returns: dict of attributes. Expected: MEM, MIN_MEM, MAX_MEM """ pass def ded_proc(self): """Validates and standardizes the dedicated processor LPAR attributes. :returns: dict of attributes. Expected: VCPU, MIN_VCPU, MAX_VCPU, SHARING_MODE """ pass def shr_proc(self): """Validates and standardizes the shared processor LPAR attributes. :returns: dict of attributes. Expected: VCPU, MIN_VCPU, MAX_VCPU, PROC_UNITS, MAX_PROC_U, MIN_PROC_U, SHARING_MODE, UNCAPPED_WEIGHT(if UNCAPPED) """ pass def io_config(self): """Validates and standardizes the LPAR's I/O configuration. :returns: dict of attributes. Expected: MAX_IO_SLOTS, PHYS_IO_SLOTS (may be empty) """ pass class DefaultStandardize(Standardize): """Default standardizer. This class implements the Standardize interface. It takes a simple approach for augmenting missing LPAR settings. It does reasonable validation of the LPAR attributes. It first validates the user input as-is, then fills in any missing attributes that are required and supported by the host. Finally, it validates what it's sending back to the caller. If any validation rules are missed, the PowerVM management interface will catch them and surface an error at that time. 
""" def __init__(self, mngd_sys, proc_units_factor=DEF_PROC_UNIT_FACT, max_slots=DEF_MAX_SLOT, uncapped_weight=DEF_UNCAPPED_WT, spp=DEF_SPP, avail_priority=DEF_AVAIL_PRI, srr=DEF_SRR, proc_compat=bp.LPARCompat.DEFAULT, enable_lpar_metric=DEF_LPAR_METRIC, secure_boot=DEF_SECURE_BOOT): """Initialize the standardizer :param mngd_sys: managed_system wrapper of the host to deploy to. This is used to validate the fields and standardize against the host. :param proc_units_factor: amount of proc units to assign to each vcpu if proc units are not specified :param max_slots: number of max io slots to assign, if not specified :param uncapped_weight: the uncapped weight to use if the processors are shared and a weight is not specified :param spp: shared processor pool to assign if the processors are shared and the pool is not specified :param avail_priority: availability priority of the LPAR :param srr: simplified remote restart capable :param proc_compat: processor compatibility mode value :param enable_lpar_metric: LPAR performance data collection attribute enabled only if value is true :param secure_boot: The secure boot policy value """ super(DefaultStandardize, self).__init__() self.mngd_sys = mngd_sys self.proc_units_factor = proc_units_factor if proc_units_factor > 1 or proc_units_factor < 0.05: msg = _('Processor units factor must be between 0.05 and 1.0. 
' 'Value: %s') % proc_units_factor raise LPARBuilderException(msg) self.max_slots = max_slots self.uncapped_weight = uncapped_weight self.spp = spp self.avail_priority = avail_priority self.srr = srr self.enable_lpar_metric = enable_lpar_metric self.proc_compat = proc_compat self.secure_boot = secure_boot def _set_prop(self, attr, prop, base_prop, convert_func=str): """Copies a property if present or copies the base property.""" attr[prop] = convert_func(self.attr.get(prop, self.attr[base_prop])) def _set_val(self, attr, prop, value=None, convert_func=str): """Copies a property if present or uses the supplied value.""" val = self.attr.get(prop, value) if val is not None: attr[prop] = convert_func(val) def _validate_general(self, attrs=None, partial=False): if attrs is None: attrs = self.attr name_len = len(attrs[NAME]) if name_len < 1 or name_len > MAX_LPAR_NAME_LEN: msg = _("Logical partition name has invalid length. " "Name: %s") % attrs[NAME] raise LPARBuilderException(msg) LPARType(attrs.get(ENV), allow_none=partial).validate() IOSlots(attrs.get(MAX_IO_SLOTS), allow_none=partial).validate() AvailPriority(attrs.get(AVAIL_PRIORITY), allow_none=partial).validate() EnableLparMetric(attrs.get(ENABLE_LPAR_METRIC), allow_none=partial).validate() IDBoundField(attrs.get(ID), allow_none=True).validate() # SRR is always optional since the host may not be capable of it. 
SimplifiedRemoteRestart(attrs.get(SRR_CAPABLE), allow_none=True).validate() ProcCompatMode(attrs.get(PROC_COMPAT), host_modes=self.mngd_sys.proc_compat_modes, allow_none=partial).validate() secure_boot_cap = self._can_secure_boot_for_lpar(attrs.get(ENV, '')) SecureBoot(attrs.get(SECURE_BOOT, DEF_SECURE_BOOT), secure_boot_cap).validate() # Validate fields specific to IBMi if attrs.get(ENV, '') == bp.LPARType.OS400: RestrictedIO(attrs.get(RESTRICTED_IO), allow_none=True).validate() # Validate affinity check attribute based on host capability host_affinity_cap = self.mngd_sys.get_capability( 'affinity_check_capable') EnforceAffinityCheck(attrs.get(ENFORCE_AFFINITY_CHECK), host_affinity_cap).validate() def _can_secure_boot_for_lpar(self, lpar_type): if lpar_type == bp.LPARType.OS400: # NOTE(edmondsw) Secure boot is initially supported only for RPA # partitions (AIX/Linux). It is blocked at the schema level for # IBMi. See https://bugs.launchpad.net/pypowervm/+bug/1805610 # TODO(edmondsw): If/when secure boot is supported for IBMi, a new # capability will need to be checked here. 
return False else: return self.mngd_sys.get_capability( 'partition_secure_boot_capable') def _validate_memory(self, attrs=None, partial=False): if attrs is None: attrs = self.attr host_ame_cap = self.mngd_sys.get_capability( 'active_memory_expansion_capable') mem = Memory(attrs.get(MIN_MEM), attrs.get(MEM), attrs.get(MAX_MEM), attrs.get(AME_FACTOR), host_ame_cap, self.mngd_sys.memory_region_size, allow_none=partial) mem.validate() host_ppt_cap = self.mngd_sys.get_capability( 'physical_page_table_ratio_capable') pptr = PhysicalPageTableRatio(attrs.get(PPT_RATIO), host_ppt_cap) pptr.validate() if pptr.value: self.attr[PPT_RATIO] = pptr.convert_value(pptr.value) def _validate_shared_proc(self, attrs=None, partial=False): if attrs is None: attrs = self.attr # Validate the vcpu first VCpu(attrs.get(MIN_VCPU), attrs.get(VCPU), attrs.get(MAX_VCPU), partial).validate() # Validate the proc units ProcUnits(attrs.get(MIN_PROC_U), attrs.get(PROC_UNITS), attrs.get(MAX_PROC_U), allow_none=partial).validate() # TODO(IBM): Validate any shared CPU associated parameters def _validate_lpar_ded_cpu(self, attrs=None, partial=False): if attrs is None: attrs = self.attr VCpu(attrs.get(MIN_VCPU), attrs.get(VCPU), attrs.get(MAX_VCPU), allow_none=partial).validate() # If specified, ensure the dedicated procs value is valid DedicatedProc(attrs.get(DED_PROCS), allow_none=True).validate() DedProcShareMode(attrs.get(SHARING_MODE), allow_none=True).validate() def general(self): # Validate the settings sent in self._validate_general(partial=True) bld_attr = {NAME: self.attr[NAME]} self._set_val(bld_attr, ID, convert_func=int) self._set_val(bld_attr, UUID) self._set_val(bld_attr, ENV, bp.LPARType.AIXLINUX, convert_func=LPARType.convert_value) self._set_val(bld_attr, MAX_IO_SLOTS, self.max_slots) self._set_val(bld_attr, AVAIL_PRIORITY, self.avail_priority) self._set_val(bld_attr, ENABLE_LPAR_METRIC, self.enable_lpar_metric) # See if the host is capable of SRR before setting it. 
srr_cap = self.mngd_sys.get_capability( 'simplified_remote_restart_capable') if srr_cap: self._set_val(bld_attr, SRR_CAPABLE, self.srr, convert_func=SimplifiedRemoteRestart.convert_value) self._set_val(bld_attr, PROC_COMPAT, bp.LPARCompat.DEFAULT, convert_func=ProcCompatMode.convert_value) # See if the host is capable of secure boot before setting it. if self._can_secure_boot_for_lpar(bld_attr[ENV]): self._set_val(bld_attr, SECURE_BOOT, self.secure_boot, convert_func=int) # Build IBMi attributes if bld_attr[ENV] == bp.LPARType.OS400: self._set_val(bld_attr, CONSOLE, value='HMC') self._set_val(bld_attr, LOAD_SRC, value='0') self._set_val(bld_attr, ALT_LOAD_SRC, value='NONE') if self.mngd_sys.get_capability('ibmi_restrictedio_capable'): self._set_val(bld_attr, RESTRICTED_IO, value=True, convert_func=RestrictedIO.convert_value) # Validate the attributes self._validate_general(attrs=bld_attr) return bld_attr def memory(self): # Validate the partial settings self._validate_memory(partial=True) bld_attr = {MEM: self.attr[MEM]} self._set_prop(bld_attr, MAX_MEM, MEM) self._set_prop(bld_attr, MIN_MEM, MEM) # Validate the full memory settings self._validate_memory(attrs=bld_attr) return bld_attr def shr_proc(self): def _compare(prop, value, compare_func, typ): v1 = self.attr.get(prop) # Ensure the property is specified if v1 is None: return value # Compare return compare_func(typ(v1), value) # Validate the partial settings self._validate_shared_proc(partial=True) bld_attr = {VCPU: self.attr[VCPU]} self._set_prop(bld_attr, MAX_VCPU, VCPU) self._set_prop(bld_attr, MIN_VCPU, VCPU) # See if we need to calculate a default proc_units value and min/max # Before setting the proc units ensure it's between min/max spec_proc_units = self.attr.get(PROC_UNITS) if spec_proc_units is None: proc_units = int(bld_attr[VCPU]) * self.proc_units_factor # Ensure it's at least as large as a specified min value proc_units = _compare(MIN_PROC_U, proc_units, max, float) # Ensure it's smaller than a 
specified max value proc_units = _compare(MAX_PROC_U, proc_units, min, float) else: proc_units = float(spec_proc_units) self._set_val(bld_attr, PROC_UNITS, proc_units) self._set_val(bld_attr, MIN_PROC_U, proc_units) self._set_val(bld_attr, MAX_PROC_U, proc_units) self._set_val(bld_attr, SHARING_MODE, bp.SharingMode.UNCAPPED) # If uncapped sharing mode then set the weight if bld_attr.get(SHARING_MODE) == bp.SharingMode.UNCAPPED: self._set_val(bld_attr, UNCAPPED_WEIGHT, self.uncapped_weight) self._set_val(bld_attr, SPP, self.spp) # Validate all the values self._validate_shared_proc(attrs=bld_attr) return bld_attr def ded_proc(self): self._validate_lpar_ded_cpu(partial=True) # Set the proc based on vcpu field bld_attr = {VCPU: self.attr[VCPU]} self._set_prop(bld_attr, MAX_VCPU, VCPU) self._set_prop(bld_attr, MIN_VCPU, VCPU) self._set_val(bld_attr, SHARING_MODE, bp.DedicatedSharingMode.SHARE_IDLE_PROCS, convert_func=DedProcShareMode.convert_value) self._validate_lpar_ded_cpu(attrs=bld_attr) return bld_attr def io_config(self): """Validates and standardizes the LPAR's I/O configuration. :returns: dict of attributes. 
Expected: MAX_IO_SLOTS, PHYS_IO_SLOTS (may be empty) """ return { MAX_IO_SLOTS: self.attr.get(MAX_IO_SLOTS, self.max_slots), PHYS_IO_SLOTS: self.attr.get(PHYS_IO_SLOTS, DEF_PHYS_IO_SLOTS), } @six.add_metaclass(abc.ABCMeta) class Field(object): """Represents a field to validate.""" _type = str def __init__(self, value, name=None, allow_none=True): self.name = name if name is not None else self.__class__._name self.value = value self.typed_value = None self.allow_none = allow_none @classmethod def convert_value(cls, value): """Static converter for the Field type.""" return cls._type(value) def _type_error(self, value, exc=TypeError): values = dict(field=self.name, value=value) msg = _("Field '%(field)s' has invalid value: '%(value)s'") % values LOG.error(msg) raise exc(msg) def _convert_value(self, value): """Does the actual conversion of the value and returns it.""" try: return self.convert_value(value) except (TypeError, ValueError) as e: self._type_error(value, exc=e.__class__) def _convert(self): """Converts the value and saves it away for future use.""" self.typed_value = self._convert_value(self.value) def validate(self): # Check if the value is none and we allow that if self.value is None: if not self.allow_none: self._type_error(None) else: # The base value is not none, so see if we should convert it if self.typed_value is None: self._convert() @six.add_metaclass(abc.ABCMeta) class BoolField(Field): """Facilitates validating boolean fields.""" _type = bool @classmethod def convert_value(cls, value): # Special case string values, so random strings don't map to True if isinstance(value, six.string_types): if value.lower() in ['true', 'yes']: return True if value.lower() in ['false', 'no']: return False elif isinstance(value, bool): return value raise ValueError('Could not convert %s.' 
% value) @six.add_metaclass(abc.ABCMeta) class ChoiceField(Field): _choices = None @classmethod def convert_value(cls, value): return cls._validate_choices(value, cls._choices) @classmethod def _validate_choices(cls, value, choices): if value is None: raise ValueError(_('None value is not valid.')) for choice in choices: if value.lower() == choice.lower(): return choice # If we didn't find it, that's a problem... values = dict(value=value, field=cls._name, choices=choices) msg = _("Value '%(value)s' is not valid for field '%(field)s' with " "acceptable choices: %(choices)s") % values raise ValueError(msg) def validate(self): if self.value is None and self.allow_none: return super(ChoiceField, self).validate() self._validate_choices(self.value, self._choices) @six.add_metaclass(abc.ABCMeta) class BoundField(Field): _min_bound = None _max_bound = None def validate(self): super(BoundField, self).validate() # If value was not converted to the type, then don't validate bounds if self.typed_value is None: return if (self._min_bound is not None and self.typed_value < self._convert_value(self._min_bound)): values = dict(field=self.name, value=self.typed_value, minimum=self._min_bound) msg = _("Field '%(field)s' has a value below the minimum. " "Value: %(value)s; Minimum: %(minimum)s") % values LOG.error(msg) raise ValueError(msg) if (self._max_bound is not None and self.typed_value > self._convert_value(self._max_bound)): values = dict(field=self.name, value=self.typed_value, maximum=self._max_bound) msg = _("Field '%(field)s' has a value above the maximum. 
" "Value: %(value)s; Maximum: %(maximum)s") % values LOG.error(msg) raise ValueError(msg) @six.add_metaclass(abc.ABCMeta) class IntBoundField(BoundField): _type = int @six.add_metaclass(abc.ABCMeta) class FloatBoundField(BoundField): _type = float @six.add_metaclass(abc.ABCMeta) class MinDesiredMaxField(object): def __init__(self, field_type, min_name, des_name, max_name, min_value, desired_value, max_value, min_min=None, max_max=None, name=None, allow_none=True): self.name = name if name is not None else self.__class__._name self.min_field = field_type( min_value, name=min_name, allow_none=allow_none) self.min_field._max_bound = desired_value self.min_field._min_bound = min_min self.des_field = field_type( desired_value, name=des_name, allow_none=allow_none) self.des_field._min_bound = min_value self.des_field._max_bound = max_value self.max_field = field_type( max_value, name=max_name, allow_none=allow_none) self.max_field._min_bound = desired_value self.max_field._max_bound = max_max def validate(self): # Do specific validations before the general ones for fld in [self.min_field, self.des_field, self.max_field]: if fld.value is not None or not fld.allow_none: fld._convert() # Ensure the desired value is between the min and max if (self.des_field.typed_value and self.max_field.typed_value and self.des_field.typed_value > self.max_field.typed_value): values = dict(desired_field=self.des_field.name, max_field=self.max_field.name, desired=self.des_field.typed_value, maximum=self.max_field.typed_value) msg = _("The '%(desired_field)s' has a value above the " "'%(max_field)s' value. 
Desired: %(desired)s Maximum: " "%(maximum)s") % values LOG.error(msg) raise ValueError(msg) # Now the minimum if (self.des_field.typed_value and self.min_field.typed_value and self.des_field.typed_value < self.min_field.typed_value): values = dict(desired_field=self.des_field.name, min_field=self.min_field.name, desired=self.des_field.typed_value, minimum=self.min_field.typed_value) msg = _("The '%(desired_field)s' has a value below the " "'%(min_field)s' value. Desired: %(desired)s Minimum: " "%(minimum)s") % values LOG.error(msg) raise ValueError(msg) # Now the fields individually self.min_field.validate() self.des_field.validate() self.max_field.validate() class Memory(MinDesiredMaxField): _name = 'Memory' def __init__(self, min_value, desired_value, max_value, ame_ef, host_ame_cap, lmb_size, allow_none=True): super(Memory, self).__init__( IntBoundField, 'Minimum Memory', 'Desired Memory', 'Maximum Memory', min_value, desired_value, max_value, allow_none=allow_none) self.lmb_size = lmb_size # Set the lowest memory we'll honor self.min_field._min_bound = MEM_LOW_BOUND # Don't allow the desired memory to not be specified. self.des_field.allow_none = False self.ame_ef = ame_ef self.host_ame_cap = host_ame_cap def validate(self): super(Memory, self).validate() self._validate_lmb_size() self._validate_ame() def _validate_lmb_size(self): # Validate against the LMB size if self.lmb_size is not None: # For each value, make sure it's a multiple for x in [self.min_field.typed_value, self.des_field.typed_value, self.max_field.typed_value]: if x is not None and (x % self.lmb_size) != 0: values = dict(lmb_size=self.lmb_size, value=x) msg = _("Memory value is not a multiple of the " "logical memory block size (%(lmb_size)s) of " " the host. 
Value: %(value)s") % values raise ValueError(msg) def _validate_ame(self): # Validate the expansion factor value if self.ame_ef is not None: exp_fact_float = round(float(self.ame_ef), 2) values = dict(value=self.ame_ef) if not self.host_ame_cap and exp_fact_float != 0: msg = _("The managed system does not support active memory " "expansion. The expansion factor value '%(value)s' " "is not valid.") % values raise ValueError(msg) if (exp_fact_float != 0 and exp_fact_float < 1 or exp_fact_float > 10): msg = _("Active memory expansion value must be greater than " "or equal to 1.0 and less than or equal to 10.0. A " "value of 0 is also valid and indicates that AME is " "off. '%(value)s' is not valid.") % values raise ValueError(msg) class VCpu(MinDesiredMaxField): _name = 'VCPU' def __init__(self, min_value, desired_value, max_value, allow_none=True): super(VCpu, self).__init__( IntBoundField, 'Minimum VCPU', 'Desired VCPU', 'Maximum VCPU', min_value, desired_value, max_value, allow_none=allow_none) # Set the lowest VCPU we'll honor self.min_field._min_bound = VCPU_LOW_BOUND class ProcUnits(MinDesiredMaxField): _name = 'ProcUnits' def __init__(self, min_value, desired_value, max_value, allow_none=True): super(ProcUnits, self).__init__( FloatBoundField, 'Minimum Proc Units', 'Desired Proc Units', 'Maximum Proc Units', min_value, desired_value, max_value, allow_none=allow_none) # Set the lowest ProcUnits we'll honor self.min_field._min_bound = PROC_UNITS_LOW_BOUND class DedicatedProc(BoolField): _name = 'Dedicated Processors' class LPARType(ChoiceField): _choices = (bp.LPARType.AIXLINUX, bp.LPARType.OS400, bp.LPARType.VIOS) _name = 'Logical Partition Type' def __init__(self, value, allow_none=False): super(LPARType, self).__init__(value, allow_none=allow_none) class ProcCompatMode(ChoiceField): _choices = bp.LPARCompat.ALL_VALUES _name = 'Processor Compatability Mode' def __init__(self, value, host_modes=None, allow_none=True): super(ProcCompatMode, self).__init__(value, 
allow_none=allow_none) if host_modes: self._choices = host_modes class SecureBoot(IntBoundField): """Secure boot policy. """ _min_bound = 0 _max_bound = 9 _name = 'Secure Boot' def __init__(self, value, host_cap, allow_none=False): super(SecureBoot, self).__init__(value, allow_none=allow_none) self.host_cap = host_cap def validate(self): """Performs the additional host capability check for secure boot.""" super(SecureBoot, self).validate() if int(self.value) > 0 and not self.host_cap: msg = _("The managed system or partition type does not support " "secure boot.") raise ValueError(msg) class DedProcShareMode(ChoiceField): _choices = bp.DedicatedSharingMode.ALL_VALUES _name = 'Dedicated Processor Sharing Mode' def __init__(self, value, allow_none=False): super(DedProcShareMode, self).__init__(value, allow_none=allow_none) class IOSlots(IntBoundField): """Maximum virtual I/O slots. This is not to be confused with the actual io_slots (list of IOSlot) in the partition's io_config (PartitionIOConfiguration), which is set in the builder via the 'phys_io_slots' (PHYS_IO_SLOTS) key. 
""" _min_bound = 2 # slot 0 & 1 are always in use _max_bound = 65534 _name = 'Maximum Virtual I/O Slots' def __init__(self, value, allow_none=False): super(IOSlots, self).__init__(value, allow_none=allow_none) class AvailPriority(IntBoundField): _min_bound = 0 _max_bound = 255 _name = 'Availability Priority' class IDBoundField(IntBoundField): _min_bound = 1 _name = 'ID' class EnableLparMetric(BoolField): _name = 'Enable LPAR Metric' class SimplifiedRemoteRestart(BoolField): _name = 'Simplified Remote Restart' class PhysicalPageTableRatio(ChoiceField): _name = 'Physical Page Table Ratio' _choices = ALLOWED_PPT_RATIOS.keys() def __init__(self, value, host_cap, allow_none=True): super(PhysicalPageTableRatio, self).__init__( value, allow_none=allow_none) self.host_cap = host_cap @classmethod def convert_value(cls, value): """Converts the ratio as a string to the REST accepted values.""" return ALLOWED_PPT_RATIOS[value] def _convert_value(self, value): # Override Field class definition to avoid KeyErrors on bad values # when validation is run. 
return value def validate(self): """Performs validation of the PPT ratio attribute.""" super(PhysicalPageTableRatio, self).validate() # Validate the host capability if not self.host_cap and self.value: msg = _("The managed system does not support setting the physical " "page table ratio.") raise ValueError(msg) class EnforceAffinityCheck(BoolField): _name = 'Enforce Affinity Check' def __init__(self, value, host_cap, allow_none=True): super(EnforceAffinityCheck, self).__init__( value, allow_none=allow_none) self.host_cap = host_cap def validate(self): """Performs validation of the affinity check attribute.""" super(EnforceAffinityCheck, self).validate() # Validate the host capability if (str(self.value).lower() == 'true') and (not self.host_cap): msg = _("The managed system does not support affinity score " "checks as part of migration.") raise ValueError(msg) class RestrictedIO(BoolField): _name = 'Restricted IO' class LPARBuilder(object): def __init__(self, adapter, attr, stdz): self.adapter = adapter self.attr = attr self.stdz = stdz for val in MINIMUM_ATTRS: if self.attr.get(val) is None: raise LPARBuilderException('Missing required attribute: %s' % val) stdz.set_attr(attr) def build_ded_proc(self): # Ensure no shared proc keys are present # TODO(IBM): std = self.stdz.ded_proc() dproc = bp.PartitionProcessorConfiguration.bld_dedicated( self.adapter, std[VCPU], min_proc=std[MIN_VCPU], max_proc=std[MAX_VCPU], sharing_mode=std[SHARING_MODE]) return dproc def build_shr_proc(self): # Ensure no dedicated proc keys are present # TODO(IBM): std = self.stdz.shr_proc() # The weight may not be set if it's not uncapped uncapped_weight = std.get(UNCAPPED_WEIGHT) # Build the shared procs shr_proc = bp.PartitionProcessorConfiguration.bld_shared( self.adapter, std[PROC_UNITS], std[VCPU], sharing_mode=std[SHARING_MODE], uncapped_weight=uncapped_weight, min_proc_unit=std[MIN_PROC_U], max_proc_unit=std[MAX_PROC_U], min_proc=std[MIN_VCPU], max_proc=std[MAX_VCPU], 
proc_pool=std[SPP]) return shr_proc def build_mem(self): std = self.stdz.memory() mem_wrap = bp.PartitionMemoryConfiguration.bld( self.adapter, std[MEM], min_mem=std[MIN_MEM], max_mem=std[MAX_MEM]) # Determine AME enabled boolean value from expansion factor value if self.attr.get(AME_FACTOR) is not None: exp_fact_float = round(float(self.attr.get(AME_FACTOR)), 2) mem_wrap.exp_factor = exp_fact_float # The PPT ratio should've been converted to REST format by the # Standardizer. if self.stdz.attr.get(PPT_RATIO) is not None: mem_wrap.ppt_ratio = self.stdz.attr.get(PPT_RATIO) return mem_wrap def build_io_config(self): std = self.stdz.io_config() io_config = bp.PartitionIOConfiguration.bld( self.adapter, std[MAX_IO_SLOTS], io_slots=std[PHYS_IO_SLOTS]) return io_config def _shared_proc_keys_specified(self): # Check for any shared proc keys for key in SHARED_PROC_KEYS: if self.attr.get(key, None) is not None: return True # Check the sharing mode values if any smode = self.attr.get(SHARING_MODE, None) if (smode is not None and smode in bp.SharingMode.ALL_VALUES): return True return False def _dedicated_proc_keys_specified(self): # Check for dedicated proc keys # TODO(IBM): # Check for dedicated sharing mode smode = self.attr.get(SHARING_MODE, None) if (smode is not None and smode in bp.DedicatedSharingMode.ALL_VALUES): return True def _shared_procs_specified(self): """Determine if shared procs should be configured. General methodology is to try to check everything that would indicate shared processors first, then dedicated, and finally just default to shared if we can't determine either way. 
""" if self.attr.get(DED_PROCS, None) is not None: return not DedicatedProc.convert_value(self.attr[DED_PROCS]) # Check each key that would indicate sharing procs if self._shared_proc_keys_specified(): return True # Check for dedicated sharing mode if self._dedicated_proc_keys_specified(): return False # Default is to use shared if not proven otherwise return True def build(self): # Build a minimimal LPAR, the real work will be done in rebuild std = self.stdz.general() if std[ENV] == bp.LPARType.VIOS: lpar_w = vios.VIOS.bld( self.adapter, std[NAME], bp.PartitionMemoryConfiguration.bld(self.adapter, 0), bp.PartitionProcessorConfiguration.bld_dedicated( self.adapter, 0), io_cfg=bp.PartitionIOConfiguration.bld(self.adapter, 0)) else: lpar_w = lpar.LPAR.bld( self.adapter, std[NAME], bp.PartitionMemoryConfiguration.bld(self.adapter, 0), bp.PartitionProcessorConfiguration.bld_dedicated( self.adapter, 0), io_cfg=bp.PartitionIOConfiguration.bld(self.adapter, 0), env=std[ENV]) # Only set the uuid if one is sent in, otherwise it will be set # by PowerVM if std.get(UUID) is not None: lpar_w.uuid = std[UUID] if std.get(ID) is not None: lpar_w._id(std[ID]) return self.rebuild(lpar_w) def rebuild(self, lpar_w): # Build the memory section mem_cfg = self.build_mem() # Build proc section # Determine if using shared or dedicated processors if self._shared_procs_specified(): proc_cfg = self.build_shr_proc() else: proc_cfg = self.build_ded_proc() # Update any general attributes std = self.stdz.general() lpar_w.name = std[NAME] lpar_w.avail_priority = std[AVAIL_PRIORITY] lpar_w.proc_compat_mode = std[PROC_COMPAT] lpar_w.allow_perf_data_collection = std[ENABLE_LPAR_METRIC] if std.get(SECURE_BOOT) is not None: lpar_w.pending_secure_boot = std[SECURE_BOOT] # Host may not be capable of SRR, so only add it if it's in the # standardized attributes if std.get(SRR_CAPABLE) is not None: lpar_w.srr_enabled = std[SRR_CAPABLE] io_cfg = self.build_io_config() # Now start replacing the sections 
lpar_w.mem_config = mem_cfg lpar_w.proc_config = proc_cfg lpar_w.io_config = io_cfg # Add IBMi values if needed if lpar_w.env == bp.LPARType.OS400: lpar_w.io_config.tagged_io = bp.TaggedIO.bld( self.adapter, load_src=std[LOAD_SRC], console=std[CONSOLE], alt_load_src=std[ALT_LOAD_SRC]) if std.get(RESTRICTED_IO) is not None: lpar_w.restrictedio = std[RESTRICTED_IO] return lpar_w pypowervm-1.1.24/pypowervm/utils/wrappers.py0000664000175000017500000000365113571367171020702 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """General utilities around wrappers.""" import importlib import os import sys import pypowervm.wrappers.entry_wrapper as ewrap _wrappers_pkg = ('pypowervm', 'wrappers') _imports = None _this_module = sys.modules[__name__] def _get_imports(): """Imports all modules in pypowervm.wrappers and returns them as a dict. The returned dict is of the form: { module_name: , ... } """ global _imports if _imports is None: _modnames = [fname.rsplit('.', 1)[0] for fname in os.listdir(os.path.join(*_wrappers_pkg)) if not fname.startswith('_') and fname.endswith('.py')] _imports = { modname: importlib.import_module( '.'.join(_wrappers_pkg) + '.' + modname) for modname in _modnames} return _imports def wrapper_class_iter(): """Iterator over all Wrapper subclasses defined in pypowervm.wrappers. Each yield is the Wrapper subclass itself. 
""" for klass in (cls for imp in _get_imports().values() for cls in vars(imp).values()): try: if issubclass(klass, ewrap.Wrapper): yield klass except TypeError: # issubclass can't handle the things that aren't classes pass pypowervm-1.1.24/pypowervm/utils/uuid.py0000664000175000017500000000337213571367171020005 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities around Universally-Unique Identifiers (UUIDs).""" from oslo_utils import uuidutils def convert_uuid_to_pvm(uuid): """Converts a standard UUID to PowerVM format PowerVM uuids always set the byte 0, bit 0 to 0. :param uuid: A standard format uuid string :returns: A PowerVM compliant uuid """ return "%x%s" % (int(uuid[0], 16) & 7, uuid[1:]) def id_or_uuid(an_id): """Sanitizes a short ID or string UUID, and indicates which was used. Use as: is_uuid, lpar_id = id_or_uuid(lpar_id) if is_uuid: # lpar_id is a string UUID else: # lpar_id is LPAR short ID of type int :param an_id: Short ID (may be string or int) or string UUID of, e.g., an LPAR. :return: Boolean. If True, the other return is a UUID string. If False, it is an integer. :return: The input ID, either converted to int, or in its original string form if a UUID. 
""" if uuidutils.is_uuid_like(an_id): is_uuid = True ret_id = an_id else: is_uuid = False ret_id = int(an_id) return is_uuid, ret_id pypowervm-1.1.24/pypowervm/utils/transaction.py0000664000175000017500000010766313571367171021374 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from concurrent.futures import thread as th import oslo_concurrency.lockutils as lock import oslo_context.context as ctx from oslo_log import log as logging from oslo_utils import reflection import six from taskflow import engines as tf_eng from taskflow import exceptions as tf_ex from taskflow.patterns import linear_flow as tf_lf from taskflow.patterns import unordered_flow as tf_uf from taskflow import task as tf_task import threading import pypowervm.exceptions as ex from pypowervm.i18n import _ from pypowervm.utils import retry import pypowervm.wrappers.entry_wrapper as ewrap LOG = logging.getLogger(__name__) _local = threading.local() def _get_locks(): """Returns the list of UUIDs locked by this thread.""" locks = getattr(_local, 'entry_transaction', None) if locks is None: locks = [] _set_locks(locks) return locks def _set_locks(locks): """Sets the list of UUIDs locked by this thread.""" _local.entry_transaction = locks def entry_transaction(func): """Decorator to facilitate transaction semantics on a PowerVM object. 
Typically, a method thus decorated will make some set of changes to an EntryWrapper and then perform one or more REST operations thereon. The *consumer* of the decorated method may pass either an EntryWrapper or an EntryWrapperGetter as the first argument. The *developer* of the decorated method is guaranteed that the first argument is an EntryWrapper. This decorator provides three things: 1) The decorated method may be invoked with either an EntryWrapper or an EntryWrapperGetter as its first argument. However, within the body of the method, that argument is guaranteed to be the appropriate EntryWrapper. 2) The decorated method is locked on the UUID of the PowerVM object on which it operates (represented by its first argument). Only one method thus decorated can operate on that PowerVM object at one time. 3) If the decorated method fails due to an etag mismatch - indicating that the wrapper was updated out-of-band between when it was retrieved and when it was updated - the wrapper is refreshed and the entire method is redriven. Note: If the etag mistmatch occurs, the STEPPED_DELAY function is used from the retry.py. This provides a gradual increase in the delay (except for the first retry - which is immediate). A maximum number of 6 retries will occur. Example usage: @entry_transaction def add_gizmos_to_vios_wrapper(vios_wrapper, gizmos): vios_wrapper.gizmo_list.extend(gizmos) return vios_wrapper.update() This method can then be invoked either as: add_gizmos_to_vios_wrapper(existing_vios_wrapper, gizmos) or as: add_gizmos_to_vios_wrapper(pvm_vios.VIOS.getter(adapter, uuid), gizmos) """ def _synchronize(wrp_or_spec, *a1, **k1): """Returned method is synchronized on the object's UUID.""" @lock.synchronized(wrp_or_spec.uuid) def _locked_resolve_wrapper(wos, *a2, **k2): try: # The synchronized decorator will hold off other threads # we just have to hold off lock attempts by methods further # down the stack. 
_get_locks().append(wrp_or_spec.uuid) return _resolve_wrapper(wos, *a2, **k2) finally: _get_locks().remove(wrp_or_spec.uuid) def _resolve_wrapper(wos, *a2, **k2): """Returned method guaranteed to be called with a wrapper.""" if isinstance(wos, ewrap.EntryWrapperGetter): wos = wos.get() @retry.retry(argmod_func=retry.refresh_wrapper, tries=60, delay_func=retry.STEPPED_RANDOM_DELAY) def _retry_refresh(wrapper, *a3, **k3): """Retry as needed, refreshing its wrapper each time.""" return func(wrapper, *a3, **k3) return _retry_refresh(wos, *a2, **k2) def _lock_if_needed(wos, *a2, **k2): # Check if this UUID is already locked if wrp_or_spec.uuid in _get_locks(): # It's already locked by this thread, so skip the lock. return _resolve_wrapper(wos, *a2, **k2) else: return _locked_resolve_wrapper(wos, *a2, **k2) return _lock_if_needed(wrp_or_spec, *a1, **k1) return _synchronize @six.add_metaclass(abc.ABCMeta) class Subtask(object): """A single EntryWrapper modification to be performed within a WrapperTask. A subclass performs its work by overriding the execute method. That method may or may not make changes to the EntryWrapper, which is its first argument. Its return value must indicate whether changes were made to the wrapper: this is the trigger used by WrapperTask to determine whether to POST the changes back to the REST server via update(). The return value is saved by the surrounding WrapperTask if the 'provides' argument is used on initialization. This value can then be retrieved by subsequent Subtasks. A Subtask should never update() or refresh() the wrapper. That is handled by the surrounding WrapperTask. See WrapperTask for example usage. """ def __init__(self, *save_args, **save_kwargs): """Create the Subtask, saving execution arguments for later. :param save_args: Positional arguments to be passed to the execute method - *after* the wrapper - when it is invoked under a WrapperTask. 
:param save_kwargs: Keyword arguments to be passed to the execute method when it is invoked under a WrapperTask. :param provides: (Optional) String name for the return value from the execute method. If this parameter is used, the return value will be saved by the surrounding WrapperTask and be available to subsequent Subtasks via the 'provided' keyword argument. The 'provides' name must be unique within a WrapperTask. :param flag_update: (Optional) Boolean indicating whether a True return from this Subtask should trigger an update() in the surrounding WrapperTask. By default, this is True. Set this to False, for example, to provide some data to subsequent Subtasks without forcing an update. """ self.provides = save_kwargs.pop('provides', None) self.flag_update = save_kwargs.pop('flag_update', True) self.save_args = save_args self.save_kwargs = save_kwargs @abc.abstractmethod def execute(self, *args, **kwargs): """Modify the EntryWrapper (must be overridden by the subclass). The execute method has two responsibilities: 1) Performs the modification to the EntryWrapper which is passed as its first argument. 2) Indicates whether any modifications were performed. Example: def execute(thingy_wrapper, primary_widget, provided=None): update_needed = False if primary_widget not in thingy_wrapper.widgets: thingy_wrapper.set_primary_widget(primary_widget) update_needed = True # Was a widget list provided by a prior Subtask? if provided is not None: widget_list = provided.get('widget_list', []) for widget in widget_list: thingy_wrapper.widgets.append(widget) update_needed = True return update_needed :param args: Positional arguments accepted by the execute method. The first argument will always be the EntryWrapper. Overrides may define their signatures using explicit parameter names. :param kwargs: Keyword arguments accepted by the execute method. Overrides may use explicit parameter names. 
:param provided: Dict of return values provided by Subtasks whose execution preceded this one, and which used the 'provides' keyword argument to save their returns. The keys of the dict are the 'provides' strings of the prior Subtasks. :return: The return value must be a single value (this may be a list, but not a tuple) which evaluates to True or False. Unless this Subtask was initialized with flag_update=False, any True value indicates that the wrapper was modified and should be POSTed back to the REST server via update(). Any False value (including None, [], {}, etc) indicates that this Subtask did not modify the wrapper. (Note that it may still be POSTed if modified by other Subtasks in the same WrapperTask.) """ class _FunctorSubtask(Subtask): """Shim to create a Subtask around an existing callable.""" def __init__(self, _func, *save_args, **save_kwargs): """Save the callable as well as the arguments. :param _func: Callable to be invoked under the WrapperTask. :param save_args: See Subtask.__init__(save_args). :param save_kwargs: See Subtask.__init__(save_kwargs). May contain the following values, which are treated specially and NOT passed to the callable _func: provides: See Subtask.__init__(provides). flag_update: See Subtask.__init__(flag_update). logspec: Iterable comprising a logging function, a format string, and zero or more arguments. The log method is invoked before the func. 
Example: logspec = [LOG.info, _LI("Deleting widget %(widget)s from " "instance %(instance)s."), {'widget': widg, 'instance': instance.name}] FunctorSubtask(..., logspec=logspec) """ self._logspec = save_kwargs.pop('logspec', []) super(_FunctorSubtask, self).__init__(*save_args, **save_kwargs) self._func = _func if self._logspec: if len(self._logspec) < 2 or not callable(self._logspec[0]): raise ValueError( "logspec must be a list comprising a callable followed by " "a format string and zero or more arguments.") def execute(self, wrapper, *_args, **_kwargs): """Invoke saved callable with saved args.""" if not ('provided' in reflection.get_callable_args(self._func) or reflection.accepts_kwargs(self._func)): _kwargs.pop('provided', None) if 'provided' in _kwargs and _kwargs['provided'] == {}: _kwargs.pop('provided', None) if self._logspec: # Execute the log method (the first element in the list) with its # arguments (the remaining elements in the list). self._logspec[0](*self._logspec[1:]) return self._func(wrapper, *_args, **_kwargs) class WrapperTask(tf_task.Task): """An atomic modify-and-POST transaction Task over a single EntryWrapper. The modifications should comprise some number of Subtask instances, added to this WrapperTask via the add_subtask and/or add_functor_subtask methods. These Subtasks should only modify the EntryWrapper, and should not POST (.update()) it back to the REST Server. The WrapperTask will decide whether a POST is needed based on the returns from the Subtasks' execute methods, and perform it if indicated. The WrapperTask's execute method is encompassed by @entry_transaction, meaning that: 1) The initial GET of the EntryWrapper may be deferred until after the lock is acquired. 2) The execute method is locked on the UUID of the Entry in question. 3) If the final update (POST) fails due to etag mismatch, the EntryWrapper is refetched and the entire transaction is redriven from the start. 
Usage: class ModifyGizmos(Subtask): def execute(self, wrapper, gizmo_list, provides='first_gizmo'): update_needed = None if gizmo_list: wrapper.gizmos.append(gizmo_list) update_needed = gizmo_list[0] return update_needed def add_widget(wrapper, widget, frob=False, provided=None): if provided is not None: widget.first_gizmo = provided.get('first_gizmo') wrapper.widgets.append(widget, frob) return len(wrapper.widgets) ... tx = WrapperTask("do_lpar_things", LPAR.getter(adapter, lpar_uuid)) or tx = WrapperTask("do_lpar_things", LPAR.getter(adapter, lpar_uuid), subtasks=existing_wrapper_task.subtasks) or # Not recommended - increased probability of retry wrapper = LPAR.wrap(adapter.read(LPAR.schema_type, lpar_uuid)) tx = WrapperTask("do_lpar_things", wrapper) ... tx.add_subtask(ModifyGizmos([giz1, giz2])) ... logspec = [LOG.info, _LI("Added widget %(widget)s to LPAR %(lpar)s."), {'widget': widget.name, 'lpar': lpar_uuid}] tx.add_functor_subtask(add_widget, widget, provides='widget_count', logspec=logspec) ... finalized_lpar = tx.execute() """ def __init__(self, name, wrapper_or_getter, subtasks=None, allow_empty=False, update_timeout=-1): """Initialize this WrapperTask. :param name: A descriptive string name for the WrapperTask. :param wrapper_or_getter: An EntryWrapper or EntryWrapperGetter representing the PowerVM object on which this WrapperTask is to be performed. :param subtasks: (Optional) Iterable of Subtask subclass instances with which to seed this WrapperTask. :param allow_empty: (Optional) By default, executing a WrapperTask containing no Subtasks will result in exception WrapperTaskNoSubtasks. If this flag is set to True, this condition will instead log an info message and return None (NOT the wrapper - note, this is different from "subtasks ran, but didn't change anything," which returns the wrapper). :param update_timeout: (Optional) Integer number of seconds after which to time out the POST request. 
-1, the default, causes the request to use the timeout value configured on the Session belonging to the Adapter. :raise WrapperTaskNoSubtasks: If allow_empty is False and this WrapperTask is executed without any Subtasks having been added. """ if isinstance(wrapper_or_getter, ewrap.EntryWrapperGetter): self._wrapper = None self._getter = wrapper_or_getter elif isinstance(wrapper_or_getter, ewrap.EntryWrapper): self._wrapper = wrapper_or_getter self._getter = None else: raise ValueError(_("Must supply either EntryWrapper or " "EntryWrapperGetter.")) super(WrapperTask, self).__init__( name, provides=('wrapper_%s' % wrapper_or_getter.uuid, 'subtask_rets_%s' % wrapper_or_getter.uuid)) self._tasks = [] if subtasks is None else list(subtasks) self.allow_empty = allow_empty self.update_timeout = update_timeout # Dict of return values provided by Subtasks using the 'provides' arg. self.provided = {} # Set of 'provided' names to prevent duplicates. (Some day we may want # to make this a list and use it to denote the order in which subtasks # were run.) self.provided_keys = set() def add_subtask(self, task): """Add a Subtask to this WrapperTask. Subtasks will be invoked serially and synchronously in the order in which they are added. :param task: Instance of a Subtask subclass containing the logic to invoke. :return: self, for chaining convenience. """ if not isinstance(task, Subtask): raise ValueError(_("Must supply a valid Subtask.")) # Seed the 'provided' dict and ensure no duplicate names if task.provides is not None: if task.provides in self.provided_keys: raise ValueError(_("Duplicate 'provides' name %s.") % task.provides) self.provided_keys.add(task.provides) self._tasks.append(task) return self def add_functor_subtask(self, func, *args, **kwargs): """Create and add a Subtask for an already-defined method. :param func: A callable to be the core of the Subtask. 
The contract for this method is identical to that of Subtask.execute - see that method's docstring for details. :param args: Positional arguments to be passed to the callable func (after the EntryWrapper parameter) when it is executed within the WrapperTask. :param kwargs: Keyword arguments to be passed to the callable func when it is executed within the WrapperTask. May contain the following values, which are treated specially and NOT passed to the callable func: provides: See Subtask.__init__(provides). flag_update: See Subtask.__init__(flag_update). logspec: Iterable comprising a logging function, a format string, and zero or more arguments. The log method is invoked before the func. Example: logspec = [LOG.info, _LI("Deleting widget %(widget)s from " "instance %(instance)s."), {'widget': widg, 'instance': instance.name}] FunctorSubtask(..., logspec=logspec) :return: self, for chaining convenience. """ return self.add_subtask(_FunctorSubtask(func, *args, **kwargs)) @property def wrapper(self): """(Fetches and) returns the EntryWrapper. Use this only if you need the EntryWrapper outside of the WrapperTask's execution itself. Note that this guarantees a GET outside of lock, and should therefore be used only if absolutely necessary. """ if not self._wrapper: self._wrapper = self._getter.get() # NOTE: This access of self._wrapper must remain atomic. # See TAG_WRAPPER_SYNC. return self._wrapper @property def subtasks(self): """Return the sequence of Subtasks registered with this WrapperTask. This is returned as a tuple (not modifiable). To add subtasks, use the add_[functor_]subtask method. """ return tuple(self._tasks) def execute(self): """Invoke subtasks and update under @entry_transaction. The flow is as follows: 1 Lock on wrapper UUID 2 GET wrapper if necessary 3 For each registered Subtask: - Invoke the Subtask to modify the wrapper 4 If update is necessary, POST the wrapper. 
If POST fails with etag mismatch: - Refresh the wrapper - goto 2 5 Unlock """ if len(self._tasks) == 0: if self.allow_empty: LOG.info(_("WrapperTask %s has no Subtasks; no-op execution."), self.name) return None raise ex.WrapperTaskNoSubtasks(name=self.name) @entry_transaction def _execute(wrapper): update_needed = False for task in self._tasks: kwargs = task.save_kwargs if ('provided' in reflection.get_callable_args(task.execute) or reflection.accepts_kwargs(task.execute)): kwargs['provided'] = self.provided ret = task.execute(wrapper, *task.save_args, **kwargs) if task.flag_update and ret: update_needed = True if task.provides is not None: self.provided[task.provides] = ret if update_needed: wrapper = wrapper.update(timeout=self.update_timeout) return wrapper # Use the wrapper if already fetched, or the getter if not # NOTE: This assignment must remain atomic. See TAG_WRAPPER_SYNC. self._wrapper = _execute(self._wrapper or self._getter) return self._wrapper, self.provided class ContextThreadPoolExecutor(th.ThreadPoolExecutor): def submit(self, fn, *args, **kwargs): context = ctx.get_current() # Get the list of locks held by this thread, we don't want sub # tasks locking the same thing! held_locks = list(_get_locks()) def wrapped(): # This is executed in the new thread. if context is not None: context.update_store() # Ensure the sub task knows about the parent's locks and doesn't # block on them. _set_locks(held_locks) return fn(*args, **kwargs) return super(ContextThreadPoolExecutor, self).submit(wrapped) class FeedTask(tf_task.Task): """Invokes WrapperTasks in parallel over each EntryWrapper in a feed. Usage Creation: # Preferred fm = FeedTask('lpar_frobnicate', LPAR.getter(adapter)) or # Non-preferred. 
See 'Greedy Methods' warning below
            feed = LPAR.wrap(adapter.read(LPAR.schema_type, ...))
            fm = FeedTask('lpar_frobnicate', feed)

        Adding Subtasks:
            # Preferred
            fm.add_subtask(FrobnicateLpar(foo, bar))
            fm.add_functor_subtask(frobnify, abc, xyz)

            and/or

            # Non-preferred.  See 'Greedy Methods' warning below
            for uuid, txn in fm.wrapper_tasks.items():
                if meets_criteria(txn.wrapper, uuid):
                    txn.add_subtask(FrobnicateLpar(baz, blah))
            fm.wrapper_tasks[known_uuid].add_subtask(FrobnicateLpar(baz, blah))

        Execution/TaskFlow management:
            main_flow.add(fm)
            ...
            taskflow.engines.run(main_flow)

    Warning: Greedy Methods
    This implementation makes every effort to defer the feed GET as long as
    possible.  The more time passes between the GET and the execution of the
    WrapperTasks, the more likely it is that some out-of-band change will have
    modified one of the objects represented in the feed.  This will cause an
    etag mismatch on that WrapperTask's update (POST), resulting in that
    WrapperTask being redriven, which costs an extra GET+POST to the REST
    server.

    Consumers of this class can thwart these efforts by:
    a) Initializing the FeedTask with an already-retrieved feed instead of a
    FeedGetter; or
    b) Using any of the following methods/properties prior to execution.  All
    of these will trigger a GET of the feed if not already fetched:
        .wrapper_tasks
        .get_wrapper(uuid)
        .feed
    The cost is incurred only the first time one of these is used.  If your
    workflow requires calling one of these early, it is not necessary to
    avoid them subsequently.
    """
    def __init__(self, name, feed_or_getter, max_workers=10,
                 update_timeout=-1):
        """Create a FeedTask with a FeedGetter (preferred) or existing feed.

        :param name: A descriptive string name.  This will be used along with
                     each wrapper's UUID to generate the name for that
                     wrapper's WrapperTask.
        :param feed_or_getter: pypowervm.wrappers.entry_wrapper.FeedGetter or
                               an already-fetched feed (list of EntryWrappers)
                               over which to operate.
        :param max_workers: (Optional) Integer indicating the maximum number
                            of worker threads to run in parallel within the
                            .flow or by the .execute method.  See
                            concurrent.futures.ThreadPoolExecutor(max_workers)
        :param update_timeout: (Optional) Integer number of seconds after
                               which to time each WrapperTask's POST request.
                               -1, the default, causes the request to use the
                               timeout value configured on the Session
                               belonging to the Adapter.
        """
        super(FeedTask, self).__init__(name)
        if isinstance(feed_or_getter, ewrap.FeedGetter):
            self._feed = None
            self._getter = feed_or_getter
        elif isinstance(feed_or_getter, list):
            # Make sure the feed has something in it.
            if len(feed_or_getter) == 0:
                raise ex.FeedTaskEmptyFeed()
            # Make sure it's a list of EntryWrapper
            if [i for i in feed_or_getter
                    if not isinstance(i, ewrap.EntryWrapper)]:
                raise ValueError("List must contain EntryWrappers "
                                 "exclusively.")
            self._feed = feed_or_getter
            self._getter = None
        else:
            raise ValueError(_("Must supply either a list of EntryWrappers "
                               "or a FeedGetter."))
        # Max WrapperTasks to run in parallel
        self.max_workers = max_workers
        self.update_timeout = update_timeout
        # Map of {uuid: WrapperTask}.  We keep this empty until we need the
        # individual WrapperTasks.  This is triggered by .wrapper_tasks and
        # .get_wrapper(uuid) (and obviously executing).
        self._tx_by_uuid = {}
        # Until we *need* individual WrapperTasks, save subtasks in one place.
        # EntryWrapperGetter is a cheat to allow us to build the WrapperTask.
        self._common_tx = WrapperTask(
            'internal', ewrap.EntryWrapperGetter(None, ewrap.Wrapper, None))
        self._post_exec = []

    @property
    def wrapper_tasks(self):
        """(Greedy) Dictionary of {uuid: WrapperTask} for all wrappers.

        The first access of this property triggers a GET of the feed if it
        has not already been fetched, so use judiciously.
        """
        if not self._tx_by_uuid:
            # Create a separate WrapperTask for each wrapper in the feed.
# As long as the consumer uses FeedTask.add_[functor_]subtask # and doesn't ask for .wrapper_tasks, we keep only one copy of the # subtask list. Once the consumer "breaks the seal" and requests # individual WrapperTasks per wrapper, we need to (GET the feed - # this is triggered by .feed - and) create them based on this # common subtask list. # This is only done once. Thereafter, .add_[functor_]subtask will # add separately to each WrapperTask. for entry in self.feed: name = '%s_%s' % (self.name, entry.uuid) self._tx_by_uuid[entry.uuid] = WrapperTask( name, entry, subtasks=self._common_tx.subtasks, allow_empty=True, update_timeout=self.update_timeout) return self._tx_by_uuid def get_wrapper(self, uuid): """(Greedy) Returns the EntryWrapper associated with a particular UUID. Note that this method triggers a GET of the feed if it has not already been fetched, so use judiciously. :param uuid: The UUID of the wrapper of interest. :return: The EntryWrapper instance with the specified UUID. :raise KeyError: If there's no WrapperTask for a wrapper with the specified UUID. """ # Grab it from the WrapperTask map (O(1)) rather than the feed (O(n)). # It'll also be up to date without having to trigger a feed rebuild. return self.wrapper_tasks[uuid].wrapper def add_subtask(self, task): """Add a Subtask to *all* WrapperTasks in this FeedTask. To add Subtasks to individual WrapperTasks, iterate over the result of the 'wrapper_tasks' property. Specification is the same as for WrapperTask.add_subtask. """ if self._tx_by_uuid: # _tx_by_uuid is guaranteed to have WrapperTasks for all UUIDs, # including this one for txn in self._tx_by_uuid.values(): txn.add_subtask(task) else: self._common_tx.add_subtask(task) return self def add_functor_subtask(self, func, *args, **kwargs): """Add a functor Subtask to *all* WrapperTasks in this FeedTask. To add Subtasks to individual WrapperTasks, iterate over the result of the 'wrapper_tasks' property. 
Specification is the same as for WrapperTask.add_functor_subtask. """ return self.add_subtask(_FunctorSubtask(func, *args, **kwargs)) def add_post_execute(self, *tasks): """Add some number of TaskFlow Tasks to run after the WrapperTasks. Such Tasks may 'require' a parameter called wrapper_task_rets, which will be a dict of the form: {uuid: { 'wrapper': wrapper, label1: return_value, label2: return_value, ... labelN: return_value}} ...where: uuid is the UUID of the WrapperTask's wrapper. wrapper is the WrapperTask's wrapper in its final (possibly-updated) form. labelN: return_value are the return values from Subtasks using the 'provides' mechanism. Each label corresponds to the name given by the Subtask's 'provides' argument. :param tasks: Some number of TaskFlow Tasks (or Flows) to be executed linearly after the parallel WrapperTasks have completed. """ self._post_exec.extend(tasks) @property def feed(self): """(Greedy) Returns this FeedTask's feed (list of wrappers). The first access of this property triggers a GET of the feed if it has not already been fetched, so use this only if you need the EntryWrappers outside of the execution itself. """ if self._feed is None: self._feed = self._getter.get() if len(self._feed) == 0: raise ex.FeedTaskEmptyFeed() # Do we need to refresh the feed based on having been run? # If we haven't replicated WrapperTasks yet, there's no chance we're # out of sync - and we don't want to trigger GET/replication. if self._tx_by_uuid: # Rebuild the entire feed from the WrapperTasks' .wrappers. # TAG_WRAPPER_SYNC # Note that, if this happens while the WrapperTasks are running, # we may be grabbing the wrapper from a WrapperTask "while" it is # being changed as the result of an update(). This is threadsafe as # long as the assignment (by WrapperTask.execute) and the accessor # (WrapperTask.wrapper) remain atomic by using simple =/return. 
for wrap in self._feed: if hasattr(self.get_wrapper(wrap.uuid), 'etag') \ and self.get_wrapper(wrap.uuid).etag != wrap.etag: self._feed = [tx.wrapper for tx in self.wrapper_tasks.values()] break return self._feed @staticmethod def _process_subtask_rets(subtask_rets): """Reshape the dict of wrapper_{uuid} and subtask_rets_{uuid}. Input form: {'wrapper_%(uuid)s': EntryWrapper, 'subtask_rets_%(uuid)s': { label1: return_value, label2: return_value, ..., labelN: return_value}} Output form: {uuid: { 'wrapper': EntryWrapper, label1: return_value, label2: return_value, ..., labelN: return_value}} """ ret = {} for key, val in subtask_rets.items(): label, uuid = key.rsplit('_', 1) if label != 'wrapper': ret[uuid] = dict(val, wrapper=subtask_rets['wrapper_%s' % uuid]) return ret def execute(self): """Run this FeedTask's WrapperTasks in parallel TaskFlow engine. :return: Dictionary of results provided by subtasks and post-execs. The shape of this dict is as normally expected from TaskFlow, noting that the WrapperTasks are executed in a subflow and their results processed into wrapper_task_rets. For example: {'wrapper_task_rets': { uuid: {...}, uuid: {...}, ...} 'post_exec_x_provides': ..., 'post_exec_y_provides': ..., ...} """ # Ensure a true no-op (in particular, we don't want to GET the feed) if # there are no Subtasks if not any([self._tx_by_uuid, self._common_tx.subtasks, self._post_exec]): LOG.info(_("FeedTask %s has no Subtasks; no-op execution."), self.name) return rets = {'wrapper_task_rets': {}} try: # Calling .wrapper_tasks will cause the feed to be fetched and # WrapperTasks to be replicated, if not already done. Only do this # if there exists at least one WrapperTask with Subtasks. # (NB: It is legal to have a FeedTask that *only* has post-execs.) 
if self._tx_by_uuid or self._common_tx.subtasks: pflow = tf_uf.Flow("%s_parallel_flow" % self.name) pflow.add(*self.wrapper_tasks.values()) # Execute the parallel flow now so the results can be provided # to any post-execs. rets['wrapper_task_rets'] = self._process_subtask_rets( tf_eng.run( pflow, engine='parallel', executor=ContextThreadPoolExecutor(self.max_workers))) if self._post_exec: flow = tf_lf.Flow('%s_post_execs' % self.name) flow.add(*self._post_exec) eng = tf_eng.load(flow, store=rets) eng.run() rets = eng.storage.fetch_all() except tf_ex.WrappedFailure as wfail: LOG.error(_("FeedTask %s experienced multiple exceptions. They " "are logged individually below."), self.name) for fail in wfail: LOG.exception(fail.pformat(fail.traceback_str)) raise ex.MultipleExceptionsInFeedTask(self.name, wfail) # Let a non-wrapped exception (which happens if there's only one # element in the feed) bubble up as-is. return rets pypowervm-1.1.24/pypowervm/utils/retry.py0000664000175000017500000002570613571367171020211 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utility decorator to retry the decorated method.""" import functools from oslo_log import log as logging import random from six import moves import time from pypowervm import const from pypowervm import exceptions as exc from pypowervm.i18n import _ LOG = logging.getLogger(__name__) DFT_RETRY_CODES = frozenset([const.HTTPStatus.ETAG_MISMATCH]) NO_TEST = lambda *args, **kwds: True NO_CHECKER = lambda *args, **kwds: False NO_DELAY = lambda *args, **kwds: None NO_ARGMOD = lambda this_try, max_tries, *args, **kwds: (args, kwds) # Used by STEPPED_RANDOM_DELAY. Each entry corresponds to the kwargs for # gen_random_delay. There's no magic to these numbers; they're fairly # arbitrary. RANDOM_DELAY_STEPS = ({'max_s': 0}, {'max_s': 1}, {'min_s': 0.5, 'max_s': 4}, {'min_s': 2, 'max_s': 13}, {'min_s': 6.5, 'max_s': 30}, # Subsequent steps 0-60s {'max_s': 60}) def STEPPED_DELAY(attempt, max_attempts, *args, **kwds): """A delay function that increases its delay per attempt. The steps will be: - Attempt 1: 0.0s - Attempt 2: 0.5s - Attempt 3: 2.0s - Attempt 4: 6.5s - Attempt 5: 20s - Attempt 6+: 30s """ sleep_time = (0.25 * (3**(attempt-1)) - 0.25) time.sleep(min(sleep_time, 30)) def gen_random_delay(min_s=0, max_s=10): """Generate a delay function that waits a random amount of time. :param min_s: Minimum number of seconds to delay (float). :param max_s: Maximum number of seconds to delay (float). :return: A delay method suitable for passing to retry's delay_func kwarg. """ def RANDOM_DELAY(attempt, max_attempts, *args, **kwargs): span = max_s - min_s sleep_time = min_s + (random.random() * span) time.sleep(sleep_time) return RANDOM_DELAY def STEPPED_RANDOM_DELAY(attempt, max_attempts, *args, **kwargs): """A delay function for increasing random sleep times. The RANDOM_DELAY_STEPS variable is used to determine the min/max for each step. This is a graduating scale - based on the overall max_attempts specified. 
If there are 60 max attempts, the first 10 will use the first delay in RANDOM_DELAY_STEPS, the next ten will use the second delay in RANDOM_DELAY_STEPS, etc... If there are only 6 retries, then the first will use the first position in RANDOM_DELAY_STEPS, the second will map to the second RANDOM_DELAY_STEPS and so on. """ # Generate the position, based off the max attempts and the current pos pos = int(((attempt - 1) * len(RANDOM_DELAY_STEPS)) / max_attempts) # If for some reason, the user goes above the max attempts, limit it to # to the last position. pos = (pos if pos < len(RANDOM_DELAY_STEPS) else (len(RANDOM_DELAY_STEPS) - 1)) # Run the random delay function gen_random_delay(**RANDOM_DELAY_STEPS[pos])(attempt, max_attempts, *args, **kwargs) def refresh_wrapper(trynum, maxtries, *args, **kwargs): """A @retry argmod_func to refresh a Wrapper, which must be the first arg. When using @retry to decorate a method which modifies a Wrapper, a common cause of retry is etag mismatch. In this case, the retry should refresh the wrapper before attempting the modifications again. This method may be passed to @retry's argmod_func argument to effect such a refresh. Note that the decorated method must be defined such that the wrapper is its first argument. """ arglist = list(args) # If we get here, we *usually* have an etag mismatch, so specifying # use_etag=False *should* be redundant. However, for scenarios where we're # retrying for some other reason, we want to guarantee a fresh fetch to # obliterate any local changes we made to the wrapper (because the retry # should be making those changes again). arglist[0] = arglist[0].refresh(use_etag=False) return arglist, kwargs def retry(tries=3, delay_func=NO_DELAY, retry_except=None, http_codes=DFT_RETRY_CODES, test_func=None, resp_checker=NO_CHECKER, limit_except=None, argmod_func=NO_ARGMOD): """Retry method decorator. :param tries: The max number of calls to the wrapped method. 
    :param delay_func: A method to delay before retrying.  Defaults to no
                       delay.  The parameters that are sent are:
                        - the number of the current try
                        - the maximum number of tries
                        - the arguments to the decorated method
                        - the keyword arguments to the decorated method
                       No return value is expected.
    :param retry_except: An exception class (or tuple thereof) to retry if
                         received.  Defaults to no exceptions besides the
                         HttpError which is handled separately by the
                         http_codes parameter.
    :param http_codes: A list of http response codes to retry if received.
                       Default is to not handle any specific http codes.
    :param test_func: A method to call to determine whether to retry.  This
                      method takes precedence over http codes.  That is, if
                      specified, the http codes are not considered.  The
                      parameters that are sent are:
                        - the exception that was received
                        - the number of the current try
                        - the maximum number of tries
                        - the arguments to the decorated method
                        - the keyword arguments to the decorated method
                      The return value is expected to be boolean, True or
                      False, where True means to retry the decorated method.
    :param resp_checker: A method to call when no exception is caught, to
                         check the response and determine whether to retry.
                         The parameters that are sent are:
                          - the number of the current try
                          - the maximum number of tries
                          - the arguments to the decorated method
                          - the keyword arguments to the decorated method
                         The return value is expected to be boolean, True or
                         False, where True means to retry the decorated
                         method.
    :param limit_except: An exception to raise if the number of tries is
                         exhausted.
    :param argmod_func: A method to call after delay_func, before retrying,
                        to modify the arguments to the main method.
                        The input parameters are:
                         - the number of the current try
                         - the maximum number of tries
                         - the non-keyword arguments to the decorated method
                         - the keyword arguments to the decorated method
                        The return is expected to be a list and a dict of the
                        new arguments to the decorated method.
Example: def argmod(t, m, *a, **k): l = list(a) l[0] += 1 k['foo'] = bar return l, k :returns: The return value of the wrapped method. """ def _retry(func): @functools.wraps(func) def __retry(*args, **kwds): def _raise_exc(): if _limit_except: raise _limit_except else: raise def _test_retry(e): # Determine if an exception should be raised if (not _test_func(e, try_, _tries, *args, **kwds) or try_ == _tries): _raise_exc() # Otherwise, we will continue trying return def _log_response_retry(try_, max_tries, uri, resp_code): LOG.warning(_('Attempt %(retry)d of total %(total)d for URI ' '%(uri)s. Error was a known retry response ' 'code: %(resp_code)s'), {'retry': try_, 'total': max_tries, 'uri': uri, 'resp_code': resp_code}) def _log_exception_retry(try_, max_tries, exc): LOG.warning(_('Attempt %(retry)d of %(total)d failed. Will ' 'retry. The exception was:\n %(except)s.'), {'retry': try_, 'total': max_tries, 'except': exc}) # Standardize input # For some reason, if we use the parms in an 'if' directly # python throws an exception. Assigning them avoids it. _tries = tries _retry_except = retry_except _http_codes = http_codes _test_func = test_func _resp_checker = resp_checker _limit_except = limit_except _argmod_func = argmod_func if _retry_except is None: _retry_except = () if _http_codes is None: _http_codes = () caller_test_func = _test_func is not None if not caller_test_func: _test_func = NO_TEST if _resp_checker is None: _resp_checker = NO_CHECKER # Start retries for try_ in moves.range(1, _tries+1): try: resp = func(*args, **kwds) # No exception raised, call the response checker # If we're on the last iteration, we return the response. # The response checker should raise an exception if # it doesn't want this behavior. 
if (not _resp_checker(resp, try_, _tries, *args, **kwds) or try_ == _tries): return resp except exc.HttpError as e: if caller_test_func or e.response.status in _http_codes: _test_retry(e) _log_response_retry(try_, _tries, e.response.reqpath, e.response.status) else: _raise_exc() except _retry_except as e: _test_retry(e) _log_exception_retry(try_, _tries, e) # If we get here then we're going to retry delay_func(try_, _tries, *args, **kwds) # Adjust arguments if necessary args, kwds = _argmod_func(try_, _tries, *args, **kwds) return __retry return _retry pypowervm-1.1.24/pypowervm/wrappers/0000775000175000017500000000000013571367172017164 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/wrappers/storage.py0000664000175000017500000016510513571367171021211 0ustar neoneo00000000000000# Copyright 2014, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Wrappers for virtual storage elements and adapters.""" import abc import base64 import binascii from oslo_log import log as logging import six import pypowervm.const as c import pypowervm.entities as ent import pypowervm.exceptions as ex from pypowervm.i18n import _ import pypowervm.util as u import pypowervm.wrappers.entry_wrapper as ewrap LOG = logging.getLogger(__name__) UDID = 'UniqueDeviceID' # The following are common to all VSCSI storage types # Storage QoS Constants _STOR_READ_IOPS = 'ReadIOPS' _STOR_WRITE_IOPS = 'WriteIOPS' # Storage Encryption Constants _STOR_ENCRYPTION_STATE = 'EncryptionState' _STOR_ENCRYPTION_KEY = 'EncryptionKey' _STOR_ENCRYPTION_AGENT = 'EncryptionAgent' # Device tag _STOR_TAG = 'Tag' # Emulate model alias _STOR_EMULATE_MODEL = 'EmulateModel' _STOR_EL_ORDER = (_STOR_READ_IOPS, _STOR_WRITE_IOPS, _STOR_ENCRYPTION_STATE, _STOR_ENCRYPTION_KEY, _STOR_ENCRYPTION_AGENT, _STOR_TAG, _STOR_EMULATE_MODEL) class _EncryptionState(object): """From EncryptionState.Enum. This class is part of an experimental API change and may be subject to breaking changes until it is publicized. """ UNENCRYPTED = 'Unencrypted' FORMATTED = 'Formatted' UNLOCKED = 'Unlocked' # LUKS-specific encryptor constants _LUKS_ENCRYPTOR = 'LUKSEncryptor' _LUKS_CIPHER = 'Cipher' _LUKS_KEY_SIZE = 'KeySize' _LUKS_HASH_SPEC = 'Hash' _LUKS_EL_ORDER = [_LUKS_CIPHER, _LUKS_KEY_SIZE, _LUKS_HASH_SPEC] # "Any" server adapters are SCSI adapters without client # adapters that map to remote LPAR slot number 65535. They # can map to any client and are not recommended but are # still supported. 
# Remote slot number used by "any"-client server adapters (see note above).
ANY_SLOT = 65535

# Virtual Disk Constants
DISK_ROOT = 'VirtualDisk'
_DISK_CAPACITY = 'DiskCapacity'
_DISK_LABEL = 'DiskLabel'
DISK_NAME = 'DiskName'
_DISK_MAX_LOGICAL_VOLS = 'MaxLogicalVolumes'
_DISK_PART_SIZE = 'PartitionSize'
_DISK_VG = 'VolumeGroup'
_DISK_BASE = 'BaseImage'
_DISK_UDID = UDID
_DISK_TYPE = 'VirtualDiskType'
_DISK_BACKSTORE_TYPE = 'BackStoreType'
_DISK_FILEFORMAT = 'FileFormat'
_DISK_RBD_USER = 'RbdUser'
_DISK_OPTIONAL_PARMS = 'OptionalParameters'
_VDISK_EL_ORDER = _STOR_EL_ORDER + (
    _DISK_CAPACITY, _DISK_LABEL, DISK_NAME, _DISK_MAX_LOGICAL_VOLS,
    _DISK_PART_SIZE, _DISK_VG, _DISK_BASE, _DISK_UDID, _DISK_TYPE,
    _DISK_BACKSTORE_TYPE, _DISK_FILEFORMAT, _DISK_RBD_USER,
    _DISK_OPTIONAL_PARMS)


class VDiskType(object):
    """From VirtualDiskType.Enum."""
    FILE = 'File'
    LV = 'LogicalVolume'
    RBD = 'RBD'


class BackStoreType(object):
    """From BackStoreType.Enum

    Describes the type of backstore handler to use for VDisks.  FILE_IO,
    USER_QCOW, and LOOP are used with the FileIO VDisk type.  USER_RBD is
    used with the RBD VDisk type.
    """
    # A kernel-space handler that supports raw files.
    FILE_IO = 'fileio'
    # A user-space handler that supports RAW, QCOW or QCOW2 files.
    USER_QCOW = 'user:qcow'
    # Create a loop device for the file, and use the kernel-space block
    # handler.  LOOP has higher performance than FILE_IO.
    LOOP = 'loop'
    # A user-space handler that supports rbd.  (Used with RBD)
    USER_RBD = 'user:rbd'


class FileFormatType(object):
    """From FileFormatType.Enum

    The format type of the image that will be stored in the VDisk (aka LV).
    """
    RAW = 'raw'
    QCOW2 = 'qcow2'


# Physical Volume Constants
PVS = 'PhysicalVolumes'
PHYS_VOL = 'PhysicalVolume'
_PV_AVAIL_PHYS_PART = 'AvailablePhysicalPartitions'
_PV_VOL_DESC = 'Description'
_PV_LOC_CODE = 'LocationCode'
_PV_PERSISTENT_RESERVE = 'PersistentReserveKeyValue'
_PV_RES_POLICY = 'ReservePolicy'
_PV_RES_POLICY_ALGO = 'ReservePolicyAlgorithm'
_PV_TOTAL_PHYS_PARTS = 'TotalPhysicalPartitions'
_PV_UDID = UDID
_PV_AVAIL_FOR_USE = 'AvailableForUsage'
_PV_VOL_SIZE = 'VolumeCapacity'
_PV_VOL_NAME = 'VolumeName'
_PV_VOL_STATE = 'VolumeState'
_PV_VOL_UNIQUE_ID = 'VolumeUniqueID'
_PV_FC_BACKED = 'IsFibreChannelBacked'
_PV_STG_LABEL = 'StorageLabel'
_PV_PG83 = 'DescriptorPage83'
_PV_EL_ORDER = _STOR_EL_ORDER + (
    _PV_AVAIL_PHYS_PART, _PV_VOL_DESC, _PV_LOC_CODE, _PV_PERSISTENT_RESERVE,
    _PV_RES_POLICY, _PV_RES_POLICY_ALGO, _PV_TOTAL_PHYS_PARTS, _PV_UDID,
    _PV_AVAIL_FOR_USE, _PV_VOL_SIZE, _PV_VOL_NAME, _PV_VOL_STATE,
    _PV_VOL_UNIQUE_ID, _PV_FC_BACKED, _PV_STG_LABEL, _PV_PG83)


class PVState(object):
    """Valid values for a PhysicalVolume's VolumeState."""
    ACTIVE = "active"
    MISSING = "missing"
    REMOVED = "removed"
    VARIED_OFF = "varied off"


# Virtual Optical Media Constants
VOPT_ROOT = 'VirtualOpticalMedia'
VOPT_NAME = 'MediaName'
_VOPT_UDID = 'MediaUDID'
_VOPT_MOUNT_TYPE = 'MountType'
_VOPT_SIZE = 'Size'
_VOPT_EL_ORDER = _STOR_EL_ORDER + (
    VOPT_NAME, _VOPT_UDID, _VOPT_MOUNT_TYPE, _VOPT_SIZE)

# Virtual Media Repository Constants
_VREPO_ROOT = 'VirtualMediaRepository'
_VREPO_OPTICAL_MEDIA_ROOT = 'OpticalMedia'
_VREPO_NAME = 'RepositoryName'
_VREPO_SIZE = 'RepositorySize'
_VREPO_EL_ORDER = [_VREPO_OPTICAL_MEDIA_ROOT, _VREPO_NAME, _VREPO_SIZE]

# Volume Group Constants
_VG_AVAILABLE_SIZE = 'AvailableSize'
_VG_BACKING_DEVICE_COUNT = 'BackingDeviceCount'
_VG_FREE_SPACE = 'FreeSpace'
_VG_CAPACITY = 'GroupCapacity'
_VG_NAME = 'GroupName'
_VG_SERIAL_ID = 'GroupSerialID'
_VG_STATE = 'GroupState'
_VG_MAX_LVS = 'MaximumLogicalVolumes'
_VG_MEDIA_REPOS = 'MediaRepositories'
_VG_MIN_ALLOC_SIZE = 'MinimumAllocationSize'
_VG_PHS_VOLS = PVS
_VG_UDID = UDID
_VG_VDISKS = 'VirtualDisks'
_VG_EL_ORDER = (_VG_AVAILABLE_SIZE, _VG_BACKING_DEVICE_COUNT, _VG_FREE_SPACE,
                _VG_CAPACITY, _VG_NAME, _VG_SERIAL_ID, _VG_STATE, _VG_MAX_LVS,
                _VG_MEDIA_REPOS, _VG_MIN_ALLOC_SIZE, _VG_PHS_VOLS, _VG_UDID,
                _VG_VDISKS)

# LogicalUnit Constants
_LU_THIN = 'ThinDevice'
_LU_UDID = UDID
_LU_CAPACITY = 'UnitCapacity'
_LU_TYPE = 'LogicalUnitType'
_LU_CLONED_FROM = 'ClonedFrom'
_LU_IN_USE = 'InUse'
_LU_NAME = 'UnitName'
_LU_MIG = 'LogicalUnitMigration'
_LU_EL_ORDER = _STOR_EL_ORDER + (
    _LU_THIN, _LU_UDID, _LU_CAPACITY, _LU_TYPE, _LU_CLONED_FROM, _LU_IN_USE,
    _LU_NAME, _LU_MIG)


class LUType(object):
    """Valid values for a LogicalUnit's LogicalUnitType."""
    DISK = "VirtualIO_Disk"
    HIBERNATION = "VirtualIO_Hibernation"
    IMAGE = "VirtualIO_Image"
    AMS = "VirtualIO_Active_Memory_Sharing"


# Shared by Tier and SSP capacity properties.
_CAPACITY = 'Capacity'

# Tier Constants
_TIER_NAME = 'Name'
_TIER_UDID = UDID
_TIER_IS_DEFAULT = 'IsDefault'
_TIER_CAPACITY = _CAPACITY
_TIER_ASSOC_SSP = 'AssociatedSharedStoragePool'

# Shared Storage Pool Constants
_SSP_NAME = 'StoragePoolName'
_SSP_UDID = UDID
_SSP_CAPACITY = _CAPACITY
_SSP_FREE_SPACE = 'FreeSpace'
_SSP_TOTAL_LU_SIZE = 'TotalLogicalUnitSize'
_SSP_LUS = 'LogicalUnits'
_SSP_LU = 'LogicalUnit'
_SSP_OCS = 'OverCommitSpace'
_SSP_PVS = PVS
_SSP_PV = PHYS_VOL

# Virtual Adapter Constants
CLIENT_ADPT = 'ClientAdapter'
SERVER_ADPT = 'ServerAdapter'

# Common to all Virtual Adapters
_VADPT_TYPE = 'AdapterType'
_VADPT_DRC_NAME = 'DynamicReconfigurationConnectorName'
_VADPT_LOC_CODE = 'LocationCode'
_VADPT_LOCAL_ID = 'LocalPartitionID'
_VADPT_REQD = 'RequiredAdapter'
_VADPT_VARIED_ON = 'VariedOn'
_VADPT_NEXT_SLOT = 'UseNextAvailableSlotID'
_VADPT_NEXT_HI_SLOT = 'UseNextAvailableHighSlotID'
_VADPT_SLOT_NUM = 'VirtualSlotNumber'
_VADPT_ENABLED = 'Enabled'
_VADPT_NAME = 'AdapterName'
_VADPT_UDID = 'UniqueDeviceID'

# Common to VSCSI Adapters (Client & Server)
_VSCSI_ADPT_BACK_DEV_NAME = 'BackingDeviceName'
_VSCSI_ADPT_REM_BACK_DEV_NAME = 'RemoteBackingDeviceName'
_VSCSI_ADPT_REM_LPAR_ID = 'RemoteLogicalPartitionID'
_VSCSI_ADPT_REM_SLOT_NUM = 'RemoteSlotNumber'
_VSCSI_ADPT_SVR_LOC_CODE = 'ServerLocationCode'

# Common to Client Adapters
_VCLNT_ADPT_SVR_ADPT = SERVER_ADPT

# Common to VFC Adapters (Client & Server)
_VFC_ADPT_CONN_PARTITION = 'ConnectingPartition'
_VFC_ADPT_CONN_PARTITION_ID = 'ConnectingPartitionID'
_VFC_ADPT_CONN_SLOT_NUM = 'ConnectingVirtualSlotNumber'

# VFC Server Adapter-specific
_VFC_SVR_ADPT_MAP_PORT = 'MapPort'
_VFC_SVR_ADPT_PHYS_PORT = 'PhysicalPort'

# VFC Client Adapter-specific
_VFC_CLNT_ADPT_WWPNS = 'WWPNs'
_VFC_CLNT_ADPT_LOGGED_IN = 'NportLoggedInStatus'
_VFC_CLNT_ADPT_OS_DISKS = 'OperatingSystemDisks'

# Element Ordering:
#
# A ServerAdapter element might be a VSCSI server adapter or a VFC server
# adapter.  Likewise a ClientAdapter element.  The schema inheritance
# hierarchy informs the way we build up the element order constants:
#
#           VirtualIOAdapter
#       VFCAdapter        VSCSIAdapter == VSCSIServerAdapter
# VFCClientAdapter  VFCServerAdapter      VSCSIClientAdapter
#
# However, this doesn't match up with the hierarchy of our wrapper classes:
#
#          VClientStorageAdapterElement
# VSCSIClientAdapterElement  VFCClientAdapterElement
#
#          VServerStorageAdapterElement
# VSCSIServerAdapterElement  VFCServerAdapterElement
#
# So we have to get creative with element ordering for the base classes, since
# they hold the @pvm_type decorator.  We interleave the VSCSI and VFC
# properties to create an element order that can be used commonly for both
# types.  This only works because all overlapping properties happen to be in
# the same order.
#
# Yes, this is funky.
# Converged ordering base for VFC and VSCSI adapters _VADPT_BASE_EL_ORDER = ( _VADPT_TYPE, _VADPT_DRC_NAME, _VADPT_LOC_CODE, _VADPT_LOCAL_ID, _VADPT_REQD, _VADPT_VARIED_ON, _VADPT_NEXT_SLOT, _VADPT_NEXT_HI_SLOT, _VADPT_SLOT_NUM, _VADPT_ENABLED, _VADPT_NAME, _VSCSI_ADPT_BACK_DEV_NAME, _VSCSI_ADPT_REM_BACK_DEV_NAME, _VSCSI_ADPT_REM_LPAR_ID, _VFC_ADPT_CONN_PARTITION, _VFC_ADPT_CONN_PARTITION_ID, _VSCSI_ADPT_REM_SLOT_NUM, _VFC_ADPT_CONN_SLOT_NUM, _VSCSI_ADPT_SVR_LOC_CODE, _VADPT_UDID) # Converged (VSCSI & VFC) Server Adapter element order _V_SVR_ADPT_EL_ORDER = _VADPT_BASE_EL_ORDER + ( _VFC_SVR_ADPT_MAP_PORT, _VFC_SVR_ADPT_PHYS_PORT) # Converged (VSCSI & VFC) Client Adapter element order _V_CLNT_ADPT_EL_ORDER = _VADPT_BASE_EL_ORDER + ( _VCLNT_ADPT_SVR_ADPT, _VFC_CLNT_ADPT_WWPNS, _VFC_CLNT_ADPT_LOGGED_IN, _VFC_CLNT_ADPT_OS_DISKS) VFC_CLIENT_ADPT = 'VirtualFibreChannelClientAdapter' # TargetDevice Constants _TD_LU_TD = 'SharedStoragePoolLogicalUnitVirtualTargetDevice' _TD_PV_TD = 'PhysicalVolumeVirtualTargetDevice' _TD_VOPT_TD = 'VirtualOpticalTargetDevice' _TD_VDISK_TD = 'LogicalVolumeVirtualTargetDevice' _TD_LUA = 'LogicalUnitAddress' _TD_NAME = 'TargetName' @ewrap.EntryWrapper.pvm_type('VolumeGroup', child_order=_VG_EL_ORDER) class VG(ewrap.EntryWrapper): """Represents a Volume Group that resides on the Virtual I/O Server.""" @classmethod def bld(cls, adapter, name, pv_list): vg = super(VG, cls)._bld(adapter) vg.name = name vg.phys_vols = pv_list return vg @property def name(self): return self._get_val_str(_VG_NAME) @name.setter def name(self, val): self.set_parm_value(_VG_NAME, val) @property def capacity(self): """Overall capacity in GB (float).""" return self._get_val_float(_VG_CAPACITY) @property def available_size(self): """Available size for new volumes in GB (float).""" return self._get_val_float(_VG_AVAILABLE_SIZE) @property def free_space(self): """Current free space in GB (float).""" return self._get_val_float(_VG_FREE_SPACE) @property def serial_id(self): 
return self._get_val_str(_VG_SERIAL_ID) @property def vmedia_repos(self): """Returns a list of wrappers.""" es = ewrap.WrapperElemList(self._find_or_seed(_VG_MEDIA_REPOS), VMediaRepos) return es @vmedia_repos.setter def vmedia_repos(self, repos): """Replaces the VirtualMediaRepositories with the new value. :param repos: A list of VMediaRepos objects that will replace the existing repositories. """ self.replace_list(_VG_MEDIA_REPOS, repos) @property def phys_vols(self): """Returns a list of the Physical Volumes that back this repo.""" # TODO(efried): parent_entry=self not needed once VIOS supports pg83 # descriptor in Events es = ewrap.WrapperElemList(self._find_or_seed(_VG_PHS_VOLS), PV, parent_entry=self) return es @phys_vols.setter def phys_vols(self, phys_vols): """Replaces the physical volumes with the new value. :param phys_vols: A list of PV objects that will replace the existing Physcial Volumes. """ self.replace_list(_VG_PHS_VOLS, phys_vols) @property def virtual_disks(self): """Returns a list of the Virtual Disks that are in the repo.""" es = ewrap.WrapperElemList(self._find_or_seed(_VG_VDISKS), VDisk) return es @virtual_disks.setter def virtual_disks(self, virt_disks): """Replaces the virtual disks with the new value. :param virt_disks: A list of VDisk objects that will replace the existing Virtual Disks. """ self.replace_list(_VG_VDISKS, virt_disks) @ewrap.ElementWrapper.pvm_type(_VREPO_ROOT, has_metadata=True, child_order=_VREPO_EL_ORDER) class VMediaRepos(ewrap.ElementWrapper): """A Virtual Media Repository for a VIOS. Typically used to store an ISO file for image building. """ @classmethod def bld(cls, adapter, name, size): """Creates a fresh VMediaRepos wrapper. This should be used when adding a new Virtual Media Repository to a Volume Group. The name and size for the media repository is required. The other attributes are generated from the system. Additionally, once created, specific VirtualOpticalMedia can be added onto the object. 
:param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param name: The name of the Virtual Media Repository. :param size: The size of the repository in GB (float). :returns: A VMediaRepos wrapper that can be used for create. """ vmr = super(VMediaRepos, cls)._bld(adapter) vmr._name(name) vmr._size(size) return vmr @property def optical_media(self): """Returns a list of the VirtualOpticalMedia devices in the repo.""" seed = self._find_or_seed(_VREPO_OPTICAL_MEDIA_ROOT) return ewrap.WrapperElemList(seed, VOptMedia) @optical_media.setter def optical_media(self, new_media): """Sets the list of VirtualOpticalMedia devices in the repo. :param new_media: The list of new VOptMedia. """ self.replace_list(_VREPO_OPTICAL_MEDIA_ROOT, new_media) @property def name(self): return self._get_val_str(_VREPO_NAME) def _name(self, new_name): self.set_parm_value(_VREPO_NAME, new_name) @property def size(self): """Returns the size in GB (float).""" return self._get_val_float(_VREPO_SIZE) def _size(self, new_size): self.set_float_gb_value(_VREPO_SIZE, new_size) @six.add_metaclass(abc.ABCMeta) @ewrap.Wrapper.base_pvm_type class _VTargetDevMethods(ewrap.Wrapper): """Base class for {storage_type}TargetDevice of an active VSCSIMapping.""" @classmethod def bld(cls, adapter, lua=None, name=None): """Build a new Virtual Target Device. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param lua: (Optional, Default None) Logical Unit Address string to assign to the new VTD. :param name: (Optional, Default None) Name of the TargetDev. If None name will be assigned by the server :return: A new {storage_type}TargetDev, where {storage_type} is appropriate to the subclass. 
""" vtd = super(_VTargetDevMethods, cls)._bld(adapter) if lua is not None: vtd._lua(lua) if name is not None: vtd._name(name) return vtd @property def lua(self): """Logical Unit Address of the target device.""" return self._get_val_str(_TD_LUA) def _lua(self, val): """Set the Logical Unit Address of this target device.""" self.set_parm_value(_TD_LUA, val) @property def name(self): """Target Name of the device""" return self._get_val_str(_TD_NAME) def _name(self, val): """Set the Target Name of the device""" self.set_parm_value(_TD_NAME, val) @ewrap.ElementWrapper.pvm_type(_TD_LU_TD, has_metadata=True) class LUTargetDev(_VTargetDevMethods, ewrap.ElementWrapper): """SSP Logical Unit Virtual Target Device for a VSCSIMapping.""" pass @ewrap.ElementWrapper.pvm_type(_TD_PV_TD, has_metadata=True) class PVTargetDev(_VTargetDevMethods, ewrap.ElementWrapper): """Physical Volume Virtual Target Device for a VSCSIMapping.""" pass @ewrap.ElementWrapper.pvm_type(_TD_VDISK_TD, has_metadata=True) class VDiskTargetDev(_VTargetDevMethods, ewrap.ElementWrapper): """Virtual Disk (Logical Volume) Target Device for a VSCSIMapping.""" pass @ewrap.ElementWrapper.pvm_type(_TD_VOPT_TD, has_metadata=True) class VOptTargetDev(_VTargetDevMethods, ewrap.ElementWrapper): """Virtual Optical Media Target Device for a VSCSIMapping.""" pass @ewrap.ElementWrapper.pvm_type(VOPT_ROOT, has_metadata=True, child_order=_VOPT_EL_ORDER) class VOptMedia(ewrap.ElementWrapper): """A virtual optical piece of media.""" target_dev_type = VOptTargetDev @classmethod def bld(cls, adapter, name, size=None, mount_type='rw'): """Creates a fresh VOptMedia wrapper. This should be used when adding a new VirtualOpticalMedia device to a VirtualMediaRepository. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param name: The device name. :param size: The device size in GB, decimal precision. :param mount_type: The type of mount. Defaults to RW. 
Can be set to R :returns: A VOptMedia wrapper that can be used for create. """ vom = super(VOptMedia, cls)._bld(adapter) vom._media_name(name) if size is not None: vom._size(size) vom._mount_type(mount_type) return vom @classmethod def bld_ref(cls, adapter, name): """Creates a VOptMedia wrapper for referencing an existing VOpt.""" vom = super(VOptMedia, cls)._bld(adapter) vom._media_name(name) return vom @property def media_name(self): return self._get_val_str(VOPT_NAME) @property def name(self): """Same as media_name - for consistency with other storage types.""" return self.media_name def _media_name(self, new_name): self.set_parm_value(VOPT_NAME, new_name) @property def size(self): """Size is a float represented in GB.""" return self._get_val_float(_VOPT_SIZE) def _size(self, new_size): self.set_float_gb_value(_VOPT_SIZE, new_size) @property def udid(self): return self._get_val_str(_VOPT_UDID) @property def mount_type(self): return self._get_val_str(_VOPT_MOUNT_TYPE) def _mount_type(self, new_mount_type): self.set_parm_value(_VOPT_MOUNT_TYPE, new_mount_type) @ewrap.Wrapper.base_pvm_type class _StorageQoS(ewrap.Wrapper): """StorageQoS mixin fields/methods common to PV and VDisk.""" @property def read_iops_limit(self): """The device's I/O Read limit""" return self._get_val_int(_STOR_READ_IOPS) @read_iops_limit.setter def read_iops_limit(self, new_read_iops_limit): self.set_parm_value( _STOR_READ_IOPS, new_read_iops_limit, attrib=c.ATTR_KSV170) @property def write_iops_limit(self): """The device's I/O Write limit""" return self._get_val_int(_STOR_WRITE_IOPS) @write_iops_limit.setter def write_iops_limit(self, new_write_iops_limit): self.set_parm_value( _STOR_WRITE_IOPS, new_write_iops_limit, attrib=c.ATTR_KSV170) @ewrap.ElementWrapper.pvm_type(_LUKS_ENCRYPTOR, has_metadata=True, child_order=_LUKS_EL_ORDER) class _LUKSEncryptor(ewrap.ElementWrapper): """An encryption agent that uses Linux Unified Key Setup (LUKS). 
This class is part of an experimental API change and may be subject to breaking changes until it is publicized. """ @classmethod def bld(cls, adapter, cipher=None, key_size=None, hash_spec=None): """Creates a new LUKSEncryptor wrapper. This can be attached to a disk wrapper during disk create and update operations to specify encryption parameters for the device. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param cipher: A string containing the encryption algorithm, mode, and initialization vector (e.g. aes-xts-plain64). Optional. :param key_size: The key size of the data encryption key. Optional. :param hash_spec: The hash algorithm used for data encryption key derivation. Optional. """ encryptor = super(_LUKSEncryptor, cls)._bld(adapter) if cipher is not None: encryptor.cipher = cipher if key_size is not None: encryptor.key_size = key_size if hash_spec is not None: encryptor.hash_spec = hash_spec return encryptor @property def cipher(self): """Cipher mode for translating between ciphertext and cleartext.""" return self._get_val_str(_LUKS_CIPHER) @cipher.setter def cipher(self, new_cipher): self.set_parm_value(_LUKS_CIPHER, new_cipher, attrib=c.ATTR_KSV170) @property def key_size(self): """Size of the master encryption key used for data encryption.""" return self._get_val_int(_LUKS_KEY_SIZE) @key_size.setter def key_size(self, new_key_size): self.set_parm_value(_LUKS_KEY_SIZE, new_key_size, attrib=c.ATTR_KSV170) @property def hash_spec(self): """Hash algorithm used for key derivation.""" return self._get_val_str(_LUKS_HASH_SPEC) @hash_spec.setter def hash_spec(self, new_hash_spec): self.set_parm_value(_LUKS_HASH_SPEC, new_hash_spec, attrib=c.ATTR_KSV170) @ewrap.Wrapper.base_pvm_type class _StorageEncryption(ewrap.Wrapper): """Encryption properties/methods common to PV and VDisk. The members of this class are part of an experimental API change and may be subject to breaking changes until they are publicized. 
""" @property def _encryption_state(self): """The device's encryption state This property is part of an experimental API change and may be subject to breaking changes until it is publicized. """ return self._get_val_str(_STOR_ENCRYPTION_STATE) @_encryption_state.setter def _encryption_state(self, new_encryption_state): self.set_parm_value( _STOR_ENCRYPTION_STATE, new_encryption_state, attrib=c.ATTR_KSV170) @property def _encryption_agent(self): """The encryption agent used to encrypt the device. This property is part of an experimental API change and may be subject to breaking changes until it is publicized. """ elem = self._find(_STOR_ENCRYPTION_AGENT) if elem is None: return None agent_elems = list(elem) if len(agent_elems) != 1: return None return ewrap.ElementWrapper.wrap(agent_elems[0]) @_encryption_agent.setter def _encryption_agent(self, new_encryption_agent): agent_elem = ent.Element(_STOR_ENCRYPTION_AGENT, self.adapter, attrib=c.ATTR_KSV170) if new_encryption_agent is not None: agent_elem.inject(new_encryption_agent.element) self.inject(agent_elem) @property def _encryption_key(self): """The encryption key used to format and unlock an encrypted device This property is part of an experimental API change and may be subject to breaking changes until it is publicized. """ return self._get_val_str(_STOR_ENCRYPTION_KEY) @_encryption_key.setter def _encryption_key(self, new_encryption_key): self.set_parm_value( _STOR_ENCRYPTION_KEY, new_encryption_key, attrib=c.ATTR_KSV170) @ewrap.ElementWrapper.pvm_type(PHYS_VOL, has_metadata=True, child_order=_PV_EL_ORDER) class PV(ewrap.ElementWrapper, _StorageQoS, _StorageEncryption): """A physical volume that backs a Volume Group.""" target_dev_type = PVTargetDev @classmethod def bld(cls, adapter, name, udid=None, tag=None, emulate_model=None): """Creates the a fresh PV wrapper. This should be used when wishing to add physical volumes to a Volume Group. Only the name is required. 
The other attributes are generated from the system. The name matches the device name on the system. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param name: The name of the physical volume on the Virtual I/O Server to add to the Volume Group. Ex. 'hdisk1'. :param udid: Universal Disk Identifier. :param tag: String with which to tag the physical device upon mapping. :param emulate_model: Boolean emulate model alias flag to set on the physical device upon mapping. :returns: An Element that can be used for a PhysicalVolume create or mapping. """ pv = super(PV, cls)._bld(adapter) # Assignment order is significant if udid: pv.udid = udid pv.name = name if tag: pv.tag = tag if emulate_model is not None: pv.emulate_model = emulate_model return pv @property def udid(self): """The unique device id.""" return self._get_val_str(_PV_UDID) @udid.setter def udid(self, new_udid): self.set_parm_value(_PV_UDID, new_udid) @property def capacity(self): """Returns the capacity as an int in MB.""" return self._get_val_int(_PV_VOL_SIZE) @property def name(self): return self._get_val_str(_PV_VOL_NAME) @name.setter def name(self, newname): self.set_parm_value(_PV_VOL_NAME, newname) @property def state(self): return self._get_val_str(_PV_VOL_STATE) @property def is_fc_backed(self): return self._get_val_bool(_PV_FC_BACKED) @property def description(self): return self._get_val_str(_PV_VOL_DESC) @property def loc_code(self): return self._get_val_str(_PV_LOC_CODE) @property def avail_for_use(self): return self._get_val_bool(_PV_AVAIL_FOR_USE) @property def pg83(self): encoded = self._get_val_str(_PV_PG83) # TODO(efried): Temporary workaround until VIOS supports pg83 in Events # >>>CUT HERE>>> if not encoded: # The PhysicalVolume XML doesn't contain the DescriptorPage83 # property. (This could be because the disk really doesn't have # this attribute; but if the caller is asking for pg83, they likely # expect that it should.) 
More likely, it is because their VIOS is # running at a level which supplies this datum in a fresh inventory # query, but not in a PV ADD Event. In that case, use the # LUARecovery Job to perform the fresh inventory query to retrieve # this value. Since this is expensive, we cache the value. if not hasattr(self, '_pg83_encoded'): # Get the VIOS UUID from the parent_entry of this PV. Raise if # it doesn't exist. if not hasattr(self, 'parent_entry') or not self.parent_entry: raise ex.UnableToBuildPG83EncodingMissingParent( dev_name=self.name) # The parent_entry is either a VG or a VIOS. If a VG, it is a # child of the owning VIOS, so pull out the ROOT UUID of its # href. If a VIOS, we can't count on the href being a root URI, # so pull the target UUID regardless. use_root_uuid = isinstance(self.parent_entry, VG) vio_uuid = u.get_req_path_uuid( self.parent_entry.href, preserve_case=True, root=use_root_uuid) # Local import to prevent circular dependency from pypowervm.tasks import hdisk # Cache the encoded value for performance self._pg83_encoded = hdisk.get_pg83_via_job( self.adapter, vio_uuid, self.udid) encoded = self._pg83_encoded # << 1: # Drop the original primary adapter. new_list.extend(self._get_trunks()[1:]) self.replace_list(SEA_TRUNKS, new_list) def _get_trunks(self): """Returns all of the trunk adapters. The first is the primary adapter. All others are the additional adapters. """ # It is not expected that the API will return the adapters such that # the first is the primary. Yet to reduce complexity in the other # methods that work with the trunks, the returned value from here # will order it as such. 
trunk_elem_list = [TrunkAdapter.wrap(x) for x in self.element.findall(u.xpath(SEA_TRUNKS, TA_ROOT))] return _order_by_pvid(trunk_elem_list, self.pvid) @property def backing_device(self): """The BackingDeviceChoice for this SEA.""" elem = self.element.find(_SEA_BACKING_DEV) if elem is None: return None return ewrap.ElementWrapper.wrap(elem[0]) def _backing_device(self, eth_back_dev): """The BackingDeviceChoice for this SEA. :param eth_back_dev: The EthernetBackingDevice for this BackingDeviceChoice. """ stor_elem = ent.Element(_SEA_BACKING_DEV, self.adapter, attrib={}, children=[]) stor_elem.inject(eth_back_dev.element) self.inject(stor_elem) @property def control_channel(self): """Returns the control channel interface name. This may be None, indicating the lack of a control channel. Control channels are no longer required for a network bridge to be redundant. """ return self._get_val_str(_SEA_CONTROL_CHANNEL) @property def configuration_state(self): """Returns the configuration state. May be None. Refer to SEAState for valid values. """ return self._get_val_str(_SEA_CONFIGURATION_STATE) def contains_device(self, dev_name): """Returns if one of the child adapters is owned by this SEA. A child adapter is either the primary adapter, control channel, or is one of the additional adapters. :param dev_name: The name of the child device. :return: True if owned by this SEA, False otherwise. """ if self.control_channel == dev_name: return True # If this SEA has no trunk adapters, primary_adpt will be None if self.primary_adpt and (self.primary_adpt.dev_name == dev_name): return True return dev_name in [x.dev_name for x in self.addl_adpts] @ewrap.ElementWrapper.pvm_type('TrunkAdapter', child_order=_TA_EL_ORDER) class TrunkAdapter(ewrap.ElementWrapper): """Represents a Trunk Adapter, either within a LoadGroup or a SEA.""" @classmethod def bld(cls, adapter, pvid, vlan_ids, vswitch, trunk_pri=1): """Create the TrunkAdapter element that can be used for SEA creation. 
The returned adapter uses the "next available high slot" option, meaning that the API will attempt to assign the next available slot number that's higher than all the existing assigned slot numbers. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param pvid: The primary VLAN ID (ex. 1) for the Network Bridge. :param vlan_ids: Additional VLAN ids for the trunk adapters. :param vswitch: The vswitch wrapper to retrieve ID and href. :param trunk_pri: Trunk priority of this adapter. Defaults to 1. :returns: A new TrunkAdapter ElementWrapper that represents the new TrunkAdapter. """ ta = super(TrunkAdapter, cls)._bld(adapter) ta._required(True) ta.pvid = pvid ta.tagged_vlans = vlan_ids ta.has_tag_support = True if vlan_ids else False ta._vswitch_id(vswitch.switch_id) ta._trunk_pri(trunk_pri) # UseNextAvailableSlotID field - High only if available unasi_field = (_TA_USE_NEXT_AVAIL_HIGH_SLOT if adapter.traits.has_high_slot else _TA_USE_NEXT_AVAIL_SLOT) ta.set_parm_value(unasi_field, u.sanitize_bool_for_api(True)) ta._associated_vswitch_uri(vswitch.related_href) return ta @property def pvid(self): """Returns the Primary VLAN ID of the Trunk Adapter.""" return self._get_val_int(_TA_PVID) @pvid.setter def pvid(self, value): self.set_parm_value(_TA_PVID, value) @property def dev_name(self): """Returns the name of the device as represented by the hosting VIOS. If RMC is down, will not be available. """ return self._get_val_str(_TA_DEV_NAME, 'Unknown') @property def has_tag_support(self): """Does this Trunk Adapter support Tagged VLANs passing through it?""" return self._get_val_bool(_TA_TAG_SUPP) @has_tag_support.setter def has_tag_support(self, new_val): self.set_parm_value(_TA_TAG_SUPP, u.sanitize_bool_for_api(new_val)) @property def tagged_vlans(self): """Returns the tagged VLAN IDs that are allowed to pass through. Assumes has_tag_support() returns True. If not, an empty list will be returned. 
""" addl_vlans = self._get_val_str(_TA_VLAN_IDS, '') list_data = [] if addl_vlans != '': list_data = [int(i) for i in addl_vlans.split(' ')] def update_list(new_list): data = ' '.join([str(j) for j in new_list]) self.set_parm_value(_TA_VLAN_IDS, data) return ewrap.ActionableList(list_data, update_list) @tagged_vlans.setter def tagged_vlans(self, new_list): data = ' '.join([str(i) for i in new_list]) self.set_parm_value(_TA_VLAN_IDS, data) @property def vswitch_id(self): """Returns the virtual switch identifier.""" return self._get_val_int(_TA_VS_ID) def _vswitch_id(self, value): self.set_parm_value(_TA_VS_ID, value) @property def trunk_pri(self): """Returns the trunk priority of the adapter.""" return self._get_val_int(_TA_TRUNK_PRI) def _trunk_pri(self, value): self.set_parm_value(_TA_TRUNK_PRI, value) def _required(self, value): self.set_parm_value(_TA_REQUIRED, u.sanitize_bool_for_api(value)) @property def virtual_slot_number(self): """Returns the virtual slot number for this adapter.""" return self._get_val_int(_TA_VIRTUAL_SLOT) @property def associated_vswitch_uri(self): """Returns the associated vswitch href.""" return self.get_href(u.xpath(_TA_ASSOC_VSWITCH, c.LINK), one_result=True) def _associated_vswitch_uri(self, href): self.set_href(u.xpath(_TA_ASSOC_VSWITCH, c.LINK), href) @property def varied_on(self): """Returns the VariedOn property.""" return self._get_val_bool(_TA_VARIED_ON) @property def loc_code(self): """Returns the LocationCode property.""" return self._get_val_str(_TA_LOC_CODE) @property def vios_id(self): """Determines and returns the VIOS ID from the loc_code. :return: int representing the short ID of the associated VIOS. """ return u.part_id_by_loc_code(self.loc_code) @ewrap.ElementWrapper.pvm_type('LoadGroup', has_metadata=True) class LoadGroup(ewrap.ElementWrapper): """Load Group (how the I/O load should be distributed) for a Network Bridge. 
If using failover or load balancing, then the Load Group will have pairs of Trunk Adapters, each with their own unique Trunk Priority. """ @classmethod def bld(cls, adapter, pvid, vnet_uris): """Create the LoadGroup element that can be used for a create operation. This is used when adding a Load Group to a NetBridge. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param pvid: The primary VLAN ID (ex. 1) for the Load Group. :param vnet_uris: The virtual network URI list (mapping to each additional VLAN/vswitch combo). :returns: A new LoadGroup ElementWrapper that represents the new LoadGroup. """ lg = super(LoadGroup, cls)._bld(adapter) lg._pvid(pvid) lg.vnet_uri_list.extend(vnet_uris) return lg @classmethod def wrap(cls, element, **kwargs): wrap = super(LoadGroup, cls).wrap(element) # If created from a Network Bridge this will be set. Else it will # be None (ex. crt_load_group method) wrap._nb_root = kwargs.get('nb_root') return wrap @property def pvid(self): """Returns the Primary VLAN ID of the Load Group.""" return self._get_val_int(_LG_PVID) def _pvid(self, new_pvid): self.set_parm_value(_LG_PVID, new_pvid) @property def trunk_adapters(self): """Returns the Trunk Adapters for the Load Group. There is either one (no redundancy/wrap balancing) or two (typically the case in a multi VIOS scenario). :return: list of TrunkAdapter objects. """ return ewrap.WrapperElemList(self.element.find(_LG_TRUNKS), TrunkAdapter) @trunk_adapters.setter def trunk_adapters(self, new_list): self.replace_list(_LG_TRUNKS, new_list) @property def vnet_uri_list(self): """Returns a list of the Virtual Network URIs. If the vnet_aware trait (see traits.py) is set, then the addition of VLANs is driven via virtual networks rather than straight VLAN modification. This uri list is what drives the modification. If the trait is set to false, then the modification should be driven via the trunk adapters on the SEA directly. This list will also be empty. 
The task classes (cna.py and network_bridger.py) should abstract the user away from these deviations in traits. """ uri_resp_list = list(self.get_href(u.xpath(_LG_VNETS, c.LINK))) return ewrap.ActionableList(uri_resp_list, self.__update_uri_list) @vnet_uri_list.setter def vnet_uri_list(self, new_list): self.__update_uri_list(new_list) def __update_uri_list(self, new_list): new_vnet_elem = self._bld_link_list(_VSW_VIRT_NETS, new_list) old_elems = self.element.find(_LG_VNETS) # This is a bug where the API isn't returning vnets if just a PVID # on additional VEA if old_elems is not None: self.element.replace(old_elems, new_vnet_elem) else: self.element.append(new_vnet_elem) # If the Network Bridge was set, tell it to rebuild its VirtualNetwork # list. try: self._nb_root._rebuild_vnet_list() except AttributeError: # Network Bridge was not set - ignore pass @property def tagged_vlans(self): """The VLANs supported by this Load Group. Does not include PVID.""" return self.trunk_adapters[0].tagged_vlans @ewrap.EntryWrapper.pvm_type('VirtualNetwork') class VNet(ewrap.EntryWrapper): """The overall definition of a VLAN network within the hypervisor.""" @classmethod def bld(cls, adapter, name, vlan_id, vswitch_uri, tagged): """Creates a VirtualNetwork that can be used for a create operation. This is used when creating a new Virtual Network within the system :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param name: The name for the virtual network. :param vlan_id: The VLAN identifier (1 to 4094) for the network. :param vswitch_uri: The URI that points to the appropriate vSwitch. :param tagged: True if packets should have VLAN tags when they leave the system. False if tags should only be on the packets while in the system (but tag-less when on the physical network). :returns: The ElementWrapper that represents the new VirtualNetwork. 
""" vnet = super(VNet, cls)._bld(adapter) # Assignment order matters vnet.associated_switch_uri = vswitch_uri vnet.name = name vnet.vlan = vlan_id vnet.tagged = tagged return vnet @property def associated_switch_uri(self): return self.get_href(_VNET_ASSOC_SW, one_result=True) @associated_switch_uri.setter def associated_switch_uri(self, uri): self.set_href(_VNET_ASSOC_SW, uri) @property def name(self): return self._get_val_str(_VNET_NET_NAME) @name.setter def name(self, value): self.set_parm_value(_VNET_NET_NAME, value) @property def vlan(self): return self._get_val_int(_VNET_VLAN_ID) @vlan.setter def vlan(self, vlan_id): self.set_parm_value(_VNET_VLAN_ID, vlan_id) @property def vswitch_id(self): """The vSwitch identifier (int). 0 through 15 (max number vSwitches). Is not a UUID. """ return self._get_val_int(_VNET_SW_ID) @property def tagged(self): """If True, the VLAN tag is preserved when the packet leaves system.""" return self._get_val_bool(_VNET_TAG) @tagged.setter def tagged(self, is_tagged): self.set_parm_value(_VNET_TAG, u.sanitize_bool_for_api(is_tagged)) @ewrap.EntryWrapper.pvm_type('ClientNetworkAdapter', child_order=_VADPT_EL_ORDER) class CNA(ewrap.EntryWrapper): """Wrapper object for ClientNetworkAdapter schema.""" @classmethod def bld(cls, adapter, pvid, vswitch_href, slot_num=None, mac_addr=None, addl_tagged_vlans=None, trunk_pri=None, dev_name=None, ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None): """Creates a fresh CNA EntryWrapper. This is used when creating a new CNA for a partition. This can be PUT to LogicalPartition//ClientNetworkAdapter or to VirtualIOServer//ClientNetworkAdapter. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param pvid: The Primary VLAN ID to use. :param vswitch_href: The URI that points to the Virtual Switch that will support this adapter. :param slot_num: The Slot on the Client LPAR that should be used. 
This defaults to 'None', which means the next available high slot will be used (the API will attempt to assign the next available slot number that's higher than all the existing assigned slot numbers. :param mac_addr: Optional user specified mac address to use. If left as None, the system will generate one. :param addl_tagged_vlans: A set of additional tagged VLANs that can be passed through this adapter (with client VLAN adapters). Input should be a list of int (or int string) Example: [51, 52, 53] Note: The limit is ~18 additional VLANs :param trunk_pri: Optional TrunkPriority integer that, if specified, will create this wrapper as a trunk. :param dev_name: (Optional, Default: None) Can only be set if the CNA is being created against the Management VM. Can be used to specify what the device name should be for the CNA on the Management VM. Ignored for all other LPAR types. :param ovs_bridge: (Optional, Default: None) If hosting through mgmt partition, this attribute specifies which Open vSwitch to connect to. This assumes that Open vSwitch is installed and active on the mgmt partition. :param ovs_ext_ids: (Optional, Default: None) A comma-delimited list of key=value pairs in string format. Ex. iface-id=abc123,iface-status=active This sets a dictionary of values on the Interface element within Open vSwitch. This assumes that Open vSwitch is installed and active on the mgmt partition. :param configured_mtu: (Optional, Default: None) Sets the MTU on the adapter. May only be valid if adapter is being created against mgmt partition. :returns: A CNA EntryWrapper that can be used for create. 
""" cna = super(CNA, cls)._bld(adapter) # Assignment order matters if slot_num is not None: cna._slot(slot_num) else: cna._use_next_avail_slot_id = True if mac_addr is not None: cna.mac = mac_addr # The primary VLAN ID cna.pvid = pvid # Additional VLANs if addl_tagged_vlans is not None: cna.tagged_vlans = addl_tagged_vlans cna.is_tagged_vlan_supported = True else: cna.is_tagged_vlan_supported = False # vSwitch URI cna.vswitch_uri = vswitch_href # Set the device name if not None if dev_name: cna._dev_name(dev_name) if ovs_bridge is not None: cna.ovs_bridge = ovs_bridge if ovs_ext_ids is not None: cna.ovs_ext_ids = ovs_ext_ids if configured_mtu is not None: cna.configured_mtu = configured_mtu # If a trunk priority is specified, set it. It will make this CNA # build out a trunk adapter. However, if it is not specified, we # do not want to set it as we don't want to include the element in # the payload we send for the CNA creation. if trunk_pri: cna._trunk_pri(trunk_pri) return cna def create(self, parent_type=None, parent_uuid=None, timeout=-1, parent=None, **kwargs): """Override to ensure default slot setting is correct. Create the CNA as specified *except*: If UseNextAvailableHighSlot is True (i.e. slot number was not given); and the parent is a VIOS or the management partition; then change to UseNextAvailableSlot (not High). This is because VIOS and the management partition don't care about slot ordering, and their longevity increases the probability of running out of slot space if we use 'High'. :param parent_type: See superclass. :param parent_uuid: See superclass. :param timeout: See superclass. :param parent: See superclass. """ # These checks are quick, so do them first. el2d = self._find(_TA_USE_NEXT_AVAIL_HIGH_SLOT) # If UseNextAvailableHighSlot is present *and* True. if el2d is not None and self._use_next_avail_slot_id: # If we have the parent wrapper, we don't have to GET. Otherwise... 
if parent is None: if any(val is None for val in (parent_type, parent_uuid)): raise ValueError(_("Invalid parent spec for CNA.create.")) # If the parent_type isn't a wrapper class, get it if type(parent_type) is str: parent_type = ewrap.Wrapper._pvm_object_registry[ parent_type]['entry'] # Aaaand get the parent parent = parent_type.get(self.adapter, uuid=parent_uuid) # Now we can find out whether the parent is VIOS or mgmt. if parent.env == bp.LPARType.VIOS or parent.is_mgmt_partition: # Delete the existing UNAHSI field self.element.remove(el2d) # Aaaand add the UNASI field self.set_parm_value(_TA_USE_NEXT_AVAIL_SLOT, u.sanitize_bool_for_api(True)) # Superclass does the real work. return super(CNA, self).create( parent_type=parent_type, parent_uuid=parent_uuid, timeout=timeout, parent=parent, **kwargs) @property def slot(self): return self._get_val_int(_VADPT_SLOT_NUM) def _slot(self, sid): self.set_parm_value(_VADPT_SLOT_NUM, sid) @property def lpar_id(self): """Returns the Local Partition ID for this adapter.""" return self._get_val_int(_VADPT_LOC_PART_ID) @property def _use_next_avail_slot_id(self): """Use next available (high) slot ID, true or false.""" # We could be using either next-available-slot field. If either is set, # it counts. return any(self._get_val_bool(unasi_field) for unasi_field in (_TA_USE_NEXT_AVAIL_HIGH_SLOT, _TA_USE_NEXT_AVAIL_SLOT)) @_use_next_avail_slot_id.setter def _use_next_avail_slot_id(self, unasi): """Use next available (high) slot ID. :param unasi: Boolean value to set (True or False) """ # NOTE(efried): We'd like to set this to not-HIGH for VIOS/mgmt, but in # bld(), we don't know what kind of parent we have. unasi_field = (_TA_USE_NEXT_AVAIL_HIGH_SLOT if self.traits.has_high_slot else _TA_USE_NEXT_AVAIL_SLOT) self.set_parm_value(unasi_field, u.sanitize_bool_for_api(unasi)) @property def mac(self): """Returns the Mac Address for the adapter. 
Typical format would be: AABBCCDDEEFF The API returns a format with no colons and is upper cased. """ return self._get_val_str(_VADPT_MAC_ADDR) @property def vsi_type_id(self): """Returns the virtual station interface type id.""" return self._get_val_str(_VADPT_VSI_TYPE_ID) @property def vsi_type_version(self): """Returns the virtual station interface version.""" return self._get_val_str(_VADPT_VSI_TYPE_VERSION) @property def vsi_type_manager_id(self): """Returns the virtual station interface manager id.""" return self._get_val_str(_VADPT_VSI_MANAGER_ID) @mac.setter def mac(self, new_val): new_mac = u.sanitize_mac_for_api(new_val) self.set_parm_value(_VADPT_MAC_ADDR, new_mac) @property def pvid(self): """Returns the Port VLAN ID (int value).""" return self._get_val_int(_VADPT_PVID) @pvid.setter def pvid(self, new_val): self.set_parm_value(_VADPT_PVID, new_val) @property def enabled(self): """Returns the enabled state (boolean value). A CNA is always created enabled=true. However, certain migration operations of an LPAR (ex. migration via OpenStack when using Open vSwitch) will cause the client's CNA to be disabled. This method can be used to check the state of the adapter. """ return self._get_val_bool(_VADPT_ENABLED) @enabled.setter def enabled(self, new_val): self.set_parm_value(_VADPT_ENABLED, u.sanitize_bool_for_api(new_val)) @property def loc_code(self): """The device's location code.""" return self._get_val_str(_VADPT_LOCATION_CODE) @property def tagged_vlans(self): """Returns a list of additional VLANs on this adapter. Only valid if tagged vlan support is on. 
""" addl_vlans = self._get_val_str(_VADPT_TAGGED_VLANS, '') list_data = [] if addl_vlans != '': list_data = [int(i) for i in addl_vlans.split(' ')] def update_list(new_list): data = ' '.join([str(j) for j in new_list]) self.set_parm_value(_VADPT_TAGGED_VLANS, data) return ewrap.ActionableList(list_data, update_list) @tagged_vlans.setter def tagged_vlans(self, new_list): data = ' '.join([str(i) for i in new_list]) self.set_parm_value(_VADPT_TAGGED_VLANS, data) @property def is_tagged_vlan_supported(self): """Returns if addl tagged VLANs are supported (bool value).""" return self._get_val_bool(_VADPT_TAGGED_VLAN_SUPPORT) @is_tagged_vlan_supported.setter def is_tagged_vlan_supported(self, new_val): """Parameter new_val is a bool (True or False).""" self.set_parm_value(_VADPT_TAGGED_VLAN_SUPPORT, u.sanitize_bool_for_api(new_val)) @property def vswitch_uri(self): """Returns the URI for the associated vSwitch.""" return self.get_href(u.xpath(_VADPT_VSWITCH, c.LINK), one_result=True) @vswitch_uri.setter def vswitch_uri(self, new_val): self.set_href(u.xpath(_VADPT_VSWITCH, c.LINK), new_val) @property def vswitch_id(self): """Returns the ID (typically 0-15) for the virtual switch.""" return self._get_val_int(_VADPT_VSWITCH_ID) @property def dev_name(self): """Returns the name of the device (if available). If RMC is down, will not be available. """ return self._get_val_str(_VADPT_DEV_NAME, 'Unknown') def _dev_name(self, value): """Sets the device name. This is only available for devices running on the Management VM. If set for any other LPARs, it will be ignored. :param value: The device name. """ self.set_parm_value(_VADPT_DEV_NAME, value) @property def trunk_pri(self): """Returns the Trunk Priority for the adapter. :returns: None if this is not a Trunk Adapter, priority otherwise. 
""" return self._get_val_int(_TA_TRUNK_PRI) def _trunk_pri(self, new_val): self.set_parm_value(_TA_TRUNK_PRI, new_val) @property def is_trunk(self): """Returns if this adapter was created with a trunk priority. If the adapter was created without a trunk priority, it is just a client network adapter. However, if it was given a trunk priority on creation, it is a wrapper for a trunk adapter. """ return self.trunk_pri is not None @property def configured_mtu(self): """The MTU of the adapter. May only be valid if adapter is being created against mgmt partition. """ return self._get_val_int(_VADPT_MTU) @configured_mtu.setter def configured_mtu(self, value): self.set_parm_value(_VADPT_MTU, value, attrib=c.ATTR_KSV160) @property def ovs_bridge(self): """The Open vSwitch bridge it is connected to. Otherwise None.""" return self._get_val_str(_VADPT_OVS_BRIDGE) @ovs_bridge.setter def ovs_bridge(self, value): self.set_parm_value(_VADPT_OVS_BRIDGE, value, attrib=c.ATTR_KSV160) @property def ovs_ext_ids(self): """If connected to an Open vSwitch, returns the external ids. This is a comma-delimited list of key=value pairs. Ex: 'iface-id=123asdf,iface-status=active' This maps directly to the Open vSwitch Interface object's 'external_id' field. """ return self._get_val_str(_VADPT_OVS_EXT_IDS) @ovs_ext_ids.setter def ovs_ext_ids(self, value): self.set_parm_value(_VADPT_OVS_EXT_IDS, value, attrib=c.ATTR_KSV160) @ewrap.Wrapper.xag_property(c.XAG.ADV) def ip_address(self): """Returns the IP Address of the network interface. Typical format would be: 255.255.255.255 (IPv4) and ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (IPv6) or other short forms of IPv6 address """ return self._get_val_str(_VADPT_IP_ADDR) @ewrap.Wrapper.xag_property(c.XAG.ADV) def subnet_mask(self): """Returns the subnet mask of the network interface. 
Typical format would be: 255.255.255.0 (IPv4) and ffff:ffff:ffff:ffff:: (IPv6) or other forms of IPv6 address """ return self._get_val_str(_VADPT_SUBNET_MASK) @ewrap.Wrapper.xag_property(c.XAG.ADV) def gateway(self): """Returns the gateway of the network interface. Typical format would be: 10.0.0.1 (IPv4) and cafe::1 (IPv6) or other forms of IPv6 address """ return self._get_val_str(_VADPT_GATEWAY) @ewrap.ElementWrapper.pvm_type(_SEA_ETH_BACK_DEV, has_metadata=True, child_order=_SEA_EBD_ORDER) class EthernetBackingDevice(ewrap.ElementWrapper): """Represents the SEA EthernetBackingDevice.""" @classmethod def bld(cls, adapter, dev_name): """Creates the EthernetBackingDevice element. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param dev_name: The device name (e.g. eth0). :returns: The EthernetBackingDevice element for SEAs. """ cfg = super(EthernetBackingDevice, cls)._bld(adapter) cfg._dev_name(dev_name) # This is required by the schema, setting it to 1 # just for legacy support. cfg._adapter_id(1) return cfg @property def dev_name(self): return self._get_val_str(_SEA_DEV_NAME) def _dev_name(self, dev_name): self.set_parm_value(_SEA_DEV_NAME, str(dev_name)) @property def adapter_id(self): return self._get_val_int(_SEA_EBD_ADAPTER_ID) def _adapter_id(self, value): # TODO(IBM) remove this once the schema no longer requires it. return self.set_parm_value(_SEA_EBD_ADAPTER_ID, value) pypowervm-1.1.24/pypowervm/wrappers/cluster.py0000664000175000017500000001704013571367171021220 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """EntryWrappers for Cluster and its subelements.""" from oslo_log import log as logging import pypowervm.util as u import pypowervm.wrappers.entry_wrapper as ewrap import pypowervm.wrappers.mtms as mtmwrap import pypowervm.wrappers.storage as stor LOG = logging.getLogger(__name__) # Cluster Constants _CL_NAME = 'ClusterName' _CL_ID = 'ClusterID' _CL_REPOPVS = 'RepositoryDisk' # Yes, really _CL_PV = stor.PHYS_VOL _CL_SSP_LINK = 'ClusterSharedStoragePool' _CL_NODES = 'Node' # Yes, really _CL_NODE = 'Node' _CL_CAPABILITY = 'ClusterCapabilities' _CL_EL_ORDER = (_CL_NAME, _CL_ID, _CL_REPOPVS, _CL_SSP_LINK, _CL_NODE, _CL_CAPABILITY) # Node Constants _N_HOSTNAME = 'HostName' _N_LPARID = 'PartitionID' _N_NAME = 'PartitionName' _N_VIOS_LEVEL = 'VirtualIOServerLevel' _N_VIOS_LINK = 'VirtualIOServer' _N_IPADDR = 'IPAddress' _N_STATE = 'State' _N_EL_ORDER = (_N_HOSTNAME, _N_LPARID, _N_NAME, mtmwrap.MTMS_ROOT, _N_VIOS_LEVEL, _N_VIOS_LINK, _N_IPADDR, _N_STATE) class NodeState(object): """Cluster node state, from NodeState.Enum.""" UP = 'Up' DOWN = 'Down' UNKNOWN = 'Unknown' @ewrap.EntryWrapper.pvm_type('Cluster', child_order=_CL_EL_ORDER) class Cluster(ewrap.EntryWrapper): """A Cluster behind a SharedStoragePool.""" search_keys = dict(name='ClusterName') @classmethod def bld(cls, adapter, name, repos_pv, first_node): """Create a fresh Cluster EntryWrapper. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param name: String name for the Cluster. :param repos_pv: storage.PV representing the repository disk. 
:param first_node: Node wrapper representing the first VIOS to host
                           the Cluster.  (The Cluster Create API only accepts
                           a single node; others must be added later.)  The
                           VIOS must be able to see each disk.
        """
        clust = cls._bld(adapter)
        clust.repos_pv = repos_pv
        clust.nodes = [first_node]
        clust._name(name)
        return clust

    @property
    def name(self):
        """The string name of the Cluster."""
        return self._get_val_str(_CL_NAME)

    def _name(self, newname):
        # Internal setter; the name is fixed at build time via bld().
        self.set_parm_value(_CL_NAME, newname)

    @property
    def id(self):
        """The string ID according to VIOS, not a UUID or UDID."""
        return self._get_val_str(_CL_ID)

    @property
    def ssp_uri(self):
        """The URI of the SharedStoragePool associated with this Cluster."""
        return self.get_href(_CL_SSP_LINK, one_result=True)

    @property
    def ssp_uuid(self):
        """The UUID of the SharedStoragePool associated with this Cluster."""
        # Implicitly returns None if the SSP link is absent.
        uri = self.ssp_uri
        if uri is not None:
            return u.get_req_path_uuid(uri)

    @property
    def repos_pv(self):
        """Returns the (one) repository PV.

        Although the schema technically allows a collection of PVs under the
        RepositoryDisk element, a Cluster always has exactly one repository
        PV.
        """
        repos_elem = self._find_or_seed(_CL_REPOPVS)
        pv_list = repos_elem.findall(_CL_PV)
        # Check only relevant when building up a Cluster wrapper internally;
        # returns None unless exactly one PV is present.
        if pv_list and len(pv_list) == 1:
            return stor.PV.wrap(pv_list[0])
        return None

    @repos_pv.setter
    def repos_pv(self, pv):
        """Set the (single) PV member of RepositoryDisk.

        You cannot change the repository disk of a live Cluster.  This setter
        is useful only when constructing new Clusters.

        :param pv: The PV (NOT a list) to set.
        """
        self.replace_list(_CL_REPOPVS, [pv])

    @property
    def nodes(self):
        """WrapperElemList of Node wrappers."""
        return ewrap.WrapperElemList(self._find_or_seed(_CL_NODES), Node)

    @nodes.setter
    def nodes(self, ns):
        # Replaces the entire Node collection with the supplied list.
        self.replace_list(_CL_NODES, ns)


@ewrap.ElementWrapper.pvm_type('Node', has_metadata=True,
                               child_order=_N_EL_ORDER)
class Node(ewrap.ElementWrapper):
    """A Node represents a VIOS member of a Cluster.
A Cluster cannot simply contain VirtualIOServer links because it is likely that some of the Cluster's members are not managed by the same instance of the PowerVM REST server, which would then have no way to construct said links. In such cases, the Node object supplies enough information about the VIOS that it could be found by a determined consumer. To add a new Node to a Cluster, only the hostname is required. n = Node() n.hostname = ... cluster.nodes.append(n) adapter.update(...) """ @classmethod def bld(cls, adapter, hostname=None, lpar_id=None, mtms=None, vios_uri=None): """Create a fresh Node ElementWrapper. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param hostname: String hostname (or IP) of the Node. :param lpar_id: Integer LPAR ID of the Node. :param mtms: String OR mtms.MTMS wrapper representing the Machine Type, Model, and Serial Number of the system hosting the VIOS. String format: 'MT-M*S' e.g. '8247-22L*1234A0B'. :param vios_uri: String URI representing this Node. """ node = cls._bld(adapter) if vios_uri: node._vios_uri(vios_uri) if lpar_id: node._lpar_id(lpar_id) if mtms: node._mtms(mtms) if hostname: node._hostname(hostname) return node @property def hostname(self): return self._get_val_str(_N_HOSTNAME) def _hostname(self, hn): self.set_parm_value(_N_HOSTNAME, hn) @property def lpar_id(self): """Small integer partition ID, not UUID.""" return self._get_val_int(_N_LPARID) def _lpar_id(self, new_lpar_id): self.set_parm_value(_N_LPARID, str(new_lpar_id)) @property def mtms(self): """MTMS Element wrapper of the system hosting the Node (VIOS).""" return mtmwrap.MTMS.wrap(self._find(mtmwrap.MTMS_ROOT)) def _mtms(self, new_mtms): """Sets the MTMS of the Node. :param new_mtms: May be either a string of the form 'MT-M*S' or a mtms.MTMS ElementWrapper. """ if not isinstance(new_mtms, mtmwrap.MTMS): new_mtms = mtmwrap.MTMS.bld(self.adapter, new_mtms) self.inject(new_mtms.element) @property def vios_uri(self): """The URI of the VIOS. 
This is only set if the VIOS is on this system! """ return self.get_href(_N_VIOS_LINK, one_result=True) def _vios_uri(self, new_uri): self.set_href(_N_VIOS_LINK, new_uri) @property def vios_uuid(self): """The UUID of the Node (VIOS). This is only set if the VIOS is on this system! """ uri = self.vios_uri if uri is not None: return u.get_req_path_uuid(uri, preserve_case=True) @property def state(self): return self._get_val_str(_N_STATE) pypowervm-1.1.24/pypowervm/wrappers/virtual_io_server.py0000664000175000017500000010262013571367171023301 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Wrappers for VirtualIOServer and virtual storage mapping elements.""" import abc import copy import functools import re import six from oslo_log import log as logging import pypowervm.const as c import pypowervm.entities as ent from pypowervm.i18n import _ import pypowervm.util as u import pypowervm.wrappers.base_partition as bp import pypowervm.wrappers.entry_wrapper as ewrap import pypowervm.wrappers.iocard as card import pypowervm.wrappers.logical_partition as lpar import pypowervm.wrappers.managed_system as ms import pypowervm.wrappers.network as net import pypowervm.wrappers.storage as stor LOG = logging.getLogger(__name__) # VIO Constants _VIO_API_CAP = 'APICapable' _VIO_VNIC_CAP = 'IsVNICCapable' _VIO_VNIC_FAILOVER_CAP = 'VNICFailOverCapable' _VIO_SVR_INST_CFG = 'ServerInstallConfiguration' _VIO_LNAGGS = 'LinkAggregations' _VIO_MGR_PASSTHRU_CAP = 'ManagerPassthroughCapable' _VIO_MEDIA_REPOS = 'MediaRepositories' _VIO_MVR_SVC_PARTITION = 'MoverServicePartition' _VIO_NET_BOOT_DEVS = 'NetworkBootDevices' _VIO_PAGING_SVC_PARTITION = 'PagingServicePartition' _VIO_PVS = stor.PVS _VIO_SEAS = net.NB_SEAS _VIO_SSP_CAP = 'SharedStoragePoolCapable' _VIO_SSP_VER = 'SharedStoragePoolVersion' _VIO_STOR_POOLS = 'StoragePools' _VIO_TRUNK_ADPTS = net.SEA_TRUNKS _VIO_LICENSE = 'VirtualIOServerLicense' _VIO_LICENSE_ACCEPTED = 'VirtualIOServerLicenseAccepted' _VIO_VFC_MAPPINGS = 'VirtualFibreChannelMappings' _VIO_VSCSI_MAPPINGS = 'VirtualSCSIMappings' _VIO_FREE_IO_ADPTS_FOR_LNAGG = 'FreeIOAdaptersForLinkAggregation' # "FreeEthernetBackingDevicesForSEA" is really misspelled in the schema. 
_VIO_FREE_ETH_BACKDEVS_FOR_SEA = 'FreeEthenetBackingDevicesForSEA' _VIO_VNIC_BACKDEVS = 'VirtualNICBackingDevices' _VIO_CAPS = 'VirtualIOServerCapabilities' _VIO_VSCSI_BUS = 'VirtualSCSIBus' _VOL_UID = 'VolumeUniqueID' _VOL_NAME = 'VolumeName' _RESERVE_POLICY = 'ReservePolicy' _IO_ADPT_CHOICE = 'IOAdapterChoice' _IO_ADPT = 'IOAdapter' _IO_LINK_AGG_ADPT_ID = 'AdapterID' _IO_LINK_AGG_DESC = 'Description' _IO_LINK_AGG_DEV_NAME = 'DeviceName' _IO_LINK_AGG_DEV_TYPE = 'DeviceType' _IO_LINK_AGG_DRC_NAME = 'DynamicReconfigurationConnectorName' _IO_LINK_AGG_PHYS_LOC = 'PhysicalLocation' _IO_LINK_AGG_UDID = 'UniqueDeviceID' _VIRT_MEDIA_REPOSITORY_PATH = u.xpath(_VIO_MEDIA_REPOS, 'VirtualMediaRepository') _IF_ADDR = u.xpath('IPInterface', 'IPAddress') _ETHERNET_BACKING_DEVICE = u.xpath(_VIO_FREE_ETH_BACKDEVS_FOR_SEA, 'IOAdapterChoice', net.ETH_BACK_DEV) _SEA_PATH = u.xpath(_VIO_SEAS, net.SHARED_ETH_ADPT) # Mapping Constants _MAP_STORAGE = 'Storage' _MAP_TARGET_DEV = 'TargetDevice' _MAP_CLIENT_LPAR = 'AssociatedLogicalPartition' _MAP_PORT = 'Port' _MAP_ORDER = (_MAP_CLIENT_LPAR, stor.CLIENT_ADPT, stor.SERVER_ADPT, _MAP_STORAGE) _VFC_MAP_ORDER = (_MAP_CLIENT_LPAR, stor.CLIENT_ADPT, _MAP_PORT, stor.SERVER_ADPT, _MAP_STORAGE) # VSCSI Bus Constants _BUS_ASSOC_MAPS = 'AssociatedMappings' _BUS_EL_ORDER = (_MAP_CLIENT_LPAR, stor.CLIENT_ADPT, stor.SERVER_ADPT, _BUS_ASSOC_MAPS) # VSCSI Storage/Target Device Constants _STDEV_EL_ORDER = (_MAP_STORAGE, _MAP_TARGET_DEV) _WWPNS_PATH = u.xpath(_VIO_VFC_MAPPINGS, 'VirtualFibreChannelMapping', stor.CLIENT_ADPT, 'WWPNs') _PVS_PATH = u.xpath(stor.PVS, stor.PHYS_VOL) _VIOS_EL_ORDER = bp.BP_EL_ORDER + ( _VIO_API_CAP, _VIO_VNIC_CAP, _VIO_VNIC_FAILOVER_CAP, _VIO_SVR_INST_CFG, _VIO_LNAGGS, _VIO_MGR_PASSTHRU_CAP, _VIO_MEDIA_REPOS, _VIO_MVR_SVC_PARTITION, _VIO_NET_BOOT_DEVS, _VIO_PAGING_SVC_PARTITION, _VIO_PVS, _VIO_SEAS, _VIO_SSP_CAP, _VIO_SSP_VER, _VIO_STOR_POOLS, _VIO_TRUNK_ADPTS, _VIO_LICENSE, _VIO_LICENSE_ACCEPTED, _VIO_VFC_MAPPINGS, 
_VIO_VSCSI_MAPPINGS, _VIO_FREE_IO_ADPTS_FOR_LNAGG, _VIO_FREE_ETH_BACKDEVS_FOR_SEA, _VIO_VNIC_BACKDEVS, _VIO_CAPS, _VIO_VSCSI_BUS) LinkAggrIOAdapterChoice = card.LinkAggrIOAdapterChoice class _VIOSXAGs(object): """Extended attribute groups relevant to Virtual I/O Server. DEPRECATED. Use pypowervm.const.XAG and pypowervm.util.xag_attrs(). """ @functools.total_ordering class _Handler(object): def __init__(self, name): self.name = name self.attrs = u.xag_attrs(name) def __str__(self): return self.name def __eq__(self, other): if type(other) is str: return self.name == other return self.name == other.name def __lt__(self, other): if type(other) is str: return self.name < other return self.name < other.name def __hash__(self): return hash(self.name) _vals = dict( NETWORK=_Handler(c.XAG.VIO_NET), STORAGE=_Handler(c.XAG.VIO_STOR), SCSI_MAPPING=_Handler(c.XAG.VIO_SMAP), FC_MAPPING=_Handler(c.XAG.VIO_FMAP)) def __getattr__(self, item): if item in self._vals: import warnings warnings.warn(_("The 'xags' property of the VIOS EntryWrapper " "class is deprecated! Please use values from " "pypowervm.const.XAG instead."), DeprecationWarning) return self._vals[item] @ewrap.EntryWrapper.pvm_type('VirtualIOServer', child_order=_VIOS_EL_ORDER) class VIOS(bp.BasePartition): # DEPRECATED. Use pypowervm.const.XAG and pypowervm.util.xag_attrs(). xags = _VIOSXAGs() @classmethod def bld(cls, adapter, name, mem_cfg, proc_cfg, io_cfg=None): """Creates a new VIOS wrapper.""" return super(VIOS, cls)._bld_base(adapter, name, mem_cfg, proc_cfg, env=bp.LPARType.VIOS, io_cfg=io_cfg) @ewrap.Wrapper.xag_property(c.XAG.VIO_STOR) def media_repository(self): return self.element.find(_VIRT_MEDIA_REPOSITORY_PATH) def get_vfc_wwpns(self): """Returns a list of the virtual FC WWPN pairs for the vios. The response is a List of Lists. Ex. (('c05076065a8b005a', 'c05076065a8b005b'), ('c05076065a8b0060', 'c05076065a8b0061')) Note: ViosFCMapping extended attribute is required. 
""" return set([frozenset(x.split()) for x in self._get_vals(_WWPNS_PATH)]) def get_pfc_wwpns(self): """Returns a set of the Physical FC Adapter WWPNs on this VIOS.""" path = u.xpath(bp.IO_CFG_ROOT, bp.IO_SLOTS_ROOT, bp.IO_SLOT_ROOT, bp.ASSOC_IO_SLOT_ROOT, bp.RELATED_IO_ADPT_ROOT, bp.IO_PFC_ADPT_ROOT, bp.PFC_PORTS_ROOT, bp.PFC_PORT_ROOT, bp.PFC_PORT_WWPN) return set(self._get_vals(path)) def get_active_pfc_wwpns(self): """Returns a set of Physical FC Adapter WWPNs of 'active' ports.""" # The logic to check for active ports is poor. Right now it only # checks if the port has NPIV connections available. If there is a # FC, non-NPIV card...then this logic fails. # # This will suffice until the backing API adds more granular logic. return [pfc.wwpn for pfc in self.pfc_ports if pfc.npiv_total_ports > 0] @property def pfc_ports(self): """The physical Fibre Channel ports assigned to the VIOS.""" path = u.xpath(bp.IO_CFG_ROOT, bp.IO_SLOTS_ROOT, bp.IO_SLOT_ROOT, bp.ASSOC_IO_SLOT_ROOT, bp.RELATED_IO_ADPT_ROOT, bp.IO_PFC_ADPT_ROOT, bp.PFC_PORTS_ROOT, bp.PFC_PORT_ROOT) elems = self._find(path, use_find_all=True) resp = [] for elem in elems: resp.append(bp.PhysFCPort.wrap(elem)) return resp @property def is_license_accepted(self): return self._get_val_bool(_VIO_LICENSE_ACCEPTED, default=True) def hdisk_reserve_policy(self, disk_uuid): """Get the reserve policy for an hdisk. :param disk_uuid: The uuid of the hdisk. :returns: The reserve policy or None if the disk isn't found. """ policy = None # Get all the physical volume elements and look for a diskname match volumes = self.element.findall(_PVS_PATH) for volume in volumes: vol_uuid = volume.findtext(_VOL_UID) match = re.search(r'^[0-9]{5}([0-9A-F]{32}).+$', vol_uuid) if match and match.group(1) == disk_uuid: policy = volume.findtext(_RESERVE_POLICY) break return policy def hdisk_from_uuid(self, disk_uuid): """Get the hdisk name from the volume uuid. :param disk_uuid: The uuid of the hdisk. :returns: The associated hdisk name. 
""" name = None # Get all the physical volume elements and look for a diskname match volumes = self.element.findall(_PVS_PATH) for volume in volumes: vol_uuid = volume.findtext(stor.UDID) if vol_uuid: LOG.debug('get_hdisk_from_uuid match: %s' % vol_uuid) LOG.debug('get_hdisk_from_uuid disk_uuid: %s' % disk_uuid) if vol_uuid == disk_uuid: name = volume.findtext(_VOL_NAME) break return name @property def is_mover_service_partition(self): return self._get_val_bool(_VIO_MVR_SVC_PARTITION, False) @is_mover_service_partition.setter def is_mover_service_partition(self, value): """Set the Mover Service Partition designation. :param value: Boolean indicating whether the VIOS should be designated as a Mover Service Partition. """ self.set_parm_value(_VIO_MVR_SVC_PARTITION, u.sanitize_bool_for_api(value)) @ewrap.Wrapper.xag_property(c.XAG.VIO_NET) def ip_addresses(self): """Returns a list of IP addresses assigned to the VIOS. Will only return the IP Addresses that can be made known to the system. This only includes online Shared Ethernet Adapters and Ethernet Backing Devices. It will not include, for example, a VLAN adapter. This is a READ-ONLY list. 
""" ip_list = [] # Get all the shared ethernet adapters and free # ethernet devices and pull the IPs seas = self.element.findall(_SEA_PATH) free_eths = self.element.findall(_ETHERNET_BACKING_DEVICE) for eth in seas + free_eths: ip = eth.findtext(_IF_ADDR) if ip and ip not in ip_list: ip_list.append(ip) return tuple(ip_list) @ewrap.Wrapper.xag_property(c.XAG.VIO_FMAP) def vfc_mappings(self): """Returns a WrapperElemList of the VFCMapping objects.""" es = ewrap.WrapperElemList(self._find_or_seed( _VIO_VFC_MAPPINGS, attrib=u.xag_attrs(c.XAG.VIO_FMAP)), VFCMapping) return es @vfc_mappings.setter def vfc_mappings(self, new_mappings): self.replace_list(_VIO_VFC_MAPPINGS, new_mappings, attrib=u.xag_attrs(c.XAG.VIO_FMAP)) @ewrap.Wrapper.xag_property(c.XAG.VIO_SMAP) def scsi_mappings(self): """Returns a WrapperElemList of the VSCSIMapping objects.""" # TODO(efried): remove parent_entry once VIOS has pg83 in Events es = ewrap.WrapperElemList( self._find_or_seed(_VIO_VSCSI_MAPPINGS, attrib=u.xag_attrs(c.XAG.VIO_SMAP)), VSCSIMapping, parent_entry=self) return es @scsi_mappings.setter def scsi_mappings(self, new_mappings): self.replace_list(_VIO_VSCSI_MAPPINGS, new_mappings, attrib=u.xag_attrs(c.XAG.VIO_SMAP)) @ewrap.Wrapper.xag_property(c.XAG.VIO_NET) def seas(self): es = ewrap.WrapperElemList(self._find_or_seed( _VIO_SEAS, attrib=u.xag_attrs(c.XAG.VIO_NET)), net.SEA) return es @ewrap.Wrapper.xag_property(c.XAG.VIO_NET) def trunk_adapters(self): es = ewrap.WrapperElemList( self._find_or_seed(_VIO_TRUNK_ADPTS, attrib=u.xag_attrs(c.XAG.VIO_NET)), net.TrunkAdapter) return es def derive_orphan_trunk_adapters(self): """Builds a list of trunk adapters not attached to a SEA.""" sea_trunks = [] for sea in self.seas: sea_trunks.append(sea.primary_adpt) sea_trunks.extend(sea.addl_adpts) # Subtract the list of our adapters from there. 
orig_trunks = copy.copy(self.trunk_adapters) orphan_trunks = copy.copy(self.trunk_adapters) for sea_trunk in sea_trunks: # We can't just remove because the trunk adapters from the SEA # have the vswitch ref instead of id... So we have to compare # based off anchors. for ta in orig_trunks: if ta.dev_name == sea_trunk.dev_name: orphan_trunks.remove(ta) break return orphan_trunks @ewrap.Wrapper.xag_property(c.XAG.VIO_STOR) def phys_vols(self): """Will return a list of physical volumes attached to this VIOS. This list is READ-ONLY. """ # TODO(efried): remove parent_entry once VIOS has pg83 in Events es = ewrap.WrapperElemList( self._find_or_seed(stor.PVS, attrib=u.xag_attrs(c.XAG.VIO_STOR)), stor.PV, parent_entry=self) es_list = [es_val for es_val in es] return tuple(es_list) @ewrap.Wrapper.xag_property(c.XAG.VIO_NET) def io_adpts_for_link_agg(self): es = ewrap.WrapperElemList( self._find_or_seed(_VIO_FREE_IO_ADPTS_FOR_LNAGG, attrib=u.xag_attrs(c.XAG.VIO_NET)), LinkAggrIOAdapterChoice) return es def can_lpm(self, host_w, migr_data=None): """Determines if a partition is ready for Live Partition Migration. :return capable: False, VIOS types are not LPM capable :return reason: A message that will indicate why it was not capable of LPM. """ return False, _('Partition of VIOS type is not LPM capable') @property def vnic_capable(self): return self._get_val_bool(_VIO_VNIC_CAP) @property def vnic_failover_capable(self): return self._get_val_bool(_VIO_VNIC_FAILOVER_CAP) @six.add_metaclass(abc.ABCMeta) @ewrap.Wrapper.base_pvm_type class VStorageMapping(ewrap.ElementWrapper): """Base class for VSCSIMapping and VFCMapping.""" @staticmethod def crt_related_href(adapter, host_uuid, client_lpar_uuid): """Creates the Element for the 'AssociatedLogicalPartition'. :param adapter: A pypowervm.adapter.Adapter. :param host_uuid: The UUID of the ManagedSystem. Specify None to get a ROOT link. :param client_lpar_uuid: The UUID of the LPAR to which the mapping is to be attached. 
""" if host_uuid is None: return adapter.build_href(lpar.LPAR.schema_type, root_id=client_lpar_uuid, xag=[]) else: return adapter.build_href(ms.System.schema_type, root_id=host_uuid, child_type=lpar.LPAR.schema_type, child_id=client_lpar_uuid, xag=[]) @property def client_lpar_href(self): """Returns the Client LPAR (if any) URI. If None - then no client is connected. """ return self.get_href(_MAP_CLIENT_LPAR, one_result=True) def _client_lpar_href(self, href): self.set_href(_MAP_CLIENT_LPAR, href) @property def client_adapter(self): """Returns the Client side V*ClientAdapterElement. If None - then no client is connected. """ elem = self.element.find(stor.CLIENT_ADPT) if elem is not None: return self._client_adapter_cls.wrap(elem) return None def _client_adapter(self, ca): elem = self._find_or_seed(stor.CLIENT_ADPT) self.element.replace(elem, ca.element) @property def server_adapter(self): """Returns the Virtual I/O Server side V*ServerAdapterElement.""" return self._server_adapter_cls.wrap( self.element.find(stor.SERVER_ADPT)) def _server_adapter(self, sa): elem = self._find_or_seed(stor.SERVER_ADPT) self.element.replace(elem, sa.element) @ewrap.Wrapper.base_pvm_type class _STDevMethods(ewrap.ElementWrapper): """Methods for storage and target common to STDev and VSCSIMapping.""" def _set_stg_and_tgt(self, adapter, stg_ref, lua=None, target_name=None): self.backing_storage = stg_ref if lua is not None or target_name is not None: # Build a *TargetDev of the appropriate type for this stg_ref self._target_dev(stg_ref.target_dev_type.bld(adapter, lua, target_name)) @property def backing_storage(self): """The backing storage element (if applicable). This element may be a PV, LU, VirtualDisk, or VirtualOpticalMedia. May return None. """ elem = self.element.find(_MAP_STORAGE) if elem is None: return None # If backing storage exists, it comprises a single child of elem. But # type is unknown immediately, so call all children and then wrap. 
        stor_elems = list(elem)
        if len(stor_elems) != 1:
            return None

        # TODO(efried): parent_entry not needed once VIOS has pg83 in Events
        parent_entry = getattr(self, 'parent_entry', None)

        # The storage element may be any one of VDisk, VOptMedia, PV, or LU.
        # Allow ElementWrapper to detect (from the registry) and wrap
        # correctly.
        return ewrap.ElementWrapper.wrap(stor_elems[0],
                                         parent_entry=parent_entry)

    @backing_storage.setter
    def backing_storage(self, stg):
        """Sets the backing storage of this mapping to a VDisk, VOpt, LU or PV.

        :param stg: Either a VDisk, VOpt, LU or PV wrapper representing the
                    backing storage to assign.
        """
        # Always replace.  Because while the storage has one element, it
        # can't inject properly if the backing type changes (ex. cloning
        # from vOpt to vDisk).
        stor_elem = ent.Element(_MAP_STORAGE, self.adapter, attrib={},
                                children=[])
        stor_elem.inject(stg.element)
        self.inject(stor_elem)

    @property
    def target_dev(self):
        """The target device associated with the backing storage.

        May be any of {storage_type}TargetDev for {storage_type} in VDisk,
        VOpt, LU or PV.
        """
        elem = self.element.find(_MAP_TARGET_DEV)
        if elem is None:
            return None
        # If the virtual target device exists, it comprises a single child of
        # elem.  But the exact type is unknown.
        vtd_elems = list(elem)
        if len(vtd_elems) != 1:
            return None
        # Let ElementWrapper.wrap figure out (from the registry) the
        # appropriate return type.
        return ewrap.ElementWrapper.wrap(vtd_elems[0])

    def _target_dev(self, vtd):
        """Sets the target device of this mapping.

        :param vtd: A {storage_type}TargetDev ElementWrapper representing the
                    virtual target device to assign.
        """
        vtd_elem = ent.Element(_MAP_TARGET_DEV, self.adapter, attrib={},
                               children=[])
        vtd_elem.inject(vtd.element)
        self.inject(vtd_elem)


@ewrap.ElementWrapper.pvm_type('VirtualSCSIStorageAndTargetDevice',
                               has_metadata=True,
                               child_order=_STDEV_EL_ORDER)
class STDev(_STDevMethods):
    """Mapping backing storage and target device.
    Used as a mixin for VSCSIMapping, and first-class internal Element for
    VSCSIBus.
    """

    @classmethod
    def bld(cls, adapter, stg_ref, lua=None):
        """Build a new STDev - only to be used with VSCSIBus.

        :param adapter: The pypowervm Adapter that will be used to create the
                        mapping.
        :param stg_ref: The backing storage element (PV, LU, VDisk, or
                        VOptMedia) to use in the new mapping.
        :param lua: (Optional.  Default: None) Logical Unit Address to set on
                    the TargetDevice.  If None, the LUA will be assigned by
                    the server.  Should be specified for all of the
                    VSCSIMappings for a particular bus, or none of them.
        :return: The newly-created STDev.
        """
        stdev = super(STDev, cls)._bld(adapter)
        stdev._set_stg_and_tgt(adapter, stg_ref, lua=lua)
        return stdev


@ewrap.ElementWrapper.pvm_type('VirtualSCSIMapping', has_metadata=True,
                               child_order=_MAP_ORDER)
class VSCSIMapping(VStorageMapping, _STDevMethods):
    """The mapping of a VIOS SCSI adapter to the Client LPAR SCSI adapter.

    PowerVM provides a mechanism for Server/Client adapters to provide
    storage connectivity (for LPARs that do not have dedicated hardware).
    This mapping describes the Virtual I/O Server's Server SCSI Adapter and
    the Client LPAR's Client SCSI Adapter.

    To create a new Client SCSI Adapter, create a new mapping and update the
    Virtual I/O Server.  This will be an atomic operation that creates the
    adapters on the Virtual I/O Server and Client LPAR, and then maps them
    properly.  There is no need to pre-create the adapters before creating a
    new mapping.
    """

    # Concrete adapter wrapper classes used by VStorageMapping accessors.
    _client_adapter_cls = stor.VSCSIClientAdapterElement
    _server_adapter_cls = stor.VSCSIServerAdapterElement

    @classmethod
    def bld(cls, adapter, host_uuid, client_lpar_uuid, stg_ref,
            lpar_slot_num=None, lua=None, target_name=None):
        """Creates a new VSCSIMapping

        :param adapter: The pypowervm Adapter that will be used to create the
                        mapping.
        :param host_uuid: Not used.
        :param client_lpar_uuid: The client LPAR's UUID.
        :param stg_ref: The backing storage element (PV, LU, VDisk, or
                        VOptMedia) to use in the new mapping.
        :param lpar_slot_num: (Optional, Default: None) The client slot
                              number to use in the new mapping.  If None then
                              we let REST choose the slot number.
        :param lua: (Optional.  Default: None) Logical Unit Address to set on
                    the TargetDevice.  If None, the LUA will be assigned by
                    the server.  Should be specified for all of the
                    VSCSIMappings for a particular bus, or none of them.
        :param target_name: (Optional, Default: None) Name of the
                            TargetDevice.  If None, the target_name will be
                            assigned by the server.
        :return: The newly-created VSCSIMapping.
        """
        s_map = super(VSCSIMapping, cls)._bld(adapter)
        # Create the 'Associated Logical Partition' element of the mapping.
        # Note: host_uuid is ignored; a ROOT LPAR link is always built.
        s_map._client_lpar_href(
            cls.crt_related_href(adapter, None, client_lpar_uuid))
        s_map._client_adapter(stor.VClientStorageAdapterElement.bld(
            adapter, slot_num=lpar_slot_num))
        s_map._server_adapter(stor.VServerStorageAdapterElement.bld(adapter))
        s_map._set_stg_and_tgt(adapter, stg_ref, lua=lua,
                               target_name=target_name)
        return s_map

    @classmethod
    def bld_from_existing(cls, existing_map, stg_ref, lpar_slot_num=None,
                          lua=None, target_name=None):
        """Clones the existing mapping, but swaps in the new storage elem.

        :param existing_map: The existing VSCSIMapping to clone.
        :param stg_ref: The backing storage element (PV, LU, VDisk, or
                        VOptMedia) to use in the new mapping.  If explicitly
                        None, the new mapping is created with no storage.
        :param lpar_slot_num: (Optional, Default: None) The client slot
                              number to use in the mapping.  If None then the
                              existing slot number is used.
        :param lua: (Optional.  Default: None) Logical Unit Address to set on
                    the TargetDevice.  If None, the LUA will be assigned by
                    the server.  Should be specified for all of the
                    VSCSIMappings for a particular bus, or none of them.
        :param target_name: (Optional, Default: None) Name of the
                            TargetDevice.  If None, the target_name will be
                            assigned by the server.
        :return: The newly-created VSCSIMapping.
        """
        # We do NOT want the source's TargetDevice element, so we explicitly
        # copy the pieces we want from the original mapping.
        new_map = super(VSCSIMapping, cls)._bld(existing_map.adapter)
        if existing_map.client_lpar_href is not None:
            new_map._client_lpar_href(existing_map.client_lpar_href)
        if existing_map.client_adapter is not None:
            new_map._client_adapter(
                copy.deepcopy(existing_map.client_adapter))
        if existing_map.server_adapter is not None:
            new_map._server_adapter(
                copy.deepcopy(existing_map.server_adapter))
        if stg_ref is not None:
            new_map.backing_storage = copy.deepcopy(stg_ref)
        if lpar_slot_num is not None:
            # Set the slot number and remove the 'UseNextAvailableSlot' tag.
            new_map.client_adapter._lpar_slot_num(lpar_slot_num)
            new_map.client_adapter._use_next_slot(False)
        # Triggered when either lua or target_name is supplied.
        # NOTE(review): the error message below mentions only the LUA even
        # though a lone target_name also reaches it - confirm intent.
        if any((lua, target_name)):
            if stg_ref is None:
                raise ValueError(_("Can't specify target device LUA without "
                                   "a backing storage device!"))
            # Build a *TargetDev of the appropriate type for this stg_ref
            new_map._target_dev(stg_ref.target_dev_type.bld(
                existing_map.adapter, lua, target_name))
        return new_map


@ewrap.EntryWrapper.pvm_type('VirtualSCSIBus', child_order=_BUS_EL_ORDER)
class VSCSIBus(ewrap.EntryWrapper, VStorageMapping):
    """Virtual SCSI Bus, first-class CHILD of VirtualIOServer.

    PowerVM provides a mechanism for Server/Client adapters to provide
    storage connectivity (for LPARs that do not have dedicated hardware).
    This mapping describes the Virtual I/O Server's Server SCSI Adapter and
    the Client LPAR's Client SCSI Adapter.

    To create a new Client SCSI Adapter, create a new mapping and update the
    Virtual I/O Server.  This will be an atomic operation that creates the
    adapters on the Virtual I/O Server and Client LPAR, and then maps them
    properly.  There is no need to pre-create the adapters before creating a
    new mapping.
""" _client_adapter_cls = stor.VSCSIClientAdapterElement _server_adapter_cls = stor.VSCSIServerAdapterElement @classmethod def bld(cls, adapter, client_lpar_uuid, lpar_slot_num=None): """Creates a new VSCSIBus with no storage. Storage should be added afterwards by modifying stg_targets. :param adapter: The pypowervm Adapter that will be used to create the bus. :param client_lpar_uuid: The client LPAR's UUID. :param lpar_slot_num: (Optional, Default: None) The client slot number to use in the new mapping. If None then we let REST choose the slot number. :return: The newly-created VSCSIBus. """ s_bus = super(VSCSIBus, cls)._bld(adapter) # Create the 'Associated Logical Partition' element of the mapping. s_bus._client_lpar_href(adapter.build_href(lpar.LPAR.schema_type, client_lpar_uuid, xag=[])) s_bus._client_adapter(stor.VClientStorageAdapterElement.bld( adapter, slot_num=lpar_slot_num)) s_bus._server_adapter(stor.VServerStorageAdapterElement.bld(adapter)) return s_bus @classmethod def bld_from_existing(cls, existing_bus): """Clones a bus's LPAR and client/server adapters, but not storage. :param existing_bus: The existing VSCSIBus to clone. :return: The newly-created VSCSIBus. """ # We do NOT want the source's storage, so we explicitly copy the pieces # we want from the original bus. 
        new_bus = super(VSCSIBus, cls)._bld(existing_bus.adapter)
        if existing_bus.client_lpar_href is not None:
            new_bus._client_lpar_href(existing_bus.client_lpar_href)
        if existing_bus.client_adapter is not None:
            new_bus._client_adapter(
                copy.deepcopy(existing_bus.client_adapter))
        if existing_bus.server_adapter is not None:
            new_bus._server_adapter(
                copy.deepcopy(existing_bus.server_adapter))
        return new_bus

    @property
    def mappings(self):
        """WrapperElemList of the STDev mappings associated with this bus."""
        return ewrap.WrapperElemList(self._find_or_seed(
            _BUS_ASSOC_MAPS), STDev)

    @mappings.setter
    def mappings(self, stdevs):
        # Replaces the entire STDev mapping list on the bus.
        self.replace_list(_BUS_ASSOC_MAPS, stdevs)


@ewrap.ElementWrapper.pvm_type('VirtualFibreChannelMapping',
                               has_metadata=True,
                               child_order=_VFC_MAP_ORDER)
class VFCMapping(VStorageMapping):
    """The mapping of a VIOS FC adapter to the Client LPAR FC adapter.

    PowerVM provides a mechanism for Server/Client adapters to provide
    storage connectivity (for LPARs that do not have dedicated hardware).
    This mapping describes the Virtual I/O Server's Server Fibre Channel
    (FC) Adapter and the Client LPAR's Client FC Adapter.

    To create a new Client FC Adapter, create a new mapping and update the
    Virtual I/O Server.  This will be an atomic operation that creates the
    adapters on the Virtual I/O Server and Client LPAR, and then maps them
    properly.  There is no need to pre-create the adapters before creating a
    new mapping.
    """

    # Concrete adapter wrapper classes used by VStorageMapping accessors.
    _client_adapter_cls = stor.VFCClientAdapterElement
    _server_adapter_cls = stor.VFCServerAdapterElement

    @classmethod
    def bld(cls, adapter, host_uuid, client_lpar_uuid, backing_phy_port,
            client_wwpns=None, lpar_slot_num=None):
        """Creates the VFCMapping object to connect to a Physical FC Port.

        This is used when creating a new mapping between a Client LPAR and
        the VirtualIOServer.  This creates a Fibre Channel connection between
        an LPAR and a physical Fibre Port.

        The response object should be used for creating the mapping via an
        adapter.update() to the Virtual I/O Server.
        The response object will not have the UUIDs (as those are not
        assigned until the update is done).  This holds true for certain
        other elements as well.

        :param adapter: The pypowervm Adapter that will be used to create the
                        mapping.
        :param host_uuid: The host system's UUID.
        :param client_lpar_uuid: The client LPAR's UUID that the disk should
                                 be connected to.
        :param backing_phy_port: The name of the physical FC port that backs
                                 the virtual adapter.
        :param client_wwpns: An optional set of two WWPNs that can be set
                             upon the mapping.  These represent the client
                             VM's WWPNs on the client FC adapter.  If not
                             set, the system will dynamically generate them.
        :param lpar_slot_num: An optional integer to be used as the Virtual
                              slot number on the client adapter
        :returns: The new VFCMapping Wrapper.
        """
        s_map = super(VFCMapping, cls)._bld(adapter)
        # Create the 'Associated Logical Partition' element of the mapping.
        s_map._client_lpar_href(
            cls.crt_related_href(adapter, host_uuid, client_lpar_uuid))
        s_map._client_adapter(stor.VFCClientAdapterElement.bld(
            adapter, wwpns=client_wwpns, slot_num=lpar_slot_num))

        # Create the backing port with required 'Port' tag.
        s_map.backing_port = bp.PhysFCPort.bld_ref(adapter, backing_phy_port,
                                                   ref_tag='Port')

        s_map._server_adapter(stor.VFCServerAdapterElement.bld(adapter))
        return s_map

    @property
    def backing_port(self):
        """The Virtual I/O Server backing PhysicalFCPort.

        If None - then the vfcmap isn't done and no physical port is backing
        it.
""" elem = self.element.find(_MAP_PORT) if elem is not None: return bp.PhysFCPort.wrap(elem) return None @backing_port.setter def backing_port(self, value): """Sets the backing port.""" elem = self._find_or_seed(_MAP_PORT) self.element.replace(elem, value.element) pypowervm-1.1.24/pypowervm/wrappers/__init__.py0000664000175000017500000000000013571367171021262 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/wrappers/vios_file.py0000664000175000017500000001237213571367171021521 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """EntryWrapper for File ('web' namespace).""" import pypowervm.const as pc import pypowervm.wrappers.entry_wrapper as ewrap _FILE_NAME = 'Filename' _FILE_DATE_MOD = 'DateModified' _FILE_INET_MED_TYPE = 'InternetMediaType' _FILE_UUID = 'FileUUID' _FILE_EXP_SIZE = 'ExpectedFileSizeInBytes' _FILE_CUR_SIZE = 'CurrentFileSizeInBytes' _FILE_ENUM_TYPE = 'FileEnumType' _FILE_VIOS = 'TargetVirtualIOServerUUID' _FILE_TDEV_UDID = 'TargetDeviceUniqueDeviceID' _FILE_ASSET_FILE = 'AssetFile' _FILE_CHKSUM = 'SHA256' _DEFAULT_MEDIA_TYPE = 'application/octet-stream' class FileType(object): """Supported file types.""" MEDIA_ISO = 'BROKERED_MEDIA_ISO' DISK_IMAGE = 'BROKERED_DISK_IMAGE' # Obsolete. Behaves the same as DISK_IMAGE. 
    DISK_IMAGE_COORDINATED = 'BROKERED_DISK_IMAGE'


@ewrap.EntryWrapper.pvm_type('File', ns=pc.WEB_NS)
class File(ewrap.EntryWrapper):
    """Wraps the File Metadata for files on the VIOS.

    The API supports passing a File up to devices on the Virtual I/O Server.
    This object wraps the metadata for the Files.
    """

    @classmethod
    def bld(cls, adapter, f_name, f_type, v_uuid, sha_chksum=None,
            f_size=None, tdev_udid=None):
        """Creates a fresh File wrapper that can be used for a create action.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param f_name: The name for the file.
        :param f_type: The type of the file.  One of the FileType values.
        :param v_uuid: The UUID for the Virtual I/O Server that the file
                       will reside on.
        :param sha_chksum: (OPTIONAL) The SHA256 checksum for the file.
                           Useful for integrity checks.
        :param f_size: (OPTIONAL) The size in bytes of the file to upload.
                       Can be an int or a String (that represents an integer
                       number).  Useful for integrity checks.
        :param tdev_udid: The device UDID that the file will back into.
        :returns: The newly created File wrapper.
        """
        # Metadata needs to be in a specific order.  These are required
        f = super(File, cls)._bld(adapter)
        f._file_name(f_name)
        f._internet_media_type(_DEFAULT_MEDIA_TYPE)

        # Optional - should not be included in the Element if None.
        if sha_chksum:
            f._chksum(sha_chksum)

        if f_size:
            f._expected_file_size(f_size)

        # These are required
        f._enum_type(f_type)
        f._vios_uuid(v_uuid)

        # Optical media doesn't need to cite a target dev for file upload
        if tdev_udid:
            f._tdev_udid(tdev_udid)

        return f

    @property
    def file_name(self):
        """The name of the file."""
        return self._get_val_str(_FILE_NAME)

    def _file_name(self, name):
        self.set_parm_value(_FILE_NAME, name)

    @property
    def date_modified(self):
        """The file's last-modified date, as reported by the REST API."""
        return self._get_val_str(_FILE_DATE_MOD)

    @property
    def internet_media_type(self):
        """Typically 'application/octet-stream'."""
        return self._get_val_str(_FILE_INET_MED_TYPE)

    def _internet_media_type(self, imt):
        self.set_parm_value(_FILE_INET_MED_TYPE, imt)

    @property
    def file_uuid(self):
        """The file's UUID (different from the entries)."""
        return self._get_val_str(_FILE_UUID)

    @property
    def expected_file_size(self):
        """Integer expected size (bytes) of the file to be uploaded."""
        return self._get_val_int(_FILE_EXP_SIZE)

    def _expected_file_size(self, sz):
        self.set_parm_value(_FILE_EXP_SIZE, sz)

    @property
    def current_file_size(self):
        """Integer current size (bytes) of the file on the server."""
        return self._get_val_int(_FILE_CUR_SIZE)

    @property
    def enum_type(self):
        """The type of the file.  One of the FileType values."""
        return self._get_val_str(_FILE_ENUM_TYPE)

    def _enum_type(self, et):
        self.set_parm_value(_FILE_ENUM_TYPE, et)

    def _chksum(self, sha):
        # SHA256 checksum, used by the server for integrity verification.
        self.set_parm_value(_FILE_CHKSUM, sha)

    @property
    def vios_uuid(self):
        """UUID of the target Virtual I/O Server."""
        return self._get_val_str(_FILE_VIOS)

    def _vios_uuid(self, uuid):
        self.set_parm_value(_FILE_VIOS, uuid)

    @property
    def tdev_udid(self):
        """UDID of the target device backing this file, if any."""
        return self._get_val_str(_FILE_TDEV_UDID)

    def _tdev_udid(self, udid):
        self.set_parm_value(_FILE_TDEV_UDID, udid)

    @property
    def asset_file(self):
        """Used to identify the asset file on upload.

        Only used in conjunction with DISK_IMAGE_COORDINATED.  Provides the
        path to a file on the local system where data can be sent during an
        upload operation.  This is used for significant speed improvements
        as the REST API server does not need to be involved with the upload.
""" return self._get_val_str(_FILE_ASSET_FILE) pypowervm-1.1.24/pypowervm/wrappers/enterprise_pool.py0000664000175000017500000002274113571367171022754 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from pypowervm import const from pypowervm.i18n import _ from pypowervm.wrappers import entry_wrapper from pypowervm.wrappers import mtms as mtms_wrapper LOG = logging.getLogger(__name__) _POWER_ENTERPRISE_POOL = 'PowerEnterprisePool' _POWER_ENTERPRISE_POOL_MEMBER = 'PowerEnterprisePoolMember' _P_POOL_ID = 'PoolID' _P_POOL_NAME = 'PoolName' _P_COMPLIANCE_STATE = 'ComplianceState' _P_COMPLIANCE_REMAINING_HOURS = 'ComplianceRemainingHours' _PM_PROC_COMPLIANCE_HOURS_LEFT = 'ProcComplianceRemainingHours' _PM_MEM_COMPLIANCE_HOURS_LEFT = 'MemComplianceRemainingHours' _P_TOTAL_MOBILE_PROCS = 'TotalMobileCoDProcUnits' _P_AVAIL_MOBILE_PROCS = 'AvailableMobileCoDProcUnits' _UNRET_MOBILE_PROCS = 'UnreturnedMobileCoDProcUnits' _P_TOTAL_MOBILE_MEM = 'TotalMobileCoDMemory' _P_AVAIL_MOBILE_MEM = 'AvailableMobileCoDMemory' _UNRET_MOBILE_MEM = 'UnreturnedMobileCoDMemory' _PM_MOBILE_PROCS = 'MobileCoDProcUnits' _PM_MOBILE_MEM = 'MobileCoDMemory' _PM_INACTIVE_PROCS = 'InactiveProcUnits' _PM_INACTIVE_MEM = 'InactiveMemory' _PM_SYS_NAME = 'ManagedSystemName' _PM_SYS_INSTALLED_PROCS = 'ManagedSystemInstalledProcUnits' _PM_SYS_INSTALLED_MEM = 'ManagedSystemInstalledMemory' _PM_SYS_MTMS = 
'ManagedSystemMachineTypeModelSerialNumber' _PM_SYS_STATE = 'ManagedSystemState' _MGMT_CONSOLES = 'PowerEnterprisePoolManagementConsoles' _MGMT_CONSOLE = 'PowerEnterprisePoolManagementConsole' _MGMT_CONSOLE_NAME = 'ManagementConsoleName' _MGMT_CONSOLE_MTMS = 'ManagementConsoleMachineTypeModelSerialNumber' _MGMT_CONSOLE_IS_MASTER_CONSOLE = 'IsMasterConsole' _MGMT_CONSOLE_IP_ADDR = 'ManagementConsoleIPAddress' class ComplianceState(object): IN_COMPLIANCE = 'InCompliance' APPROACHING_OUT_OF_COMPLIANCE_SERVER = 'ApproachingOutOfComplianceServer' APPROACHING_OUT_OF_COMPLIANCE_POOL = 'ApproachingOutOfCompliancePool' OUT_OF_COMPLIANCE = 'OutOfCompliance' UNAVAILABLE = 'Unavailable' @entry_wrapper.EntryWrapper.pvm_type(_POWER_ENTERPRISE_POOL, has_metadata=True) class Pool(entry_wrapper.EntryWrapper): """Wraps the Pool entries.""" @property def id(self): """Integer enterprise pool ID.""" return self._get_val_int(_P_POOL_ID) @property def name(self): """The name of the enterprise pool.""" return self._get_val_str(_P_POOL_NAME) @property def compliance_state(self): """The compliance state of the enterprise pool.""" return self._get_val_str(_P_COMPLIANCE_STATE) @entry_wrapper.Wrapper.xag_property(const.XAG.ADV) def compliance_hours_left(self): """Integer num of hours until the pool is considered out of compliance. Return default of 0 if it is not found. 
""" return self._get_val_int(_P_COMPLIANCE_REMAINING_HOURS, default=0) @property def total_mobile_procs(self): """Integer num of the total mobile CoD proc units in the pool.""" return self._get_val_int(_P_TOTAL_MOBILE_PROCS) @property def avail_mobile_procs(self): """Integer num of the available mobile CoD proc units in the pool.""" return self._get_val_int(_P_AVAIL_MOBILE_PROCS, default=0) @property def unret_mobile_procs(self): """Integer num of the unreturned mobile CoD proc units in the pool.""" return self._get_val_int(_UNRET_MOBILE_PROCS) @property def total_mobile_mem(self): """Integer num of the total mobile CoD memory (GB) in the pool.""" return self._get_val_int(_P_TOTAL_MOBILE_MEM) @property def avail_mobile_mem(self): """Integer num of the available mobile CoD memory (GB) in the pool.""" return self._get_val_int(_P_AVAIL_MOBILE_MEM, default=0) @property def unret_mobile_mem(self): """Integer num of the unreturned mobile CoD memory (GB) in the pool.""" return self._get_val_int(_UNRET_MOBILE_MEM) @property def mgmt_consoles(self): """Returns a WrapperElemList of PoolMgmtConsole's.""" elem = self._find_or_seed(_MGMT_CONSOLES) return entry_wrapper.WrapperElemList( elem, PoolMgmtConsole) @property def master_console_mtms(self): """The master console MTMS (machine type, model, serial number).""" for console in self.mgmt_consoles: if console.is_master_console: return console.mtms LOG.error(_('Unable to determine master management console MTMS ' '(machine type, model, serial number) from ' '%(identifier)s because no %(param)s was marked as the ' 'master console for the pool.') % {'identifier': self._type_and_uuid, 'param': _MGMT_CONSOLE}) return None @entry_wrapper.EntryWrapper.pvm_type(_POWER_ENTERPRISE_POOL_MEMBER, has_metadata=True) class PoolMember(entry_wrapper.EntryWrapper): """Wraps the PoolMember entries.""" @property def mobile_procs(self): """Integer num of the mobile CoD proc units on the system.""" return self._get_val_int(_PM_MOBILE_PROCS) 
@entry_wrapper.EntryWrapper.pvm_type(_POWER_ENTERPRISE_POOL_MEMBER,
                                     has_metadata=True)
class PoolMember(entry_wrapper.EntryWrapper):
    """Wrapper around a PowerEnterprisePoolMember entry."""

    @property
    def mobile_procs(self):
        """Number of mobile CoD proc units on the system."""
        return self._get_val_int(_PM_MOBILE_PROCS)

    @mobile_procs.setter
    def mobile_procs(self, value):
        self.set_parm_value(_PM_MOBILE_PROCS, value)

    @property
    def mobile_mem(self):
        """Amount of mobile CoD memory (GB) on the system."""
        return self._get_val_int(_PM_MOBILE_MEM)

    @mobile_mem.setter
    def mobile_mem(self, value):
        self.set_parm_value(_PM_MOBILE_MEM, value)

    @property
    def inactive_procs(self):
        """Number of inactive (dark) proc units on the system."""
        return self._get_val_int(_PM_INACTIVE_PROCS)

    @property
    def inactive_mem(self):
        """Amount of inactive (dark) memory (GB) on the system."""
        return self._get_val_int(_PM_INACTIVE_MEM)

    @property
    def unret_mobile_procs(self):
        """Number of unreturned mobile CoD proc units on the system."""
        return self._get_val_int(_UNRET_MOBILE_PROCS)

    @property
    def unret_mobile_mem(self):
        """Amount of unreturned mobile CoD memory (GB) on the system."""
        return self._get_val_int(_UNRET_MOBILE_MEM)

    @entry_wrapper.Wrapper.xag_property(const.XAG.ADV)
    def proc_compliance_hours_left(self):
        """Hours until out of compliance in terms of mobile procs.

        Returns 0 when the value is absent from the entry.
        """
        return self._get_val_int(_PM_PROC_COMPLIANCE_HOURS_LEFT, default=0)

    @entry_wrapper.Wrapper.xag_property(const.XAG.ADV)
    def mem_compliance_hours_left(self):
        """Hours until out of compliance in terms of mobile memory.

        Returns 0 when the value is absent from the entry.
        """
        return self._get_val_int(_PM_MEM_COMPLIANCE_HOURS_LEFT, default=0)

    @property
    def sys_name(self):
        """Name of the system corresponding to this pool member."""
        return self._get_val_str(_PM_SYS_NAME)

    @property
    def sys_installed_procs(self):
        """Number of installed proc units on the system."""
        return self._get_val_int(_PM_SYS_INSTALLED_PROCS)

    @property
    def sys_installed_mem(self):
        """Amount of installed memory (MB) on the system."""
        return self._get_val_int(_PM_SYS_INSTALLED_MEM)

    @property
    def sys_mtms(self):
        """MTMS (machine type, model, serial number) of the system."""
        return mtms_wrapper.MTMS.wrap(self._find(_PM_SYS_MTMS))

    @property
    def sys_state(self):
        """The state of the system."""
        return self._get_val_str(_PM_SYS_STATE)

    @property
    def mgmt_consoles(self):
        """WrapperElemList of PoolMgmtConsole's for this member."""
        return entry_wrapper.WrapperElemList(
            self._find_or_seed(_MGMT_CONSOLES), PoolMgmtConsole)


@entry_wrapper.ElementWrapper.pvm_type(_MGMT_CONSOLE, has_metadata=True)
class PoolMgmtConsole(entry_wrapper.ElementWrapper):
    """Wrapper around a PowerEnterprisePoolManagementConsole element."""

    @property
    def name(self):
        """Name of the management console."""
        return self._get_val_str(_MGMT_CONSOLE_NAME)

    @property
    def mtms(self):
        """MTMS (machine type, model, serial number) of the console."""
        return mtms_wrapper.MTMS.wrap(self.element.find(_MGMT_CONSOLE_MTMS))

    @property
    def is_master_console(self):
        """True if this console is the master for the pool."""
        return self._get_val_bool(_MGMT_CONSOLE_IS_MASTER_CONSOLE)

    @property
    def ip_addr(self):
        """IP address of the console."""
        return self._get_val_str(_MGMT_CONSOLE_IP_ADDR)
_REASON_CODE = 'ReasonCode'
_MESSAGE = 'Message'
_HTTP_STATUS = 'HTTPStatus'

# Error codes that indicate the VIOS is busy
_VIOS_BUSY_ERR_CODES = ['HSCL3205', 'VIOS0014']


@ewrap.EntryWrapper.pvm_type('HttpErrorResponse', ns=pc.WEB_NS)
class HttpError(ewrap.EntryWrapper):
    """EntryWrapper for an HttpErrorResponse from the REST API."""

    @property
    def status(self):
        """Integer HTTP status code of the error response."""
        return self._get_val_int(_HTTP_STATUS)

    @property
    def reason_code(self):
        """String reason code of the error response."""
        return self._get_val_str(_REASON_CODE)

    @property
    def message(self):
        """String message text of the error response."""
        return self._get_val_str(_MESSAGE)

    def is_vios_busy(self):
        """Best-effort check for a 'VIOS busy' error; never raises.

        :return: True if the error message indicates the VIOS is busy
                 processing other requests; False otherwise (including on
                 any parsing failure).
        """
        try:
            msg = self.message
            # The modern responses carry a well-known error code.
            for code in _VIOS_BUSY_ERR_CODES:
                if code in msg:
                    return True
            # Fall back to scraping older message formats.
            return self._legacy_message_check(msg)
        except Exception:
            # Deliberate best-effort: any failure means "not busy".
            return False

    def _legacy_message_check(self, msg):
        # This logic is...unfortunate.  We have to parse messages for strings
        # (instead of keys).  But we will only do that if it is marked an
        # internal error.
        if self.status != pc.HTTPStatus.INTERNAL_ERROR:
            return False

        # The old message met the following criteria
        old_format = ('VIOS' in msg and
                      'is busy processing some other request' in msg)
        # The new message format is the following
        new_format = 'The system is currently too busy' in msg

        # All others, assume not busy
        return old_format or new_format
"""Wrappers, constants, and helpers around ManagedSystem and its children.""" import re import warnings from oslo_log import log as logging import pypowervm.const as c from pypowervm.i18n import _ import pypowervm.util as u import pypowervm.wrappers.entry_wrapper as ewrap import pypowervm.wrappers.iocard as card import pypowervm.wrappers.mtms as mtmwrap LOG = logging.getLogger(__name__) # ManagedSystem XPath constants _PRIMARY_IP_ADDRESS = 'PrimaryIPAddress' _HOST_IP_ADDRESS = _PRIMARY_IP_ADDRESS _STATE = 'State' _SYSTEM_NAME = 'SystemName' _MASTER_MODE = 'IsPowerVMManagementMaster' _PROC_THROTTLE = 'ProcessorThrottling' _METER_POOL_ID = 'MeteredPoolID' _SYS_CAPABILITIES = 'AssociatedSystemCapabilities' _ACTIVE_LPM_CAP = u.xpath( _SYS_CAPABILITIES, 'ActiveLogicalPartitionMobilityCapable') _INACTIVE_LPM_CAP = u.xpath( _SYS_CAPABILITIES, 'InactiveLogicalPartitionMobilityCapable') _VETH_MAC_ADDR_CAP = u.xpath( _SYS_CAPABILITIES, 'VirtualEthernetCustomMACAddressCapable') _IBMi_LPM_CAP = u.xpath( _SYS_CAPABILITIES, 'IBMiLogicalPartitionMobilityCapable') _IBMi_RESTRICTEDIO_CAP = u.xpath( _SYS_CAPABILITIES, 'IBMiRestrictedIOModeCapable') _SIMP_REMOTE_RESTART_CAP = u.xpath( _SYS_CAPABILITIES, 'PowerVMLogicalPartitionSimplifiedRemoteRestartCapable') _AME_CAP = u.xpath(_SYS_CAPABILITIES, 'ActiveMemoryExpansionCapable') _PPT_CAP = u.xpath(_SYS_CAPABILITIES, 'CustomPhysicalPageTableRatioCapable') _AIX_CAP = u.xpath(_SYS_CAPABILITIES, 'AIXCapable') _IBMi_CAP = u.xpath(_SYS_CAPABILITIES, 'IBMiCapable') _LINUX_CAP = u.xpath(_SYS_CAPABILITIES, 'LinuxCapable') _SHR_PROC_POOL_CAP = u.xpath( _SYS_CAPABILITIES, 'SharedProcessorPoolCapable') _VNIC_CAP = u.xpath(_SYS_CAPABILITIES, 'VirtualNICDedicatedSRIOVCapable') _VNIC_FAILOVER_CAP = u.xpath(_SYS_CAPABILITIES, 'VirtualNICFailOverCapable') _DYN_SRR_CAP = u.xpath( _SYS_CAPABILITIES, 'DynamicSimplifiedRemoteRestartToggleCapable') _IBMi_NATIVE_IO_CAP = u.xpath(_SYS_CAPABILITIES, 'IBMiNativeIOCapable') _DISABLE_SECURE_BOOT_CAP = u.xpath( 
_SYS_CAPABILITIES, 'DisableSecureBootCapable') _PARTITION_SECURE_BOOT_CAP = u.xpath( _SYS_CAPABILITIES, 'PartitionSecureBootCapable') _IOSLOT_OWNER_ASSMT_CAP = u.xpath( _SYS_CAPABILITIES, 'IOSlotOwnerAssignmentCapable') _DED_PROC_POOL_CAP = u.xpath( _SYS_CAPABILITIES, 'DedicatedProcessorPartitionCapable') # Migration Constants _SYS_PROC_CONFIG = 'AssociatedSystemProcessorConfiguration' _PROC_COMPAT_MODES = u.xpath( _SYS_PROC_CONFIG, 'SupportedPartitionProcessorCompatibilityModes') _MIN_PROC_UNITS_PER_CPU = u.xpath( _SYS_PROC_CONFIG, 'MinimumProcessorUnitsPerVirtualProcessor') _MIGR_INFO = 'SystemMigrationInformation' _MAX_ACTIVE_MIGR = u.xpath(_MIGR_INFO, 'MaximumActiveMigrations') _MAX_INACTIVE_MIGR = u.xpath(_MIGR_INFO, 'MaximumInactiveMigrations') _ACTIVE_MIGR_RUNNING = u.xpath( _MIGR_INFO, 'NumberOfActiveMigrationsInProgress') _INACTIVE_MIGR_RUNNING = u.xpath( _MIGR_INFO, 'NumberOfInactiveMigrationsInProgress') _MAX_FIRMWARE_MIGR = u.xpath(_MIGR_INFO, 'MaximumFirmwareActiveMigrations') _AFFINITY_CHECK_CAP = u.xpath( _MIGR_INFO, 'LogicalPartitionAffinityCheckCapable') _CAPABILITY_MAP = { 'active_lpar_mobility_capable': { 'prop': _ACTIVE_LPM_CAP, 'default': False}, 'inactive_lpar_mobility_capable': { 'prop': _INACTIVE_LPM_CAP, 'default': False}, # custom_mac_addr_capable True is correct for POWER7 'custom_mac_addr_capable': { 'prop': _VETH_MAC_ADDR_CAP, 'default': True}, 'ibmi_lpar_mobility_capable': { 'prop': _IBMi_LPM_CAP, 'default': False}, 'ibmi_restrictedio_capable': { 'prop': _IBMi_RESTRICTEDIO_CAP, 'default': False}, 'simplified_remote_restart_capable': { 'prop': _SIMP_REMOTE_RESTART_CAP, 'default': False}, 'physical_page_table_ratio_capable': { 'prop': _PPT_CAP, 'default': False}, 'affinity_check_capable': { 'prop': _AFFINITY_CHECK_CAP, 'default': False}, 'active_memory_expansion_capable': { 'prop': _AME_CAP, 'default': False}, # aix_capable defaults to True for backward compat (that is what we # returned before there was a capability for this in the 
PowerVM REST API) 'aix_capable': { 'prop': _AIX_CAP, 'default': True}, 'ibmi_capable': { 'prop': _IBMi_CAP, 'default': False}, 'linux_capable': { 'prop': _LINUX_CAP, 'default': True}, 'shared_processor_pool_capable': { 'prop': _SHR_PROC_POOL_CAP, 'default': False}, 'vnic_capable': { 'prop': _VNIC_CAP, 'default': False}, 'vnic_failover_capable': { 'prop': _VNIC_FAILOVER_CAP, 'default': False}, 'dynamic_srr_capable': { 'prop': _DYN_SRR_CAP, 'default': False}, 'ibmi_nativeio_capable': { 'prop': _IBMi_NATIVE_IO_CAP, 'default': False}, 'disable_secure_boot_capable': { 'prop': _DISABLE_SECURE_BOOT_CAP, 'default': False}, 'partition_secure_boot_capable': { 'prop': _PARTITION_SECURE_BOOT_CAP, 'default': False}, 'ioslot_owner_assignment_capable': { 'prop': _IOSLOT_OWNER_ASSMT_CAP, 'default': False}, 'dedicated_processor_partition_capable': { 'prop': _DED_PROC_POOL_CAP, 'default': True}, } _SYS_MEM_CONFIG = 'AssociatedSystemMemoryConfiguration' _MEMORY_INSTALLED = u.xpath(_SYS_MEM_CONFIG, 'InstalledSystemMemory') _MEMORY_AVAIL = u.xpath(_SYS_MEM_CONFIG, 'CurrentAvailableSystemMemory') _MEMORY_CONFIGURABLE = u.xpath(_SYS_MEM_CONFIG, 'ConfigurableSystemMemory') _MEMORY_REGION_SIZE = u.xpath(_SYS_MEM_CONFIG, 'MemoryRegionSize') _SYS_FIRMWARE_MEM = u.xpath(_SYS_MEM_CONFIG, 'MemoryUsedByHypervisor') _PAGE_TABLE_RATIO = u.xpath(_SYS_MEM_CONFIG, 'DefaultHardwarePageTableRatio') _DEFAULT_PPT_RATIO = u.xpath(_SYS_MEM_CONFIG, 'DefaultPhysicalPageTableRatio') _PROC_UNITS_INSTALLED = u.xpath( _SYS_PROC_CONFIG, 'InstalledSystemProcessorUnits') _PROC_UNITS_AVAIL = u.xpath( _SYS_PROC_CONFIG, 'CurrentAvailableSystemProcessorUnits') _PROC_UNITS_CONFIGURABLE = u.xpath( _SYS_PROC_CONFIG, 'ConfigurableSystemProcessorUnits') _MAX_PROCS_PER_PARTITION = u.xpath( _SYS_PROC_CONFIG, 'CurrentMaximumAllowedProcessorsPerPartition') _MAX_PROCS_PER_AIX_LINUX_PARTITION = u.xpath( _SYS_PROC_CONFIG, 'CurrentMaximumProcessorsPerAIXOrLinuxPartition') _MAX_VCPUS_PER_PARTITION = u.xpath( _SYS_PROC_CONFIG, 
@ewrap.EntryWrapper.pvm_type('ManagedSystem')
class System(ewrap.EntryWrapper):
    """The PowerVM system that is being managed."""

    @property
    def system_name(self):
        """String name of the managed system."""
        return self._get_val_str(_SYSTEM_NAME)

    @property
    def mtms(self):
        """MTMS (machine type, model, serial number) wrapper for the system."""
        return mtmwrap.MTMS.wrap(self.element.find(mtmwrap.MTMS_ROOT))

    @property
    def asio_config(self):
        """ASIOConfig wrapper for the associated system I/O configuration."""
        return ASIOConfig.wrap(self.element.find(_ASIO_ROOT))

    @property
    def system_state(self):
        """String state of the system; 'unknown' if not present."""
        return self._get_val_str(_STATE, 'unknown')

    @property
    def proc_units(self):
        """Float number of installed system processor units (0 if absent)."""
        return self._get_val_float(_PROC_UNITS_INSTALLED, 0)

    @property
    def min_proc_units(self):
        """Float minimum processor units per virtual processor."""
        return self._get_val_float(_MIN_PROC_UNITS_PER_CPU, 0)

    @property
    def proc_units_configurable(self):
        """Float number of configurable system processor units."""
        return self._get_val_float(_PROC_UNITS_CONFIGURABLE, 0)

    @property
    def proc_units_avail(self):
        """Float number of currently available system processor units."""
        return self._get_val_float(_PROC_UNITS_AVAIL, 0)

    @property
    def max_sys_procs_limit(self):
        """Integer maximum allowed processors per partition."""
        return self._get_val_int(_MAX_PROCS_PER_PARTITION, 0)

    @property
    def max_procs_per_aix_linux_lpar(self):
        """Integer max processors per AIX/Linux LPAR.

        Falls back to the system-wide per-partition limit when the
        type-specific value is absent or zero.
        """
        val = self._get_val_int(_MAX_PROCS_PER_AIX_LINUX_PARTITION, 0)
        # Some systems will not have maximum procs per lpar based on
        # partition type. In that case, use system max procs per partition.
        if val == 0:
            val = self.max_sys_procs_limit

        return val

    @max_procs_per_aix_linux_lpar.setter
    def max_procs_per_aix_linux_lpar(self, value):
        self.set_parm_value(_MAX_PROCS_PER_AIX_LINUX_PARTITION, str(value))

    @property
    def max_sys_vcpus_limit(self):
        """Integer maximum allowed virtual processors per partition."""
        return self._get_val_int(_MAX_VCPUS_PER_PARTITION, 0)

    @property
    def max_vcpus_per_aix_linux_lpar(self):
        """Integer max virtual processors per AIX/Linux LPAR.

        Falls back to the system-wide per-partition limit when the
        type-specific value is absent or zero.
        """
        val = self._get_val_int(_MAX_VCPUS_PER_AIX_LINUX_PARTITION, 0)
        # Some systems will not have maximum vcpus per lpar based on
        # partition type. In that case, use system max vcpus per partition.
        if val == 0:
            val = self.max_sys_vcpus_limit

        return val

    @max_vcpus_per_aix_linux_lpar.setter
    def max_vcpus_per_aix_linux_lpar(self, value):
        self.set_parm_value(_MAX_VCPUS_PER_AIX_LINUX_PARTITION, str(value))

    @property
    def memory_total(self):
        """Integer installed system memory (MB per _MEMORY_INSTALLED)."""
        return self._get_val_int(_MEMORY_INSTALLED, 0)

    @property
    def memory_free(self):
        """Integer currently available system memory."""
        return self._get_val_int(_MEMORY_AVAIL, 0)

    @property
    def memory_configurable(self):
        """Integer configurable system memory."""
        return self._get_val_int(_MEMORY_CONFIGURABLE, 0)

    @property
    def memory_region_size(self):
        """Integer memory region (LMB) size."""
        return self._get_val_int(_MEMORY_REGION_SIZE, 0)

    @property
    def firmware_memory(self):
        """Integer memory used by the hypervisor."""
        return self._get_val_int(_SYS_FIRMWARE_MEM, 0)

    @property
    def page_table_ratio(self):
        """Integer default hardware page table ratio."""
        return self._get_val_int(_PAGE_TABLE_RATIO, 0)

    @property
    def default_ppt_ratio(self):
        """Integer default physical page table ratio.

        Platforms that don't provide the default PPTR from REST
        have a default value of 6 (1:4096).
        """
        return self._get_val_int(_DEFAULT_PPT_RATIO, 6)

    @property
    def host_ip_address(self):
        """String primary IP address of the host, or None if absent."""
        prop = _HOST_IP_ADDRESS
        val = self._get_val_str(prop)

        return val

    def get_capability(self, key):
        """returns: The requested system capability from Power.

        :param key: Capability name; one of the keys of _CAPABILITY_MAP.
        :return: Boolean capability value, the map's default when the
                 property is absent, or None for an unknown key.
        """
        if key in _CAPABILITY_MAP:
            prop = _CAPABILITY_MAP[key]['prop']
            default = _CAPABILITY_MAP[key]['default']

            if key == 'aix_capable':
                str_val = self._get_val_str(prop)
                # we can get 'unavailable' if PHYP interface is running an
                # older level and doesn't support query of this information
                if str_val is not None and str_val.lower() == 'inactive':
                    return False
                # NOTE(review): any other value (including 'unavailable')
                # yields the default (True) -- confirm that is intended.
                return default
            return self._get_val_bool(prop, default=default)
        return None

    def get_capabilities(self):
        """returns: The system capabilities from Power.

        Dict mapping each _CAPABILITY_MAP key to its get_capability() value.
        """
        return {key: self.get_capability(key) for key in _CAPABILITY_MAP}

    @property
    def proc_compat_modes(self):
        """List of strings containing the processor compatibility modes.

        This is a READ-ONLY list.
        """
        return tuple(self._get_vals(_PROC_COMPAT_MODES))

    def highest_compat_mode(self):
        """This method returns the highest compatibility mode of the host.

        Parses numeric suffixes out of 'powerN'/'powerN+' mode names and
        returns the largest N, or 0 if no mode matches.
        """
        modes = []
        pattern = r'^power(\d+)\+?$'
        for mode in self.proc_compat_modes:
            match = re.search(pattern, mode.lower())
            if match:
                modes.append(int(match.group(1)))
        modes = sorted(modes)
        if modes:
            return modes[-1]
        else:
            return 0

    @property
    def migration_data(self):
        """returns: The migration properties from PowerVM.

        This information should not be changed and should be treated as read
        only.

        The returned dict also includes every capability from
        get_capabilities().
        """
        max_migr_sup = self._get_val_int(_MAX_FIRMWARE_MIGR)
        act_migr_sup = self._get_val_int(_MAX_ACTIVE_MIGR)
        inact_migr_sup = self._get_val_int(_MAX_INACTIVE_MIGR)
        # Preferred limits mirror the supported limits.
        pref_act_migr_sup = act_migr_sup
        pref_inact_migr_sup = inact_migr_sup
        act_migr_prog = self._get_val_int(_ACTIVE_MIGR_RUNNING)
        inact_migr_prog = self._get_val_int(_INACTIVE_MIGR_RUNNING)
        proc_compat = (','.join(self.proc_compat_modes))

        migr_data = {'max_migration_ops_supported': max_migr_sup,
                     'active_migrations_supported': act_migr_sup,
                     'inactive_migrations_supported': inact_migr_sup,
                     'preferred_active_migrations_supported':
                     pref_act_migr_sup,
                     'preferred_inactive_migrations_supported':
                     pref_inact_migr_sup,
                     'active_migrations_in_progress': act_migr_prog,
                     'inactive_migrations_in_progress': inact_migr_prog,
                     'proc_compat': proc_compat}

        # Copy get_capabilities() dictionary into migration_data in case
        # sometimes we need validate the host is capable for mobility.
        cap_data = self.get_capabilities()
        migr_data.update(cap_data)

        return migr_data

    @property
    def vios_links(self):
        """List of hrefs from AssociatedVirtualIOServers.

        This is a READ-ONLY list.
        """
        return self.get_href(_VIOS_LINK)

    @property
    def session_is_master(self):
        """The master mode state of this managed system.

        Use pypowervm.tasks.master_mode.request_master to request master mode

        :returns: True if the management node of this System's
                  adapter.session is the master.
        """
        return self._get_val_bool(_MASTER_MODE, True)

    @property
    def metered_pool_id(self):
        """String metered pool ID, or None if absent."""
        return self._get_val_str(_METER_POOL_ID)

    @property
    def processor_is_throttled(self):
        """Boolean processor throttling state."""
        return self._get_val_bool(_PROC_THROTTLE)
""" return self._get_val_bool(_MASTER_MODE, True) @property def metered_pool_id(self): return self._get_val_str(_METER_POOL_ID) @property def processor_is_throttled(self): return self._get_val_bool(_PROC_THROTTLE) @ewrap.ElementWrapper.pvm_type(_ASIO_ROOT, has_metadata=True) class ASIOConfig(ewrap.ElementWrapper): """The associated system IO configuration for this system.""" @property def avail_wwpns(self): return self._get_val_int(_ASIO_AVAIL_WWPNS) @property def io_slots(self): es = ewrap.WrapperElemList(self._find_or_seed(_ASIO_IOSLOTS), IOSlot) return es @property def wwpn_prefix(self): return self._get_val_str(_ASIO_WWPN_PREFIX) @property def sriov_adapters(self): es = ewrap.WrapperElemList(self._find_or_seed(_ASIO_SRIOVS), child_class=card.SRIOVAdapter, indirect='IOAdapterChoice') return es @ewrap.ElementWrapper.pvm_type(_IOSLOT_ROOT, has_metadata=True) class IOSlot(ewrap.ElementWrapper): """An I/O Slot represents a device bus on the system. It may contain a piece of hardware within it. """ @property def bus_grp_required(self): return self._get_val_bool(_IOSLOT_BUS_GRP_REQ) @property def description(self): return self._get_val_str(_IOSLOT_DESC) @property def feat_codes(self): return self._get_val_int(_IOSLOT_FEAT_CODES) @property def part_id(self): """Short ID of the partition to which the slot is assigned. None if the slot is unassigned. """ return self._get_val_int(_IOSLOT_PART_ID) @property def part_uuid(self): """UUID of the partition to which the slot is assigned. None if the slot is unassigned. """ return self._get_val_str(_IOSLOT_PART_UUID) @property def part_name(self): """String name of the partition to which the slot is assigned. None if the slot is unassigned. """ return self._get_val_str(_IOSLOT_PART_NAME) @property def part_type(self): """String type of the partition to which the slot is assigned. May be compared with base_partition.LPARType enum values. None if the slot is unassigned. 
""" return self._get_val_str(_IOSLOT_PART_TYPE) @property def pci_class(self): return self._get_val_int(_IOSLOT_PCI_CLASS) @property def pci_dev_id(self): return self._get_val_int(_IOSLOT_PCI_DEV_ID) @property def pci_subsys_dev_id(self): return self._get_val_int(_IOSLOT_PCI_SUB_DEV_ID) @property def pci_sub_dev_id(self): """Deprecated - use pci_subsys_dev_id instead.""" warnings.warn(_( "This property is deprecated! " "Use pci_subsys_dev_id instead."), DeprecationWarning) return self.pci_subsys_dev_id @property def pci_rev_id(self): return self._get_val_int(_IOSLOT_PCI_REV_ID) @property def pci_revision_id(self): """Deprecated - use pci_rev_id instead.""" warnings.warn(_( "This property is deprecated! " "Use pci_rev_id instead."), DeprecationWarning) return self.pci_rev_id @property def pci_vendor_id(self): return self._get_val_int(_IOSLOT_PCI_VEND_ID) @property def pci_subsys_vendor_id(self): return self._get_val_int(_IOSLOT_PCI_SUB_VEND_ID) @property def pci_sub_vendor_id(self): """Deprecated - use pci_subsys_vendor_id instead.""" warnings.warn(_( "This property is deprecated! " "Use pci_subsys_vendor_id instead."), DeprecationWarning) return self.pci_subsys_vendor_id @property def drc_index(self): return self._get_val_int(_IOSLOT_DYN_REC_CON_INDEX) @property def dyn_reconfig_conn_index(self): """Deprecated - use drc_index instead.""" warnings.warn(_( "This property is deprecated! " "Use drc_index instead."), DeprecationWarning) return self.drc_index @property def drc_name(self): return self._get_val_str(_IOSLOT_DYN_REC_CON_NAME) @property def dyn_reconfig_conn_name(self): """Deprecated - use drc_name instead.""" warnings.warn(_( "This property is deprecated! " "Use drc_name instead."), DeprecationWarning) return self.drc_name pypowervm-1.1.24/pypowervm/wrappers/monitor.py0000664000175000017500000001366413571367171021236 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Wrappers/helpers for Performance and Capacity Monitoring (PCM) metrics.""" import abc import datetime import pytz import six from oslo_log import log as logging from pypowervm import adapter as adpt import pypowervm.const as pc import pypowervm.util as u import pypowervm.wrappers.entry_wrapper as ewrap # Constants that make up the http path PREFERENCES = 'preferences' RAW_METRICS = 'RawMetrics' LONG_TERM_MONITOR = 'LongTermMonitor' SHORT_TERM_MONITOR = 'ShortTermMonitor' PCM_SERVICE = 'pcm' _SYSTEM_NAME = 'SystemName' _LTM_ENABLED = 'LongTermMonitorEnabled' _AGG_ENABLED = 'AggregationEnabled' _STM_ENABLED = 'ShortTermMonitorEnabled' _COMP_LTM_ENABLED = 'ComputeLTMEnabled' _UPDATED = 'updated' _TITLE = 'title' _PUBLISHED = 'published' _CATEGORY = 'category' _DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f' LOG = logging.getLogger(__name__) @ewrap.EntryWrapper.pvm_type('ManagedSystemPcmPreference', ns=pc.PCM_NS) class PcmPref(ewrap.EntryWrapper): """Wraps the Performance and Capacity Monitoring preferences.""" @property def system_name(self): return self._get_val_str(_SYSTEM_NAME) @property def ltm_enabled(self): """Long Term Monitoring.""" return self._get_val_bool(_LTM_ENABLED) @ltm_enabled.setter def ltm_enabled(self, value): """Long Term Monitoring.""" self.set_parm_value(_LTM_ENABLED, u.sanitize_bool_for_api(value)) @property def aggregation_enabled(self): """Metrics Aggregation.""" return self._get_val_bool(_AGG_ENABLED) 
@six.add_metaclass(abc.ABCMeta)
class MonitorMetrics(object):
    """A pseudo wrapper for Monitor metrics.

    The standard pattern of wrapping a response or entry and accessing
    properties for the data can be used, even though this isn't a traditional
    EntryWrapper.
    """

    def __init__(self, entry):
        # The Atom entry for a single metrics file.
        self.entry = entry

    @staticmethod
    def _str_to_datetime(str_date):
        """Parse a metrics timestamp string into an aware datetime.

        :param str_date: Timestamp string in one of the two formats noted
                         below.
        :return: A timezone-aware datetime.datetime (UTC tzinfo).
        """
        # The format of the string is one of two ways.
        # Current: 2015-04-30T06:11:35.000-05:00
        # Legacy: 2015-04-30T06:11:35.000Z (the Z was meant to be timezone).
        #
        # The formatter will strip any Z's that may be in the string out.
        str_date = str_date.replace('Z', '-00:00')

        # Separate out the timezone.  Datetime doesn't like formatting time
        # zones, so we pull it out for manual parsing.  It is the 6th digit
        # from the right.
        str_date, str_tz = str_date[:-6], str_date[-6:]

        # We now have the date, without the timezone.
        date = (datetime.datetime.strptime(str_date, _DATETIME_FORMAT).
                replace(tzinfo=pytz.utc))

        # Parse out the timezone.
        tz_oper = str_tz[0]
        tz_hr, tz_min = int(str_tz[1:3]), int(str_tz[4:6])
        tz_delta = datetime.timedelta(hours=tz_hr, minutes=tz_min)

        # Return the date plus/minus the timezone delta.
        # NOTE(review): for a '+HH:MM' offset this *adds* the delta to a
        # value already tagged UTC; converting local time to UTC would
        # subtract.  Looks like a sign inversion -- confirm against the
        # consumers before changing, as this behavior has long shipped.
        return (date + tz_delta) if (tz_oper == '+') else (date - tz_delta)

    @classmethod
    def wrap(cls, response_or_entry):
        """Wrap a Response (returning a list) or a single entry.

        :param response_or_entry: An adapter.Response (whose feed entries
                                  are each wrapped) or a single entry.
        :return: A list of cls instances for a Response; a single cls
                 instance otherwise.
        """
        if isinstance(response_or_entry, adpt.Response):
            return [cls(entry) for entry in response_or_entry.feed.entries]
        else:
            return cls(response_or_entry)

    @property
    def id(self):
        """The UUID of the wrapped entry."""
        return self.entry.uuid

    @property
    def published(self):
        """String 'published' property of the entry."""
        return self.entry.properties.get(_PUBLISHED)

    @property
    def published_datetime(self):
        """The 'published' property parsed into an aware datetime."""
        return self._str_to_datetime(self.published)

    @property
    def title(self):
        """String 'title' property of the entry."""
        return self.entry.properties.get(_TITLE)

    @property
    def updated(self):
        """String 'updated' property of the entry."""
        return self.entry.properties.get(_UPDATED)

    @property
    def updated_datetime(self):
        """The 'updated' property parsed into an aware datetime."""
        return self._str_to_datetime(self.updated)

    @property
    def category(self):
        """String 'category' property of the entry."""
        return self.entry.properties.get(_CATEGORY)

    @property
    def link(self):
        """The entry's default (unnamed) link."""
        return self.entry.links[None][0]
class LTMMetrics(MonitorMetrics):
    """A pseudo wrapper for Long Term Monitor metrics.

    The standard pattern of wrapping a response or entry and accessing
    properties for the data can be used, even though this isn't a traditional
    EntryWrapper.
    """
    pass


class STMMetrics(MonitorMetrics):
    """A pseudo wrapper for Short Term Monitor metrics.

    The standard pattern of wrapping a response or entry and accessing
    properties for the data can be used, even though this isn't a traditional
    EntryWrapper.
    """
    pass
@ewrap.EntryWrapper.pvm_type('ManagementConsole', child_order=_CONS_EL_ORDER)
class ManagementConsole(ewrap.EntryWrapper):
    """The PowerVM ManagementConsole.

    This refers to the console that is managing PowerVM system.  It's the
    one providing the REST API interface.
    """

    @property
    def name(self):
        """String name of the management console."""
        return self._get_val_str(_MGMT_CON_NAME)

    @property
    def mtms(self):
        """MTMS (machine type, model, serial number) of the console."""
        return mtmwrap.MTMS.wrap(self.element.find(mtmwrap.MTMS_ROOT))

    @property
    def network_interfaces(self):
        """NetworkInterfaces wrapper for the console."""
        return NetworkInterfaces.wrap(self.element.find(_NETI_ROOT))

    @property
    def ssh_public_key(self):
        """String public SSH key of the console."""
        return self._get_val_str(_PUB_KEY)

    @property
    def ssh_authorized_keys(self):
        """Tuple of the authorized SSH keys as plain strings."""
        key_wraps = ewrap.WrapperElemList(
            self._find_or_seed(_AUTH_KEYS), AuthorizedKey)
        return tuple(key_wrap.key for key_wrap in key_wraps)

    @ssh_authorized_keys.setter
    def ssh_authorized_keys(self, keys):
        """Replace the authorized keys given an iterable of key strings."""
        key_elems = [AuthorizedKey.bld(self.adapter, key) for key in keys]
        self.replace_list(_AUTH_KEYS, key_elems,
                          attrib=c.ATTR_SCHEMA_KSV130)
""" return tuple(key_w.key for key_w in ewrap.WrapperElemList(self._find_or_seed(_AUTH_KEYS), AuthorizedKey)) @ssh_authorized_keys.setter def ssh_authorized_keys(self, keys): """Sets the keys given a list of key strings.""" self.replace_list( _AUTH_KEYS, [AuthorizedKey.bld(self.adapter, key) for key in keys], attrib=c.ATTR_SCHEMA_KSV130) @ewrap.ElementWrapper.pvm_type(_AUTH_KEY, attrib={}) class AuthorizedKey(ewrap.ElementWrapper): """The Authorized Key wrapper.""" @classmethod def bld(cls, adapter, key): new_key = super(AuthorizedKey, cls)._bld(adapter) new_key.key = key return new_key @property def key(self): return self.element.text @key.setter def key(self, val): self.element.text = val @ewrap.ElementWrapper.pvm_type(_NETI_ROOT, has_metadata=True) class NetworkInterfaces(ewrap.ElementWrapper): """The Network Interfaces wrapper.""" @property def console_interface(self): return ConsoleNetworkInterfaces.wrap( self.element.find(_MGMT_NETI_ROOT)) @ewrap.ElementWrapper.pvm_type(_MGMT_NETI_ROOT, has_metadata=True) class ConsoleNetworkInterfaces(ewrap.ElementWrapper): """The Console Network Interfaces wrapper.""" @property def name(self): return self._get_val_str(_MGMT_NETI_NAME) @property def address(self): return self._get_val_str(_MGMT_NETI_ADDRESS) pypowervm-1.1.24/pypowervm/wrappers/pcm/0000775000175000017500000000000013571367172017743 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/wrappers/pcm/vios.py0000664000175000017500000001420513571367171021276 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Wrappers to parse the output of the PCM JSON data from VIOS."""

import abc
import json

import six

from pypowervm.wrappers import pcm


class ViosInfo(object):
    """Represents a monitor sample from the Virtual I/O Server monitor.

    The VIOS PCM monitor JSON data can be parsed by this.  The base
    structure is:
    - ViosInfo
      - Info
      - ViosSample
        - ViosMemory
        - ViosNetwork
          - ViosNetworkAdpt (List)
          - ViosSharedEthernetAdapter (List)
        - ViosStorage
          - ViosFCPhysAdpt (List)
            - ViosFCVirtAdpt (List)
          - ViosStoragePAdpt (List)
          - ViosStorageVAdpt (List)
          - ViosSSP (List)
    """

    def __init__(self, raw_json):
        parsed = json.loads(raw_json)
        sys_util = parsed.get('systemUtil')
        self.info = pcm.Info(sys_util.get('utilInfo'))
        self.sample = ViosSample(sys_util.get('utilSample'))


class ViosSample(object):
    """One 'utilSample' entry from the VIOS PCM data."""

    def __init__(self, util_sample):
        self.time_stamp = util_sample.get('timeStamp')

        # TODO(thorst) Evaluate with multi VIOS.
        vios_data = util_sample.get('viosUtil')[0]

        # Convert the ID to keep consistent with phyp.
        self.id = int(vios_data.get('id'))
        self.name = vios_data.get('name')

        # Complex types; an absent (or empty) section maps to None.
        mem = vios_data.get('memory')
        self.mem = ViosMemory(mem) if mem else None
        net = vios_data.get('network')
        self.network = ViosNetwork(net) if net else None
        stor = vios_data.get('storage')
        self.storage = ViosStorage(stor) if stor else None


class ViosMemory(object):
    """Memory utilization for the VIOS."""

    def __init__(self, mem):
        self.utilized_mem = mem.get('utilizedMem')


class ViosNetwork(object):
    """The Network elements within the VIOS."""

    def __init__(self, net):
        self.adpts = [ViosNetworkAdpt(a)
                      for a in net.get('genericAdapters', [])]
        self.seas = [ViosSharedEthernetAdapter(s)
                     for s in net.get('sharedAdapters', [])]


class ViosNetworkAdpt(object):
    """Represents a Network Adapter on the system."""

    def __init__(self, data):
        self.name = data.get('id')
        # Type: 'virtual' or 'physical' or 'sea' (if NetworkBridge)
        self.type = data.get('type')
        self.physical_location = data.get('physicalLocation')
        self.received_packets = data.get('receivedPackets')
        self.sent_packets = data.get('sentPackets')
        self.dropped_packets = data.get('droppedPackets')
        self.received_bytes = data.get('receivedBytes')
        self.sent_bytes = data.get('sentBytes')


class ViosSharedEthernetAdapter(ViosNetworkAdpt):
    """Represents a Shared Ethernet Adapter on the VIOS."""

    def __init__(self, sea_data):
        super(ViosSharedEthernetAdapter, self).__init__(sea_data)
        self.bridged_adpts = sea_data.get('bridgedAdapters')


class ViosStorage(object):
    """Represents the storage elements on the VIOS."""

    def __init__(self, storage):
        self.fc_adpts = [ViosFCPhysAdpt(a)
                         for a in storage.get('fiberChannelAdapters', [])]
        self.phys_adpts = [ViosStoragePAdpt(a)
                           for a in storage.get('genericPhysicalAdapters',
                                                [])]
        self.virt_adpts = [ViosStorageVAdpt(a)
                           for a in storage.get('genericVirtualAdapters', [])]
        self.ssps = [ViosSSP(s)
                     for s in storage.get('sharedStoragePools', [])]


@six.add_metaclass(abc.ABCMeta)
class ViosStorageAdpt(object):
    """Base class for storage adapters."""

    def __init__(self, data):
        self.name = data.get('id')
        self.physical_location = data.get('physicalLocation')
        self.num_reads = data.get('numOfReads')
        self.num_writes = data.get('numOfWrites')
        self.read_bytes = data.get('readBytes')
        self.write_bytes = data.get('writeBytes')


class ViosFCPhysAdpt(ViosStorageAdpt):
    """Represents a physical fiber channel adapter on the VIOS."""

    def __init__(self, data):
        super(ViosFCPhysAdpt, self).__init__(data)
        self.wwpn = data.get('wwpn')
        # Appears to be Gb/s interface speed
        self.running_speed = data.get('runningSpeed')

        # TODO(thorst) Add FC Ports (need vfc mappings)
        self.ports = [ViosFCVirtAdpt(p) for p in data.get('ports', [])]


class ViosFCVirtAdpt(ViosStorageAdpt):
    """Represents a Virtual FC Port (NPIV)."""

    def __init__(self, data):
        super(ViosFCVirtAdpt, self).__init__(data)
        self.wwpn = data.get('wwpn')
        # Appears to be Gb/s interface speed
        self.running_speed = data.get('runningSpeed')


class ViosStoragePAdpt(ViosStorageAdpt):
    """Represents a physical storage adapter (typically a SAS drive)."""

    def __init__(self, data):
        super(ViosStoragePAdpt, self).__init__(data)
        self.type = data.get('type')


class ViosStorageVAdpt(ViosStorageAdpt):
    """Represents a virtual storage adapter (vscsi)."""

    def __init__(self, data):
        super(ViosStorageVAdpt, self).__init__(data)
        self.type = data.get('type')


class ViosSSP(object):
    """Represents a Shared Storage Pool (entire element)."""

    def __init__(self, ssp):
        self.name = ssp.get('id')
        self.pool_disks = ssp.get('poolDisks')
        self.num_reads = ssp.get('numOfReads')
        self.num_writes = ssp.get('numOfWrites')
        self.total_space = ssp.get('totalSpace')
        self.used_space = ssp.get('usedSpace')
        self.read_bytes = ssp.get('readBytes')
        self.write_bytes = ssp.get('writeBytes')


# === pypowervm/wrappers/pcm/__init__.py ===
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Wrappers used for multiple types of PCM data."""


class Info(object):
    """Common PCM metadata - the 'utilInfo' section of the payload."""

    def __init__(self, utilInfo):
        # Map JSON keys onto snake_case attributes.
        for attr, key in (('version', 'version'),
                          ('metric_type', 'metricType'),
                          ('monitoring_type', 'monitoringType'),
                          ('mtms', 'mtms'),
                          ('name', 'name')):
            setattr(self, attr, utilInfo.get(key))


# === pypowervm/wrappers/pcm/lpar.py ===
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Wrappers to parse the PCM JSON data from IBM.Host Resource Manager."""

import json


class LparInfo(object):
    """Represents a monitor sample from the IBM.Host Resource Manager.

    The Lpar JSON utilization data has the following structure:
    - LparUtil
      - LparMemory

    lpar_metrics are generally collected once every two minutes, as
    opposed to the other data which is collected every 30 seconds.
    """

    def __init__(self, raw_json):
        payload = json.loads(raw_json)
        self._lparuuid_to_util_dict = {}
        parsed_utils = []
        for metrics in payload.get('lparUtil'):
            if 'errorInfo' in metrics:
                err_id = metrics['errorInfo']['errorId']
                if err_id not in ('6001', '6003'):
                    # Any other errors that might get introduced at neo-rest.
                    continue
                # Known error states - synthesize a memory section so the
                # sample can still be wrapped.
                self._create_lpar_memory_util_for_errored_vm(err_id, metrics)
            wrapped = LparUtil(metrics)
            parsed_utils.append(wrapped)
            self._lparuuid_to_util_dict[wrapped.uuid] = wrapped
        self._lpars_util = parsed_utils

    def find(self, lpar_uuid):
        """Returns the LparUtil for the given LPAR UUID, or None."""
        return self._lparuuid_to_util_dict.get(lpar_uuid, None)

    def _create_lpar_memory_util_for_errored_vm(
            self, error_code, lpar_metrics_rsct):
        """Injects a synthetic 'memory' section into an errored sample."""
        if error_code == '6001':
            # If LPAR is powered off, then no memory is being used.
            lpar_metrics_rsct['memory'] = dict(
                pctRealMemFree=100, vmPgInRate=0, vmPgOutRate=0,
                vmPgSpInRate=0, vmPgSpOutRate=0)
        elif error_code == '6003':
            # If LPAR has inactive RMC, then assume all memory is being used.
            lpar_metrics_rsct['memory'] = dict(pctRealMemFree=0)

    @property
    def lpars_util(self):
        """The list of LparUtil samples (errored-and-skipped VMs omitted)."""
        return self._lpars_util


class LparUtil(object):
    """Represents individual Lpar metric information."""

    def __init__(self, lpar_util):
        self._uuid = lpar_util.get('uuid')
        self._lpar_id = lpar_util.get('id')
        self._name = lpar_util.get('name')
        self._timestamp = lpar_util.get('timestamp')
        self._memory = LparMemory(lpar_util.get('memory'))

    @property
    def lpar_id(self):
        return self._lpar_id

    @property
    def uuid(self):
        return self._uuid

    @property
    def name(self):
        return self._name

    @property
    def timestamp(self):
        return self._timestamp

    @property
    def memory(self):
        return self._memory


class LparMemory(object):
    """Represents information on Lpar memory utilization."""

    def __init__(self, memory):
        # Map JSON keys onto the private backing attributes; missing keys
        # simply become None.
        for attr, key in (
                ('_pct_real_mem_avbl', 'pctRealMemAvbl'),
                ('_total_pg_count', 'totalPgSpSizeCount'),
                ('_free_pg_count', 'totalPgSpFreeCount'),
                ('_active_pg_count', 'vmActivePgCount'),
                ('_real_mem_size_bytes', 'realMemSizeBytes'),
                ('_pct_real_mem_free', 'pctRealMemFree'),
                ('_vm_pg_in_rate', 'vmPgInRate'),
                ('_vm_pg_out_rate', 'vmPgOutRate'),
                ('_vm_pg_swap_in_rate', 'vmPgSpInRate'),
                ('_vm_pg_swap_out_rate', 'vmPgSpOutRate')):
            setattr(self, attr, memory.get(key))

    @property
    def pct_real_mem_avbl(self):
        return self._pct_real_mem_avbl

    @property
    def total_pg_count(self):
        return self._total_pg_count

    @property
    def free_pg_count(self):
        return self._free_pg_count

    @property
    def active_pg_count(self):
        return self._active_pg_count

    @property
    def real_mem_size_bytes(self):
        return self._real_mem_size_bytes

    @property
    def pct_real_mem_free(self):
        return self._pct_real_mem_free

    @property
    def vm_pg_in_rate(self):
        return self._vm_pg_in_rate

    @property
    def vm_pg_out_rate(self):
        return self._vm_pg_out_rate

    @property
    def vm_pg_swap_in_rate(self):
        return self._vm_pg_swap_in_rate

    @property
    def vm_pg_swap_out_rate(self):
        return self._vm_pg_swap_out_rate


# === pypowervm/wrappers/pcm/phyp.py ===
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Wrappers to parse the output of the PCM JSON data from PHYP."""

import abc
import json

import six

from pypowervm.wrappers import pcm


class PhypInfo(object):
    """Represents a monitor sample from the PHYP monitor.

    The PHYP PCM monitor JSON data can be parsed by this.  The base
    structure is:
    - PhypInfo
      - Info
      - PhypSample
        - PhypSystemFirmware
        - PhypSystemProcessor
        - PhypSystemMemory
        - PhypSharedProcPool
        - PhypVMSample (list - client virtual machines)
          - PhypLparProc
          - PhypLparMemory
          - PhypNetwork
            - PhypVEA
            - PhypSriovLparPort
          - PhypStorage
            - PhypStorageVAdpt
            - PhypVirtualFCAdpt
        - PhypViosSample (list - Virtual I/O Servers)
          - PhypLparProc
    """

    def __init__(self, raw_json):
        """Parses the raw PHYP PCM JSON string.

        :param raw_json: The raw JSON payload (str) from the PCM REST feed.
        """
        data = json.loads(raw_json)
        systemUtil = data.get('systemUtil')
        # 'utilInfo' is shared metadata; 'utilSample' is the actual sample.
        self.info = pcm.Info(systemUtil.get('utilInfo'))
        self.sample = PhypSample(systemUtil.get('utilSample'))


class PhypSample(object):
    """A Power Hypervisor Sample."""

    def __init__(self, util_sample):
        self.time_stamp = util_sample.get('timeStamp')
        self.status = util_sample.get('status')
        self.time_based_cycles = util_sample.get('timeBasedCycles')

        # Complex objects - each section is optional; absent maps to None.
        sys_f = util_sample.get('systemFirmware')
        self.system_firmware = (None if sys_f is None
                                else PhypSystemFirmware(sys_f))

        proc = util_sample.get('processor')
        self.processor = None if proc is None else PhypSystemProcessor(proc)

        mem = util_sample.get('memory')
        self.memory = None if mem is None else PhypSystemMemory(mem)

        # NOTE(review): unlike the sections above, these three lists are
        # assumed present - a missing key would raise TypeError here.
        spp_list = util_sample.get('sharedProcessorPool')
        self.shared_proc_pools = [PhypSharedProcPool(x) for x in spp_list]

        # List of LPARs
        lpars = util_sample.get('lparsUtil')
        self.lpars = [PhypVMSample(x) for x in lpars]

        # List of Virtual I/O Servers
        vioses = util_sample.get('viosUtil')
        self.vioses = [PhypViosSample(x) for x in vioses]


class PhypSystemFirmware(object):
    """Firmware information from PHYP."""

    def __init__(self, system_firmware):
        self.utilized_proc_cycles = system_firmware.get('utilizedProcCycles')
        self.assigned_mem = system_firmware.get('assignedMem')


class PhypSharedProcPool(object):
    """Information of the Shared Processor Pool."""

    def __init__(self, spp):
        self.id = spp.get('id')
        self.name = spp.get('name')
        self.assigned_proc_cycles = spp.get('assignedProcCycles')
        self.utilized_pool_cycles = spp.get('utilizedPoolCycles')
        self.max_proc_units = spp.get('maxProcUnits')
        self.borrowed_pool_proc_units = spp.get('borrowedPoolProcUnits')


class PhypSystemProcessor(object):
    """Processor information about the entire system from PHYP."""

    def __init__(self, processor):
        self.total_proc_units = processor.get('totalProcUnits')
        self.configurable_proc_units = processor.get('configurableProcUnits')
        self.available_proc_units = processor.get('availableProcUnits')
        self.proc_cycles_per_sec = processor.get('procCyclesPerSecond')


class PhypSystemMemory(object):
    """System wide Memory information from PHYP."""

    def __init__(self, mem):
        self.total_mem = mem.get('totalMem')
        self.available_mem = mem.get('availableMem')
        self.configurable_mem = mem.get('configurableMem')


@six.add_metaclass(abc.ABCMeta)
class PhypLparSample(object):
    """A LPAR sample presented by PHYP.

    Generic for VIOS & VM.
    """

    def __init__(self, lpar):
        self.id = lpar.get('id')
        self.uuid = lpar.get('uuid')
        self.name = lpar.get('name')
        self.state = lpar.get('state')
        self.affinity_score = lpar.get('affinityScore')

        # Complex types
        proc = lpar.get('processor')
        self.processor = None if proc is None else PhypLparProc(proc)


class PhypViosSample(PhypLparSample):
    """A VIOS sample presented by the PHYP metrics."""

    def __init__(self, lpar):
        super(PhypViosSample, self).__init__(lpar)


class PhypVMSample(PhypLparSample):
    """A Virtual Machine (non VIOS) presented by the PHYP metrics."""

    def __init__(self, lpar):
        super(PhypVMSample, self).__init__(lpar)
        self.type = lpar.get('type')

        # Complex Types - each optional; absent maps to None.
        mem = lpar.get('memory')
        self.memory = None if mem is None else PhypLparMemory(mem)

        net = lpar.get('network')
        self.network = None if net is None else PhypNetwork(net)

        storage = lpar.get('storage')
        self.storage = None if storage is None else PhypStorage(storage)


class PhypLparMemory(object):
    """A sample of a Client Virtual Machines's memory presented by PHYP.

    Part of the PhypLparSample.
    """

    def __init__(self, mem):
        self.logical_mem = mem.get('logicalMem')
        self.backed_physical_mem = mem.get('backedPhysicalMem')


class PhypLparProc(object):
    """A sample of the LPARs processor presented by PHYP.

    Part of the PhypLparSample
    """

    def __init__(self, proc):
        self.pool_id = proc.get('poolId')
        self.mode = proc.get('mode')
        self.virt_procs = proc.get('maxVirtualProcessors')
        self.proc_units = proc.get('maxProcUnits')
        self.weight = proc.get('weight')
        # Cycle counters - presumably cumulative since boot; confirm against
        # the PCM REST documentation before relying on deltas.
        self.entitled_proc_cycles = proc.get('entitledProcCycles')
        self.util_cap_proc_cycles = proc.get('utilizedCappedProcCycles')
        self.util_uncap_proc_cycles = proc.get('utilizedUnCappedProcCycles')
        self.idle_proc_cycles = proc.get('idleProcCycles')
        self.donated_proc_cycles = proc.get('donatedProcCycles')
        self.time_wait_dispatch = proc.get('timeSpentWaitingForDispatch')
        self.total_instructions = proc.get('totalInstructions')
        self.total_inst_exec_time = proc.get('totalInstructionsExecutionTime')


class PhypNetwork(object):
    """A sample of the LPARs network information.

    Part of the PhypLparSample
    """

    def __init__(self, network):
        veas = network.get('virtualEthernetAdapters')
        self.veas = [] if veas is None else [PhypVEA(x) for x in veas]

        sriov_ports = network.get('sriovLogicalPorts')
        self.sriov_ports = ([] if sriov_ports is None
                            else [PhypSriovLparPort(x) for x in sriov_ports])


class PhypVEA(object):
    """The Virtual Ethernet Adapters (aka. CNA's) data."""

    def __init__(self, vea):
        self.vlan_id = vea.get('vlanId')
        self.vswitch_id = vea.get('vswitchId')
        self.physical_location = vea.get('physicalLocation')
        self.is_pvid = vea.get('isPortVLANID')
        self.received_packets = vea.get('receivedPackets')
        self.sent_packets = vea.get('sentPackets')
        self.dropped_packets = vea.get('droppedPackets')
        self.sent_bytes = vea.get('sentBytes')
        self.received_bytes = vea.get('receivedBytes')
        self.received_physical_packets = vea.get('receivedPhysicalPackets')
        self.sent_physical_packets = vea.get('sentPhysicalPackets')
        self.dropped_physical_packets = vea.get('droppedPhysicalPackets')
        self.sent_physical_bytes = vea.get('sentPhysicalBytes')
        self.received_physical_bytes = vea.get('receivedPhysicalBytes')


class PhypSriovLparPort(object):
    """A metric for the SR-IOV Logical Ports."""

    def __init__(self, sriov_p):
        self.drc_index = sriov_p.get('drcIndex')
        self.phys_drc_index = sriov_p.get('physicalDrcIndex')
        self.phys_port_id = sriov_p.get('physicalPortId')
        self.physical_location = sriov_p.get('physicalLocation')
        self.received_packets = sriov_p.get('receivedPackets')
        self.sent_packets = sriov_p.get('sentPackets')
        self.dropped_sent_packets = sriov_p.get('droppedSentPackets')
        self.dropped_received_packets = sriov_p.get('droppedReceivedPackets')
        self.sent_bytes = sriov_p.get('sentBytes')
        # NOTE(review): 'recevied_bytes' is misspelled, but it is a public
        # attribute name - renaming it would break consumers.
        self.recevied_bytes = sriov_p.get('receivedBytes')
        self.error_in = sriov_p.get('errorIn')
        self.error_out = sriov_p.get('errorOut')


class PhypStorage(object):
    """A sample of the LPARs storage information.

    Part of the PhypLparSample
    """

    def __init__(self, stor):
        v_adpts = stor.get('genericVirtualAdapters')
        self.v_stor_adpts = ([] if v_adpts is None
                             else [PhypStorageVAdpt(x) for x in v_adpts])

        v_fcs = stor.get('virtualFiberChannelAdapters')
        self.v_fc_adpts = ([] if v_fcs is None
                           else [PhypVirtualFCAdpt(x) for x in v_fcs])


class PhypStorageVAdpt(object):
    """An indicator to the Client VM Storage to the VIOS storage elem."""

    def __init__(self, stor):
        self.physical_location = stor.get('physicalLocation')
        self.vios_id = stor.get('viosId')
        self.vios_slot = stor.get('viosAdapterSlotId')


class PhypVirtualFCAdpt(object):
    """An indicator to identify the Client VFC Adpt with the VIOS storage."""

    def __init__(self, vfc):
        self.vios_id = vfc.get('viosId')
        # The PCM metrics will have wwpnPair as key name in older versions
        # and wwpnpair as key name in newer versions.
        self.wwpn_pair = vfc.get('wwpnpair', vfc.get('wwpnPair', []))
        self.physical_location = vfc.get('physicalLocation')


# === pypowervm/wrappers/mtms.py ===
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pypowervm.wrappers.entry_wrapper as ewrap

# MTMS XPath constants
MTMS_ROOT = 'MachineTypeModelAndSerialNumber'
_MTMS_MT = 'MachineType'
_MTMS_MODEL = 'Model'
_MTMS_SERIAL = 'SerialNumber'


@ewrap.ElementWrapper.pvm_type(MTMS_ROOT, has_metadata=True)
class MTMS(ewrap.ElementWrapper):
    """The Machine Type, Model and Serial Number wrapper."""

    @classmethod
    def bld(cls, adapter, mtms_str):
        """Creates a new MTMS ElementWrapper from its string representation.

        Fix: the previous docstring described machine_type/model/serial
        parameters that do not exist in this signature; the values are
        always parsed out of mtms_str.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param mtms_str: String representation of Machine Type, Model,
                         and Serial Number.  The format is
                         Machine Type - Model Number * Serial
                         Example: 8247-22L*1234567
        :raises ValueError: If mtms_str does not contain both the '*' and
                            '-' separators.
        """
        mtms = super(MTMS, cls)._bld(adapter)
        mtm, sn = mtms_str.split('*', 1)
        mt, md = mtm.split('-', 1)

        # Assignment order is significant
        mtms.machine_type = mt
        mtms.model = md
        mtms.serial = sn
        return mtms

    @property
    def machine_type(self):
        return self._get_val_str(_MTMS_MT)

    @machine_type.setter
    def machine_type(self, mt):
        self.set_parm_value(_MTMS_MT, mt)

    @property
    def model(self):
        return self._get_val_str(_MTMS_MODEL)

    @model.setter
    def model(self, md):
        self.set_parm_value(_MTMS_MODEL, md)

    @property
    def serial(self):
        return self._get_val_str(_MTMS_SERIAL)

    @serial.setter
    def serial(self, sn):
        self.set_parm_value(_MTMS_SERIAL, sn)

    @property
    def mtms_str(self):
        """Builds a string representation of the MTMS.

        Does not override default __str__ as that is useful for debug
        purposes.
        """
        return self.machine_type + '-' + self.model + '*' + self.serial


# === pypowervm/wrappers/logical_partition.py ===
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""LPAR, the EntryWrapper for LogicalPartition."""

from oslo_log import log as logging

import pypowervm.const as pc
from pypowervm.i18n import _
import pypowervm.util as u
import pypowervm.wrappers.base_partition as bp
import pypowervm.wrappers.entry_wrapper as ewrap

LOG = logging.getLogger(__name__)

# XPath constants for LogicalPartition child elements.
_LPAR_MIG_STG_VIOS_DATA_STATUS = 'MigrationStorageViosDataStatus'
_LPAR_MIG_STG_VIOS_DATA_TIME = 'MigrationStorageViosDataTimestamp'
_LPAR_RR = 'RemoteRestartCapable'
_LPAR_SRR = 'SimplifiedRemoteRestartCapable'
_LPAR_HAS_DED_PROCS_FOR_MIG = 'HasDedicatedProcessorsForMigration'
_LPAR_SUSPEND_CAP = 'SuspendCapable'
_LPAR_MIG_DISABLE = 'MigrationDisable'
_LPAR_MIG_STATE = 'MigrationState'
_LPAR_RR_STATE = 'RemoteRestartState'
_LPAR_PRI_PGING_SVC_PART = 'PrimaryPagingServicePartition'
_LPAR_POWER_MGT_MODE = 'PowerManagementMode'
_LPAR_SEC_PGING_SVC_PART = 'SecondaryPagingServicePartition'
_LPAR_USES_HSL_OPTICONN = 'UsesHighSpeedLinkOpticonnect'
_LPAR_USES_VIRT_OPTICONN = 'UsesVirtualOpticonnect'
_LPAR_VFC_CLIENT_ADPTS = 'VirtualFibreChannelClientAdapters'
_LPAR_VSCSI_CLIENT_ADPTS = 'VirtualSCSIClientAdapters'
_LPAR_RESTRICTED_IO = 'IsRestrictedIOPartition'
_LPAR_STG_DEV_UDID = 'StorageDeviceUniqueDeviceID'
_LPAR_DES_IPL_SRC = 'DesignatedIPLSource'
_LPAR_DED_VNICS = 'DedicatedVirtualNICs'
_LPAR_BOOTLIST_INFO = 'BootListInformation'

# Schema-mandated child element ordering: the BasePartition order followed
# by the LPAR-specific elements.
_LPAR_EL_ORDER = bp.BP_EL_ORDER + (
    _LPAR_MIG_STG_VIOS_DATA_STATUS, _LPAR_MIG_STG_VIOS_DATA_TIME, _LPAR_RR,
    _LPAR_SRR, _LPAR_HAS_DED_PROCS_FOR_MIG, _LPAR_SUSPEND_CAP,
    _LPAR_MIG_DISABLE, _LPAR_MIG_STATE, _LPAR_RR_STATE,
    _LPAR_PRI_PGING_SVC_PART, _LPAR_POWER_MGT_MODE,
    _LPAR_SEC_PGING_SVC_PART, _LPAR_USES_HSL_OPTICONN,
    _LPAR_USES_VIRT_OPTICONN, _LPAR_VFC_CLIENT_ADPTS,
    _LPAR_VSCSI_CLIENT_ADPTS, _LPAR_RESTRICTED_IO, _LPAR_STG_DEV_UDID,
    _LPAR_DES_IPL_SRC, _LPAR_DED_VNICS, _LPAR_BOOTLIST_INFO)


class IPLSrc(object):
    """Mirror of IPLSource.Enum (relevant to IBMi partitions only).

    Valid values for:
    - LPAR.desig_ipl_src
    - 'iIPLsource' param in pypowervm.power.power_on.

    Example usage:
    - ilpar.desig_ipl_src = IPLSrc.C
      ilpar.update()
    - power_on(..., add_parms={IPLSrc.KEY: IPLSrc.A, ...})
    """
    KEY = 'iIPLsource'
    A = 'a'
    B = 'b'
    C = 'c'
    D = 'd'
    UNKNOWN = 'Unknown'
    ALL_VALUES = (A, B, C, D, UNKNOWN)


class RRState(object):
    """Remote Restart states - mirror of PartitionRemoteRestart.Enum."""
    INVALID = "Invalid"
    RR_ABLE = "Remote_Restartable"
    SRC_RRING = "Source_Remote_Restarting"
    DEST_RRING = "Destination_Remote_Restarting"
    REM_RESTARTED = "Remote_Restarted"
    PROF_RESTORED = "Profile_Restored"
    RES_STG_DEV_UPD_FAIL = "Reserved_Storage_Device_Update_Failed"
    FORCED_SRC_RESTART = "Forced_Source_Side_Restart"
    SRC_CLEANUP_FAIL = "Source_Side_Cleanup_Failed"
    RES_STG_DEV_UPD_FAIL_W_OVRD = ("Reserved_Storage_Device_Update_Failed_With"
                                   "_Override")
    RR_ABLE_SUSPENDED = "Remote_Restartable_Suspended"
    LOC_UPD_FAIL = "Local_Update_Failed"
    PART_UPD = "Partial_Update"
    STALE_DATA = "Stale_Data"
    LOC_DATA_VALID = "Local_Data_Valid"
    OUT_OF_SPACE = "Out_Of_Space"
    LOC_DATA_INVALID = "Local_Data_Invalid"
    DEST_RR_ED = "Destination_Remote_Restarted"
    SRC_RRING_SUSPENDED = "Source_Remote_Restarting_Suspended"
    LOC_STG_UPD_FAIL = "Local_Storage_Update_Failed"
    PG_DEV_UPD_OVRD = "Page_Device_Update_Override"


class BootStorageType(object):
    """Enumeration of possible storage connection methods for devices."""
    VSCSI = 'vscsi'
    VFC = 'npiv'
    UNKNOWN = 'Unknown'
    ALL_VALUES = (VSCSI, VFC, UNKNOWN)


@ewrap.EntryWrapper.pvm_type('LogicalPartition', child_order=_LPAR_EL_ORDER)
class LPAR(bp.BasePartition, ewrap.WrapperSetUUIDMixin):

    @classmethod
    def bld(cls, adapter, name, mem_cfg, proc_cfg, env=bp.LPARType.AIXLINUX,
            io_cfg=None):
        """Creates an LPAR wrapper.

        Thin wrapper around BasePartition._bld_base, defaulting env.
        """
        return super(LPAR, cls)._bld_base(adapter, name, mem_cfg, proc_cfg,
                                          env, io_cfg)

    def _can_modify(self, dlpar_cap, cap_desc):
        """Checks to determine if the LPAR can be modified.

        :param dlpar_cap: The appropriate DLPAR attribute to validate.  Only
                          used if system is active.
        :param cap_desc: A translated string indicating the DLPAR capability.
        :return capable: True if HW can be added/removed.  False otherwise.
        :return reason: A translated message that will indicate why it was
                        not capable of modification.  If capable is True, the
                        reason will be None.
        """
        # If we are in the LPAR, we have access to the operating system type.
        # If it is an OS400 type, then we can add/remove HW no matter what.
        if self.env == bp.LPARType.OS400:
            return True, None
        return super(LPAR, self)._can_modify(dlpar_cap, cap_desc)

    def can_lpm(self, host_w, migr_data=None):
        """Determines if a LPAR is ready for Live Partition Migration.

        This check validates that the target system is capable of
        handling the LPAR if the LPAR is an IBMi.  It simply validates that
        the LPAR has the essential capabilities in place for a LPM operation.

        :param host_w: The host wrapper for the system.
        :param migr_data: The dictionary of migration data for the target
                          host.  If parameters are not passed in, will skip
                          the check and let the low levels surface related
                          error.  The supported key today is:
                          - ibmi_lpar_mobility_capable: Boolean
                          TODO(IBM): add more destination checks here.  Ex.
                          migrate an AIX or IBMi VM to a Linux only host.
        :return capable: True if the LPAR is LPM capable.  False otherwise.
        :return reason: A translated message that will indicate why it was
                        not capable of LPM.  If capable is True, the reason
                        will be None.
        """
        # First check is the not activated state
        if self.state != bp.LPARState.RUNNING:
            return False, _("LPAR is not in an active state.")

        if self.env == bp.LPARType.OS400:
            # IBM i does not require RMC, but does need to check for target
            # host and source host are capable for IBMi mobility and
            # restricted I/O.
            if migr_data is not None:
                c = migr_data.get('ibmi_lpar_mobility_capable')
                if c is not None and not c:
                    return False, _('Target system does not have the IBM i'
                                    ' LPAR Mobility Capability.')
            if not self.restrictedio:
                return False, _('IBM i LPAR does not have restricted I/O.')
            if not host_w.get_capability('ibmi_lpar_mobility_capable'):
                return False, _('Source system does not have the IBM i'
                                ' LPAR Mobility Capability.')
        elif self.rmc_state != bp.RMCState.ACTIVE:
            # Non-IBMi partitions require an active RMC connection for LPM.
            return False, _('LPAR does not have an active RMC connection.')
        if self.is_mgmt_partition:
            return False, _('LPAR is the management partition')
        # Both memory and processor DLPAR capabilities are required for LPM.
        c = self.capabilities
        if not (c.mem_dlpar and c.proc_dlpar):
            return False, _('LPAR is not available for LPM due to missing '
                            'DLPAR capabilities.')
        return True, None

    @property
    def migration_state(self):
        """See PartitionMigrationStateEnum.

        e.g. 'Not_Migrating', 'Migration_Starting', 'Migration_Failed', etc.
        Defaults to 'Not_Migrating'
        """
        return self._get_val_str(_LPAR_MIG_STATE, 'Not_Migrating')

    @property
    def rr_enabled(self):
        """Deprecated (n/a for NovaLink) - use srr_enabled instead."""
        import warnings
        warnings.warn(_("This is not the property you are looking for.  Use "
                        "srr_enabled in a NovaLink environment."),
                      DeprecationWarning)
        return None

    @rr_enabled.setter
    def rr_enabled(self, value):
        """Deprecated (n/a for NovaLink) - use srr_enabled instead."""
        import warnings
        warnings.warn(_("This is not the property you are looking for.  Use "
                        "srr_enabled in a NovaLink environment."),
                      DeprecationWarning)

    @property
    def rr_state(self):
        """Deprecated (n/a for NovaLink) - use srr_enabled instead."""
        import warnings
        warnings.warn(_("This is not the property you are looking for.  Use "
                        "srr_enabled in a NovaLink environment."),
                      DeprecationWarning)
        return None

    @property
    def srr_enabled(self):
        """Simplied remote restart.

        :returns: Returns SRR config boolean
        """
        return self._get_val_bool(_LPAR_SRR, False)

    @srr_enabled.setter
    def srr_enabled(self, value):
        self.set_parm_value(_LPAR_SRR, u.sanitize_bool_for_api(value),
                            attrib=pc.ATTR_KSV120)

    @property
    def restrictedio(self):
        # Whether this is a restricted I/O partition (IBMi LPM prereq).
        return self._get_val_bool(_LPAR_RESTRICTED_IO, False)

    @restrictedio.setter
    def restrictedio(self, value):
        self.set_parm_value(_LPAR_RESTRICTED_IO,
                            u.sanitize_bool_for_api(value))

    @property
    def desig_ipl_src(self):
        """Designated IPL Source - see IPLSrc enumeration."""
        return self._get_val_str(_LPAR_DES_IPL_SRC)

    @desig_ipl_src.setter
    def desig_ipl_src(self, value):
        """Designated IPL Source - see IPLSrc enumeration."""
        if value not in IPLSrc.ALL_VALUES:
            raise ValueError(_("Invalid IPLSrc '%s'.") % value)
        self.set_parm_value(_LPAR_DES_IPL_SRC, value)

    def set_uuid(self, value):
        # LPAR uuids must be uppercase.
        up_uuid = str(value).upper()
        super(LPAR, self).set_uuid(up_uuid)
        self.set_parm_value(bp._BP_UUID, up_uuid)


# === pypowervm/wrappers/entry_wrapper.py ===
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for all wrapper classes in the pypowervm.wrappers package.""" import abc from oslo_log import log as logging import re import six import urllib from pypowervm import adapter as adpt import pypowervm.const as pc import pypowervm.entities as ent from pypowervm.i18n import _ from pypowervm import util import pypowervm.utils.uuid as pvm_uuid LOG = logging.getLogger(__name__) def _indirect_child_elem(wrap, indirect): if indirect is None: return wrap.element else: return ent.Element(indirect, wrap.adapter, children=[wrap.element]) @six.add_metaclass(abc.ABCMeta) class Wrapper(object): """Base wrapper object that subclasses should extend. Provides base support for operations. Will define a few methods that need to be overridden by subclasses. """ # See pvm_type decorator schema_type = None default_attrib = None schema_ns = None has_metadata = False # Registers PowerVM object wrappers by their schema type. # {schema_type_string: # {'entry': entry_wrapper_class, # 'element': element_wrapper_class}, # ...} # E.g. {'SharedEthernetAdapter': # {'entry': pypowervm.wrappers.network.SEA}, # 'LogicalUnit': # {'entry': pypowervm.wrappers.storage.LUEnt, # 'element': pypowervm.wrappers.storage.LU}, # ...} # Some schema types double as a ROOT/CHILD object and a DETAIL within a # ROOT/CHILD). Such schema types will have both 'entry' and 'element' keys # in the sub-dict. Otherwise, only one key will exist accordingly. _pvm_object_registry = {} # Maps a property name to its extended attribute group. Should be sparse - # if a property is not associated with a xag, it can (should) be absent # from this dict. _xag_registry = {} # Allows us to ensure that all Wrapper classes are properly registered via # @[base_]pvm_type. _registered = False @classmethod def base_pvm_type(cls, cls_): """Decorator/method to register a PowerVM base class. Use this instead of @pvm_type on Wrapper subclasses which are not to be instantiated, but are themselves bases for real Wrappers. 
        For example, use @base_pvm_type for BasePartition; and @pvm_type for
        LogicalPartition and VirtualIOServer.

        Use as a decorator with no arguments:

        @Wrapper.base_pvm_type
        class SomeBaseClass(Wrapper):
            ...

        Or use as a method to register a base class explicitly after it has
        been defined:

        Wrapper.base_pvm_type(SomeBaseClass)

        :param cls_: The Wrapper subclass to be decorated/registered.
        :return: cls_
        """
        # @xag_property registers with Wrapper._xag_registry because
        # cls_ hasn't been created yet.  Transfer the created registry to the
        # cls_, merging with any already registered by its bases, and clear
        # Wrapper's registry so it doesn't pollute the next cls_.
        cls_._xag_registry = dict(cls_._xag_registry, **Wrapper._xag_registry)
        cls_._registered = True
        Wrapper._xag_registry = {}
        return cls_

    @classmethod
    def _register_schema_type(cls, schema_type, class_):
        """Register this class according to its schema_type.

        The registry is used to identify the appropriate wrapper class to
        apply to an XML object we receive from the REST server.

        :param schema_type: String schema type of the REST element to
                            register.
        :param class_: Concrete {Entry|Element}Wrapper subclass to associate
                       with the schema_type.
        """
        ent_or_el = 'entry' if issubclass(class_, EntryWrapper) else 'element'
        if schema_type not in Wrapper._pvm_object_registry:
            Wrapper._pvm_object_registry[schema_type] = {}
        Wrapper._pvm_object_registry[schema_type][ent_or_el] = class_

    @classmethod
    def pvm_type(cls, schema_type, has_metadata=None, ns=pc.UOM_NS,
                 attrib=pc.DEFAULT_SCHEMA_ATTR, child_order=None):
        """Decorator for {Entry|Element}Wrappers of PowerVM objects.

        Sets foundational fields used for construction of new wrapper
        instances and pieces thereof.  Registers the decorated class, keyed
        by its schema_type.  This enables the wrap method to return the
        correct subclass even if invoked directly from ElementWrapper or
        EntryWrapper.

        :param schema_type: PowerVM REST API Schema type of the subclass
                            (str).
        :param has_metadata: Indicates whether, when creating and wrapping a
                             fresh adapter.Element, it should have a Metadata
                             child element.
        :param ns: PowerVM REST API Schema namespace of the subclass.
        :param attrib: Default attributes for fresh Element when factory
                       constructor is used.
        :param child_order: Ordered list of the element names of the
                            first-level children of this element/entry.  Used
                            for order-agnostic construction/setting of values.
        """
        def inner(class_):
            # Base stuff first (e.g. register extended attribute groups).
            cls.base_pvm_type(class_)
            class_.schema_type = schema_type
            if has_metadata is not None:
                class_.has_metadata = has_metadata
            if ns is not None:
                class_.schema_ns = ns
            if attrib is not None:
                class_.default_attrib = attrib
            if child_order:
                co = list(child_order)
                # Metadata, when present, must always sort first.
                if class_.has_metadata and co[0] != 'Metadata':
                    co.insert(0, 'Metadata')
                class_._child_order = tuple(co)
            cls._register_schema_type(schema_type, class_)
            return class_
        return inner

    @classmethod
    def xag_property(cls, xag):
        """Decorator to tag a @property with an extended attribute group.

        Use this decorator in place of (not in addition to) @property.

        Within class Foo:
            @xag_property('bar')
            def some_prop(self):
                ...
        confers the same property-ness on 'some_prop' as would
            @property
            def some_prop(self):
                ...
        but it also associates some_prop with extended attribute group name
        'bar' such that Foo.get_xag_for_prop('some_prop') returns the value
        'bar'.

        :param xag: String name of the extended attribute group with which the
                    decorated property is associated.  May either be one of
                    the pypowervm.const.XAG enum values; or a member of one of
                    the pypowervm.entities.*XAGs collections (for example, see
                    pypowervm.wrappers.virtual_io_server.phys_vols).
        """
        def wrap(func):
            cls._xag_registry[func.__name__] = str(xag)
            return property(func)
        return wrap

    @classmethod
    def get_xag_for_prop(cls, propname):
        """The extended attribute group name for a property of this Wrapper.
        :param propname: Short (unqualified) name of a property of this
                         Wrapper, as a string.
        :return: String indicating the name of the extended attribute group
                 for the given property.  Should be a pypowervm.const.XAG enum
                 value.  None (not 'None') if there is no xag associated with
                 the specified property.
        """
        return cls._xag_registry.get(propname, None)

    @property
    def child_order(self):
        # Ordered tuple of first-level child element names, as registered via
        # @pvm_type's child_order argument; empty tuple if never registered.
        return getattr(self, '_child_order', ())

    @property
    def adapter(self):
        # The pypowervm.adapter.Adapter associated with the wrapped element.
        return self.element.adapter

    @property
    def traits(self):
        # Shortcut to the API traits of this wrapper's Adapter.
        return self.adapter.traits

    @property
    def uuid(self):
        """Returns the uuid of the entry or element."""
        # The following should only apply to EntryWrappers
        if getattr(self, 'entry', None) is not None:
            return self.entry.uuid

        # Anything with Metadata may have a UUID.  Could do has_metadata
        # check, but that doesn't really add anything.  This will return None
        # if not found.
        return self._get_val_str(pc.UUID_XPATH)

    @uuid.setter
    def uuid(self, value):
        """Sets the UUID (if supported).

        :param value: A valid PowerVM UUID value in either uuid format or
                      string format
        :raise AttributeError: If this wrapper class is not a
                               WrapperSetUUIDMixin and thus does not support
                               setting the UUID.
        """
        if isinstance(self, WrapperSetUUIDMixin):
            self.set_uuid(value)
        else:
            raise AttributeError(_('Cannot set uuid.'))

    def inject(self, subelement, replace=True):
        """Injects subelement as a child element, possibly replacing it.

        This is pypowervm.adapter.Element.inject, with ordering_list always
        set to self.child_order.
        """
        self.element.inject(subelement, self.child_order, replace=replace)

    def _find(self, property_name, use_find_all=False):
        """Will find a given element within the object.

        :param property_name: The property to search within the tree for.
        :param use_find_all: If set to true, will use the find_all method for
                             queries.
""" element = self.element if element is None: return None if use_find_all: found_value = element.findall(property_name) else: found_value = element.find(property_name) return found_value # May be None def _find_or_seed(self, prop_name, attrib=pc.DEFAULT_SCHEMA_ATTR): """Will find the existing element, or create if needed. If the element is not found, it will be added to the child list of this element. :param prop_name: The property name to replace with the new value. :param attrib: The attributes to use for the property. Defaults to the DEFAULT_SCHEM_ATTR. :returns: The existing element, or a newly created one if not found. """ root_elem = self.element # Find existing existing = root_elem.find(prop_name) if existing: return existing else: new_elem = ent.Element(prop_name, self.adapter, attrib=attrib, children=[]) self.inject(new_elem) return new_elem def _get_elem_list(self, tag): """An entities.ElementList for a given tag from within this wrapper. :param tag: The string XML tag of the values to find. :return: An entities.ElementList for the specified tag. """ return ent.ElementList( self.element, tag, ordering_list=self.child_order) def _set_elem_list(self, tag, val_iter): """Set (or replace) the contents of an entities.ElementList. :param tag: The string XML tag of the ElementList to assign. :param val_iter: Iterable of raw (string) values to set. """ ellist = self._get_elem_list(tag) ellist.clear() ellist.extend(val_iter) def replace_list(self, prop_name, prop_children, attrib=pc.DEFAULT_SCHEMA_ATTR, indirect=None): """Replaces a property on this Entry that contains a children list. The prop_children represent the new elements for the property. If the property does not already exist, this will simply append the new children. :param prop_name: The property name to replace with the new value. :param prop_children: A list of ElementWrapper objects that represent the new children for the property list. :param attrib: The attributes to use if the property. 
Defaults to the DEFAULT_SCHEM_ATTR. :param indirect: Name of a schema element which should wrap each of the prop_children. For example, VNIC backing devices look like: ... ... ... ... In this case, invoke this method as: replace_list( 'AssociatedBackingDevices', [], indirect='VirtualNICBackingDeviceChoice') """ new_elem = ent.Element(prop_name, self.adapter, attrib=attrib, children=[_indirect_child_elem(child, indirect) for child in prop_children]) self.inject(new_elem) @abc.abstractproperty def _type_and_uuid(self): """Return the type and uuid of this entry together in one string. This is useful for error messages, logging, etc. """ pass def set_parm_value(self, property_name, value, create=True, attrib=None): """Set a child element value, possibly creating the child. :param property_name: The schema name of the property to set. :param value: The (string) value to assign to the property's 'text'. :param create: If True, and the property is not found, it will be created. Otherwise this method will throw an exception. :param attrib: The element attributes to use if the element is created. """ element_value = self._find(property_name) if element_value is None: self.log_missing_value(property_name) if create: element_value = ent.Element( property_name, self.adapter, ns=self.schema_ns, attrib=attrib, text=str(value)) self.inject(element_value) element_value.text = str(value) def set_float_gb_value(self, property_name, value, create=True): """Special case of set_parm_value for floats of Gigabyte.Type. - Gigabyte.Type can't handle more than 6dp. - Floating point representation issues can mean that e.g. 0.1 + 0.2 produces 0.30000000000000004. - str() rounds to 12dp. So this method converts a float (or float string) to a string with exactly 6dp before storing it in the property. :param property_name: The schema name of the property to set (see set_parm_value). :param value: The floating point number or floating point string to be set. 
        :param create: If True, and the property is not found, it will be
                       created.  Otherwise this method will throw an
                       exception.  (See set_parm_value.)
        """
        self.set_parm_value(property_name,
                            util.sanitize_float_for_api(value, precision=6),
                            create=create)

    def __get_val(self, property_name, default=None, converter=None):
        """Retrieve the value of an element within this wrapper's ElementTree.

        This is the baseline for all the _get_val_{type} methods.

        :param property_name: The name (XPath) of the property to find.
        :param default: The default value to return if the property is not
                        found OR if type conversion fails.
        :param converter: Optional callable accepting a single string
                          parameter and returning a value of some other type.
                          The converter callable should raise ValueError if
                          conversion fails.
        :return: The (possibly converted) value corresponding to the
                 identified property.
        """
        element_value = self._find(property_name)
        if element_value is None:
            self.log_missing_value(property_name)
            return default

        text = element_value.text
        if text is None:
            return default

        # NOTE(review): exact 'str' check means py2 unicode text is not
        # stripped - presumably deliberate for the six-era code base; confirm
        # before changing to isinstance.
        if type(text) is str:
            text = text.strip()

        if callable(converter):
            try:
                return converter(text)
            except ValueError:
                message = (_(
                    "Cannot convert %(property_name)s='%(value)s' in object "
                    "%(pvmobject)s") % {"property_name": property_name,
                                        "value": text,
                                        "pvmobject": self._type_and_uuid})
                LOG.error(message)
                # Conversion failures are logged, not raised.
                return default
        return text

    def _get_vals(self, property_name):
        """Gets a list of values from PowerVM.

        :param property_name: property to return
        :returns: List of strings containing property values.  No type
                  conversion is done.  If no elements are found, the empty
                  list is returned (as opposed to None).
        """
        values = []
        elements = self._find(property_name, use_find_all=True)
        if elements is not None:
            for element in elements:
                values.append(element.text)
        return values

    def _get_val_bool(self, property_name, default=False):
        """Gets the boolean value of a PowerVM property.

        :param property_name: property to return
        :param default: The value to return if the property is not found in
                        the data
        :return: If the property exists in the data and has the value 'true'
                 or 'false' (case-insensitive), then the corresponding boolean
                 value will be returned.
                 If the property does not exist, then the default value will
                 be returned if specified, otherwise False will be returned.
        """
        def str2bool(bool_str):
            return str(bool_str).lower() == 'true'
        return self.__get_val(property_name, default=default,
                              converter=str2bool)

    def _get_val_int(self, property_name, default=None):
        """Gets the integer value of a PowerVM property.

        :param property_name: property to find
        :param default: Value to return if property is not found.  Defaults to
                        None (which is not an int - plan accordingly).
        :return: Integer (int) value of the property if it is found and it is
                 a valid integer; otherwise default.  (Conversion failures are
                 logged and yield default - no exception is raised; see
                 __get_val.)
        """
        return self.__get_val(property_name, default=default, converter=int)

    def _get_val_float(self, property_name, default=None):
        """Gets the float value of a PowerVM property.

        :param property_name: property to find
        :param default: Value to return if property is not found.  Defaults to
                        None (which is not a float - plan accordingly).
        :return: float value of the property if it is found and it is a valid
                 float; otherwise default.  (Conversion failures are logged
                 and yield default - no exception is raised; see __get_val.)
        """
        return self.__get_val(property_name, default=default, converter=float)

    def _get_val_str(self, property_name, default=None):
        """Gets the string value of a PowerVM property.

        :param property_name: property to find
        :param default: Value to return if property is not found.  Defaults to
                        None (which is not a str - plan accordingly).
        :return: str value of the property if it is found.  May be the empty
                 string.
        """
        return self.__get_val(property_name, default=default, converter=None)

    def _get_val_percent(self, property_name, default=None):
        """Gets the value in float-percentage format of a PowerVM property.
        :param property_name: property to find
        :param default: Value to return if property is not found.  Defaults to
                        None (which is not a float - plan accordingly).
        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.  % in the property is optional.
        """
        def str2percent(percent_str):
            if percent_str:
                # Extract the first numeric token; tolerates a trailing '%'.
                percent_str = re.findall(r"\d*\.?\d+", percent_str)[0]
                return (float(percent_str))/100
            else:
                return None
        return self.__get_val(property_name, default=default,
                              converter=str2percent)

    def log_missing_value(self, param):
        # Trace-level only: a missing element is routine for sparse objects.
        LOG.trace('The expected parameter of %(param)s was not found in '
                  '%(identifier)s' % {"param": param,
                                      "identifier": self._type_and_uuid})

    def get_href(self, propname, one_result=False):
        """Returns the hrefs from AtomLink elements.

        :param propname: The name of the schema element containing the 'href'
                         attribute.
        :param one_result: If True, we are expecting exactly one result, and
                           will return that one (string) result, or None.  If
                           False (the default), we will return a tuple of
                           strings which may be empty.
        """
        ret_links = []
        elements = self._find(propname, use_find_all=True)

        # Loop through what was found, if anything
        if elements:
            for atomlink in elements:
                # If the element doesn't have an href, ignore it.
                try:
                    ret_links.append(atomlink.attrib['href'])
                except KeyError:
                    pass

        if one_result:
            if len(ret_links) == 1:
                return ret_links[0]
            else:
                return None
        # Otherwise return a (possibly empty) tuple of the results
        return tuple(ret_links)

    def set_href(self, propname, href):
        """Finds or creates the (single) named property and sets its href.

        If the indicated element does not exist, it (and any necessary interim
        parent elements) will be created.  If any intervening path is non-
        unique, any new element paths will be created under the first one.

        :param propname: XPath to the property.
        :param href: The URI value to assign to the href attribute.
                     rel=related is automatically assigned.
""" links = self._find(propname, use_find_all=True) if len(links) > 1: msg = _('Refusing set href over multiple links.\nPath: %{path}s\n' 'Number of links found: %{nlinks}d') raise ValueError(msg % {'path': propname, 'nlinks': len(links)}) if len(links) == 1: link = links[0] else: # Not found - create the property pathtoks = propname.split(util.XPATH_DELIM) append_point = self while len(pathtoks) > 1: next_prop = pathtoks.pop(0) new_el = append_point._find(next_prop) if new_el is None: new_el = ent.Element(next_prop, self.adapter) append_point.element.inject( new_el, ordering_list=self.child_order) append_point = ElementWrapper.wrap(new_el) link = ent.Element(pathtoks[-1], self.adapter) append_point.element.inject(link, ordering_list=self.child_order) # At this point we have found or created the propname element. Its # handle is in the link var. link.attrib['href'] = href link.attrib['rel'] = 'related' def toxmlstring(self, pretty=False): """Produce an XML dump of this Wrapper's Element. :param pretty: If True, format the XML in a visually-pleasing manner. :return: An XML string representing this Element. """ return self.element.toxmlstring(pretty=pretty) @classmethod def _bld_element(cls, adapter, tag=None, has_metadata=has_metadata, ns=schema_ns, attrib=default_attrib): """Create a fresh entities.Element, usually for immediate wrapping. :param adapter: The entities.Adapter to be consulted for traits, etc. :param tag: Property name of the new Element. :param has_metadata: If True, a child will be created. :param ns: Namespace to use. :param attrib: XML attributes to use in the outer Element. :return: A fresh adapter.Element. 
""" # TODO(efried): Get rid of this method - fold it into Element._bld() tag = cls.schema_type if tag is None else tag # Make sure the call was either through a legal wrapper or explicitly # specified a tag name if tag is None: raise TypeError(_("Refusing to construct and wrap an Element " "without a tag.")) has_metadata = (cls.has_metadata if has_metadata is None else has_metadata) ns = cls.schema_ns if ns is None else ns attrib = cls.default_attrib if attrib is None else attrib children = [] if has_metadata: children.append( ent.Element('Metadata', adapter, ns=ns, children=[ ent.Element('Atom', adapter, ns=ns)])) return ent.Element(tag, adapter, ns=ns, attrib=attrib, children=children) @classmethod def _class_for_element(cls, element): """Discover and return an appropriate *Wrapper subclass for element. :param element: An adapter.Element to introspect :return: A Wrapper subclass (the class, not an instance). If element represents a known subclass, it is returned; else the invoking class is returned. """ # Extract tag. Let this raise AttributeError - means the Element is # invalid. schema_type = element.tag # Is it a registered wrapper class? try: return Wrapper._pvm_object_registry[schema_type][ 'entry' if issubclass(cls, EntryWrapper) else 'element'] except KeyError: return cls def _bld_link_list(self, container_type, links): """Creates an element with a list of children. :param container_type: The element that will contain the elements. :param links: The set of strings which are link elements. 
""" new_elems = [] for item in links: new_elems.append(ent.Element('link', self.adapter, attrib={ 'href': item, 'rel': 'related'})) return ent.Element(container_type, self.adapter, children=new_elems) class EntryWrapper(Wrapper): """Base Wrapper for the Entry object types.""" # If it's an Entry, it must be a ROOT or CHILD has_metadata = True def __init__(self, entry, etag=None): self.entry = entry self._etag = etag @classmethod def getter(cls, adapter, entry_uuid=None, parent_class=None, parent_uuid=None, xag=None, parent=None): """Return EntryWrapperGetter or FeedGetter for this EntryWrapper type. Parameters are the same as described by EntryWrapperGetter.__init__ If entry_uuid is None, a FeedGetter is returned. Otherwise, an EntryWrapperGetter is returned. """ if entry_uuid is None: return FeedGetter( adapter, cls, parent=parent, parent_class=parent_class, parent_uuid=parent_uuid, xag=xag) else: return EntryWrapperGetter( adapter, cls, entry_uuid, parent=parent, parent_class=parent_class, parent_uuid=parent_uuid, xag=xag) @classmethod def _bld(cls, adapter, tag=None, has_metadata=None, ns=None, attrib=None): """Create a fresh EntryWrapper. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param tag: XML tag for the EntryWrapper's Entry's root Element. :param has_metadata: If True, a basic child is created under the root element. :param ns: XML namespace for the contents. :param attrib: XML attributes for the root element. """ element = cls._bld_element( adapter, tag, has_metadata=has_metadata, ns=ns, attrib=attrib) return cls(ent.Entry({'title': element.tag}, element.element, adapter)) @classmethod def wrap(cls, response_or_entry, etag=None): """Creates an entry (or list) from an adapter.Response or Entry. If response is specified and is a feed, a list of EntryWrapper will be returned. The entries within the feed are not guaranteed to have etags (e.g. from non-uom elements). Otherwise, a single EntryWrapper will be returned. This is NOT a list. 
        This method should usually be invoked from an EntryWrapper subclass
        decorated by Wrapper.pvm_type, and an instance of that subclass will
        be returned.

        If invoked directly from EntryWrapper, we attempt to detect whether an
        appropriate subclass exists based on the Entry's Element's tag.  If
        so, that subclass is used; otherwise a generic EntryWrapper is used.

        :param response_or_entry: The Response from an adapter.Adapter.read
                                  request, or an existing adapter.Entry to
                                  wrap.
        :returns: A list of wrappers if response_or_entry is a Response with a
                  Feed.  A single wrapper if response_or_entry is an Entry or
                  a Response with an Entry.
        """
        # Process Response if specified.  This recursively calls this method
        # with the entry(s) within the Response.
        if isinstance(response_or_entry, adpt.Response):
            if response_or_entry.entry is not None:
                return cls.wrap(
                    response_or_entry.entry, etag=response_or_entry.etag)
            elif response_or_entry.feed is not None:
                return [cls.wrap(entry, etag=entry.etag)
                        for entry in response_or_entry.feed.entries]
            else:
                raise KeyError(_("Response is missing 'entry' property."))

        # Else process Entry if specified
        if isinstance(response_or_entry, ent.Entry):
            # If schema_type is set, cls represents a legal subclass - use it.
            # Otherwise, try to discover an appropriate subclass based on the
            # element.  If that fails, it will default to the invoking class,
            # which will usually just be EntryWrapper.
            wcls = (cls._class_for_element(response_or_entry.element)
                    if cls.schema_type is None
                    else cls)
            return wcls(response_or_entry, etag)

        # response_or_entry is neither a Response nor an Entry
        fmt = _("Must supply a Response or Entry to wrap. Got %s")
        raise TypeError(fmt % str(type(response_or_entry)))

    def refresh(self, use_etag=True):
        """Fetch the latest version of the entry from the REST API server.

        If the entry has not been updated on the server, self is returned
        unchanged.  Otherwise a new, fresh wrapper instance is returned.
        Generally, this should be used as:
            wrapper_instance = wrapper_instance.refresh()

        :param use_etag: (Optional) If False, the object's etag will not be
                         sent with the request, ensuring that the object is
                         retrieved afresh from the server.
        :return: EntryWrapper representing the latest data from the REST API
                 server.  If the input wrapper contains etag information and
                 the server responds 304 (Not Modified), the original wrapper
                 is returned.  Otherwise, a fresh EntryWrapper of the
                 appropriate type is returned.
        """
        etag = self.etag if use_etag else None
        resp = self.adapter.read_by_href(self.href, etag=etag)
        # 304 Not Modified: the cached copy (self) is still current.
        if resp.status == pc.HTTPStatus.NO_CHANGE:
            return self
        return self.wrap(resp)

    @classmethod
    def get(cls, adapter, uuid=None, parent_type=None, parent_uuid=None,
            parent=None, **read_kwargs):
        """GET and wrap an entry or feed of this type.

        Shortcut to EntryWrapper.wrap(adapter.read(...)).

        For example, retrieving a ROOT object:
            resp = adapter.read(VIOS.schema_type, root_id=v_uuid, xag=xags)
            vwrap = VIOS.wrap(resp)
        Becomes:
            vwrap = VIOS.get(adapter, uuid=v_uuid, xag=xags)

        Or retrieving a CHILD feed:
            resp = adapter.read(System.schema_type, root_id=sys_uuid,
                                child_type=VSwitch.schema_type)
            vswfeed = VSwitch.wrap(resp)
        Becomes:
            vswfeed = VSwitch.get(adapter, parent=sys)
        Or:
            vswfeed = VSwitch.get(adapter, parent_type=System,
                                  parent_uuid=sys_uuid)

        :param cls: A subclass of EntryWrapper.  Its schema_type will be used
                    as the first argument to adapter.read()
        :param adapter: The pypowervm.adapter.Adapter instance through which
                        to perform the GET.
        :param uuid: If retrieving a single entry, specify its string UUID.
                     For ROOT objects, you may specify either uuid or root_id;
                     for CHILD objects, you may specify either uuid or
                     child_id.
        :param parent_type: If the invoking class represents a CHILD, specify
                            either the parent parameter or BOTH parent_type
                            and parent_uuid.  This parameter may be either the
                            schema_type or the EntryWrapper subclass of the
                            parent ROOT object.
        :param parent_uuid: If the invoking class represents a CHILD, specify
                            either the parent parameter or BOTH parent_type
                            and parent_uuid.  This parameter indicates the
                            UUID of the parent ROOT object.  Do not use the
                            root_id parameter.
        :param parent: If the invoking class represents a CHILD, specify
                       either the parent parameter or BOTH parent_type and
                       parent_uuid.  This parameter is an EntryWrapper
                       representing the parent ROOT object of the CHILD to be
                       retrieved.
        :param read_kwargs: Any arguments to be passed directly through to
                            Adapter.read().
        :return: An EntryWrapper (or list thereof) around the requested REST
                 object.  (Note that this may not be of the type from which
                 the method was invoked, e.g. if the child_type parameter is
                 used.)
        """
        parent_type, parent_uuid = util.parent_spec(parent, parent_type,
                                                    parent_uuid)
        if parent_type is not None:
            # CHILD mode
            resp = cls._read_child(adapter, parent_type, parent_uuid, uuid,
                                   read_kwargs)
        else:
            # ROOT mode
            if any(k in read_kwargs for k in ('child_type', 'child_id')):
                raise ValueError(_("Developer error: specify 'parent' or "
                                   "('parent_type' and 'parent_uuid') to "
                                   "retrieve a CHILD object."))
            if uuid is not None:
                if 'root_id' in read_kwargs:
                    raise ValueError(_("Specify either 'uuid' or 'root_id' "
                                       "when requesting a ROOT object."))
                read_kwargs['root_id'] = uuid
            resp = adapter.read(cls.schema_type, **read_kwargs)
        return cls.wrap(resp)

    @classmethod
    def _read_child(cls, adapter, parent_type, parent_uuid, uuid,
                    read_kwargs):
        """Helper method for 'get' to read CHILD feed or entry Response.

        Params are as described in the 'get' method.
""" if parent_uuid is None: raise ValueError(_("Both parent_type and parent_uuid are required " "when retrieving a CHILD feed or entry.")) if 'root_id' in read_kwargs: raise ValueError(_("Specify the parent's UUID via the parent_uuid " "parameter.")) if uuid is not None: if 'child_id' in read_kwargs: raise ValueError(_("Specify either 'uuid' or 'child_id' when " "requesting a CHILD object.")) read_kwargs['child_id'] = uuid # Accept parent_type as either EntryWrapper subclass or string if not isinstance(parent_type, str): parent_type = parent_type.schema_type return adapter.read(parent_type, root_id=parent_uuid, child_type=cls.schema_type, **read_kwargs) @classmethod def get_by_href(cls, adapter, href, **rbh_kwargs): """Get a wrapper or feed given a URI. This can be useful for retrieving wrappers "associated" with other wrappers, where the association is provided via an atom link. Some examples are TrunkAdapter.associated_vswitch_uri and VNICBackDev.vios_href. :param adapter: A pypowervm.adapter.Adapter instance for REST API communication. :param href: The string URI (including scheme://host:port/) of the entry or feed to retrieve. :param rbh_kwargs: Keyword arguments to be passed directly to Adapter's read_by_href method. :return: EntryWrapper subclass of the appropriate type, or a list thereof, representing the entry/feed associated with the href parameter. """ return cls.wrap(adapter.read_by_href(href, **rbh_kwargs)) @classmethod def search(cls, adapter, negate=False, xag=None, parent_type=None, parent_uuid=None, one_result=False, parent=None, **kwargs): """Performs a REST API search. Searches for object(s) of the type indicated by cls having (or not having) the key/value indicated by the (single) kwarg. Regular expressions, comparators, and logical operators are not supported. :param cls: A subclass of EntryWrapper. 
                    The wrapper class may define a search_keys member, which
                    is a dictionary mapping a @property getter method name to
                    a search key supported by the REST API for that object
                    type.  To retrieve an XML report of the supported search
                    keys for object Foo, perform:
                    read('Foo', suffix_type='search').
                    If the wrapper class does not define a search_keys member,
                    OR if xag is None, the fallback search algorithm performs
                    a GET of the entire feed of the object type and loops
                    through it looking for (mis)matches on the @property
                    indicated by the search key.
        :param adapter: The pypowervm.adapter.Adapter instance through which
                        to perform the search.
        :param negate: If True, the search is negated - we find all objects of
                       the indicated type where the search key does *not*
                       equal the search value.
        :param xag: List of extended attribute group names.
        :param parent_type: If searching for CHILD objects, specify either the
                            parent parameter or BOTH parent_type and
                            parent_uuid.  This parameter indicates the parent
                            ROOT object.  It may be either the string schema
                            type or the corresponding EntryWrapper subclass.
        :param parent_uuid: If searching for CHILD objects, specify either the
                            parent parameter or BOTH parent_type and
                            parent_uuid.  This parameter specifies the UUID of
                            the parent ROOT object.  If parent_type is
                            specified, but parent_uuid is None, all parents of
                            the ROOT type will be searched.  This may result
                            in a slow response time.
        :param one_result: Use when expecting (at most) one search result.  If
                           True, this method will return the first element of
                           the search result list, or None if the search
                           produced no results.
        :param parent: If searching for CHILD objects, specify either the
                       parent parameter or BOTH parent_type and parent_uuid.
                       This parameter is an EntryWrapper instance indicating
                       the parent ROOT object.
        :param kwargs: Exactly one key=value.  The key must correspond to a
                       key in cls.search_keys and/or the name of a getter
                       @property on the EntryWrapper subclass.
                       Due to limitations of the REST API, if specifying xags
                       or searching for a CHILD, the key must be the name of a
                       getter @property.
                       The value is the value to match.
        :return: If one_result=False (the default), a list of instances of the
                 cls.  The list may be empty (no results were found).  It may
                 contain more than one instance (e.g. for a negated search, or
                 for one where the key does not represent a unique property of
                 the object).
                 If one_result=True, returns a single instance of cls, or None
                 if the search produced no results.
        """
        def list_or_single(results, single):
            """Returns either the results list or its first entry.

            :param results: The list of results from the search.  May be
                            empty.  Must not be None.
            :param single: If False, return results unchanged.  If True,
                           return only the first entry in the results list, or
                           None if results is empty.
            """
            if not single:
                return results
            return results[0] if results else None

        try:
            parent_type, parent_uuid = util.parent_spec(parent, parent_type,
                                                        parent_uuid)
        except ValueError:
            # Special case where we allow parent_type without parent_uuid.
            # The reverse is caught by the check below.
            # NOTE(review): exact type check (not isinstance) - presumably a
            # six-era py2/py3 consideration; confirm before changing.
            if parent_type is not None and type(parent_type) is not str:
                parent_type = parent_type.schema_type

        # parent_uuid makes no sense without parent_type
        if parent_type is None and parent_uuid is not None:
            raise ValueError(_('Parent UUID specified without parent type.'))

        if len(kwargs) != 1:
            raise ValueError(_('The search() method requires exactly one '
                               'key=value argument.'))
        key, val = kwargs.popitem()

        try:
            # search API does not support xag or CHILD
            if xag is not None or parent_type is not None:
                # Cheater's way to cause _search_by_feed to be invoked
                raise AttributeError()
            search_key = cls.search_keys[key]
        except (AttributeError, KeyError):
            # Fallback search by [GET feed] + loop
            return list_or_single(
                cls._search_by_feed(adapter, cls.schema_type, negate, key,
                                    val, xag, parent_type, parent_uuid),
                one_result)

        op = '!=' if negate else '=='
        quote = urllib.parse.quote if six.PY3 else urllib.quote
        search_parm = "(%s%s'%s')" % (search_key, op,
                                      quote(str(val), safe=''))
        # Let this throw HttpError if the caller got it wrong.
        # Note that this path will only be hit for ROOTs.
return list_or_single( cls.wrap(cls._read_parent_or_child( adapter, cls.schema_type, parent_type, parent_uuid, suffix_type='search', suffix_parm=search_parm)), one_result) @classmethod def _search_by_feed(cls, adapter, target_type, negate, key, val, xag, parent_type, parent_uuid): if not hasattr(cls, key): raise ValueError(_("Wrapper class %(class)s does not support " "search key '%(key)s'.") % {'class': cls.__name__, 'key': key}) feedwrap = cls.wrap(cls._read_parent_or_child(adapter, target_type, parent_type, parent_uuid, xag=xag)) retlist = [] val = str(val) for entry in feedwrap: entval = str(getattr(entry, key, None)) include = (entval != val) if negate else (entval == val) if include: retlist.append(entry) return retlist @staticmethod def _read_parent_or_child(adapter, target_type, parent_type, parent_uuid, **kwargs): if parent_type is None: # ROOT feed search return adapter.read(target_type, **kwargs) if parent_uuid is not None: # CHILD of a specific ROOT return adapter.read(parent_type, root_id=parent_uuid, child_type=target_type, **kwargs) # Search all ROOTs of the specified type. ret = None # Wishing there was a quick URI to get all UUIDs. # Let EntryWrapper.wrap figure out the wrapper type. Whatever it # is, the uuid @property is available. for parent in EntryWrapper.wrap(adapter.read(parent_type)): resp = adapter.read( parent_type, root_id=parent.uuid, child_type=target_type, **kwargs) # This is a bit of a cheat. Technically extending the feed of # a Response doesn't result in a legal Response (the rest of # the metadata won't accurately reflect the feed). However # this is guaranteed only to be used immediately by wrap() to # extract the Entrys. if ret is None: ret = resp else: ret.feed.entries.extend(resp.feed.entries) return ret def create(self, parent_type=None, parent_uuid=None, timeout=-1, parent=None): """Performs an adapter.create (REST API PUT) with this wrapper. 
:param parent_type: If creating a CHILD, specify either the parent parameter or BOTH parent_type and parent_uuid. This parameter may be either the schema_type or the EntryWrapper subclass of the parent ROOT object. :param parent_uuid: If creating a CHILD, specify either the parent parameter or BOTH parent_type and parent_uuid. This parameter indicates the UUID of the parent ROOT object. :param timeout: (Optional) Integer number of seconds after which to time out the PUT request. -1, the default, causes the request to use the timeout value configured on the Session belonging to the Adapter. :param parent: If creating a CHILD, specify either the parent parameter or BOTH parent_type and parent_uuid. This parameter is an EntryWrapper representing the parent ROOT object of the CHILD to be created. :return: New EntryWrapper of the invoking class representing the PUT response. """ service = pc.SERVICE_BY_NS[self.schema_ns] parent_type, parent_uuid = util.parent_spec(parent, parent_type, parent_uuid) if parent_type is None and parent_uuid is None: # ROOT resp = self.adapter.create(self, self.schema_type, service=service, timeout=timeout) else: # CHILD resp = self.adapter.create( self, parent_type, root_id=parent_uuid, child_type=self.schema_type, service=service, timeout=timeout) return self.wrap(resp) def delete(self): """Performs an adapter.delete (REST API DELETE) with this wrapper.""" self.adapter.delete_by_href(self.href, etag=self.etag) # TODO(IBM): Remove deprecated xag parameter def update(self, xag='__DEPRECATED__', timeout=-1, force=False): """Performs adapter.update of this wrapper. :param xag: DEPRECATED - do not use. :param timeout: (Optional) Integer number of seconds after which to time out the POST request. -1, the default, causes the request to use the timeout value configured on the Session belonging to the Adapter. :param force: True if this is called as part of force resize. :return: The updated wrapper, per the response from the Adapter.update. 
""" if xag != '__DEPRECATED__': import warnings warnings.warn( _("The 'xag' parameter to EntryWrapper.update is deprecated! " "At best, using it will result in a no-op. At worst, it " "will give you incurable etag mismatch errors."), DeprecationWarning) if timeout == -1: # Override default timeout to 60 minutes unless the Session is # configured for longer already. timeout = max(60 * 60, self.adapter.session.timeout) # adapter.update_by_path expects the path (e.g. # '/rest/api/uom/Object/UUID'), not the whole href. path = util.dice_href(self.href, include_fragment=False) if force: path = adpt.Adapter.extend_path(path, add_qp=[('force', 'true')]) return self.wrap(self.adapter.update_by_path(self, self.etag, path, timeout=timeout)) @property def element(self): return self.entry.element @property def etag(self): return self._etag @property def href(self): """Finds the reference to the entity. Assumes that the entity has a link element that references self. If it does not, returns None. """ return self.entry.self_link @property def related_href(self): """Returns the URI to be used for references in other elements. This will return a root URI (no extended attributes, no fragments). This should be used as needed to support entries/elements that have relationships to others. """ temp_href = self.href return util.dice_href(temp_href, include_scheme_netloc=True, include_query=False, include_fragment=False) @property def _type_and_uuid(self): """Return the type and uuid of this entry together in one string. This is useful for error messages, logging, etc. """ entry_type = self.schema_type uuid = self.uuid if entry_type is None: entry_type = self.__class__.__name__ if uuid is None: uuid = "UnknownUUID" return entry_type + ":" + uuid class ElementWrapper(Wrapper): """Base wrapper for Elements.""" # If it's an Element, it's *probably* a DETAIL. (Redundant assignment, # but prefer to be explicit.) 
has_metadata = False @classmethod def _bld(cls, adapter, tag=None, has_metadata=None, ns=None, attrib=None): """Create a fresh ElementWrapper. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param tag: XML tag for the ElementWrapper's root Element. :param has_metadata: If True, a basic child is created under the root element. :param ns: XML namespace for the contents. :param attrib: XML attributes for the root element. """ ret = cls() ret.element = cls._bld_element( adapter, tag=tag, has_metadata=has_metadata, ns=ns, attrib=attrib) return ret @classmethod def wrap(cls, element, **kwargs): """Wrap an existing adapter.Element OR construct a fresh one. This method should usually be invoked from an ElementWrapper subclass decorated by Wrapper.pvm_type, and an instance of that subclass will be returned. If invoked directly from ElementWrapper, we attempt to detect whether an appropriate subclass exists based on the Element's tag. If so, that subclass is used; otherwise a generic ElementWrapper is used. :param element: An existing adapter.Element to wrap. :param **kwargs: Arbitrary attributes to set on the new ElementWrapper. :returns: An ElementWrapper (subclass) instance containing the element. """ wcls = (cls._class_for_element(element) if cls.schema_type is None else cls) wrap = wcls() wrap.element = element for key, val in kwargs.items(): setattr(wrap, key, val) return wrap @property def _type_and_uuid(self): """Return the type of this element. This is useful for error messages, logging, etc. """ entry_type = self.schema_type if entry_type is None: entry_type = self.__class__.__name__ return entry_type def __eq__(self, other): """Tests equality.""" return self.element == other.element def __hash__(self): """Hash value. Necessary to be overwritten because of the side effect in Python 3.x of overwriting the __eq__ method causing an object to be unhashable. 
""" return super(ElementWrapper, self).__hash__() class WrapperElemList(list): """The wrappers can create complex Lists (from a Group from the response). The lists that they wrap tend to be generated on each 'get' from the property. This list allows for modification of the 'wrappers' that get returned, which update the backing elements. This is not a full implementation of a list. Only the 'common use' methods are supported Functions that are provided: - Getting via index (ex. list[1]) - Obtaining the length (ex. len(list)) - Extending the list (ex. list.extend(other_list)) - Appending to the list (ex. list.append(other_elem)) - Removing from the list (ex. list.remove(other_elem)) """ def __init__(self, root_elem, child_class=None, indirect=None, **kwargs): """Creates a new list backed by an Element anchor and child type. :param root_elem: The container element. Should be the backing element, not a wrapper. Ex. The element for 'SharedEthernetAdapters'. :param child_class: The child class (subclass of ElementWrapper). This is optional. If not specified, will wrap all children elements. :param indirect: Name of schema layer to ignore between root_elem and the target child_class. This is for schema structures such as: ... ... ... In this case, we want WrapperElemList to return [IOAdapter, SRIOVAdapter, ...] ...ignoring the intervening layer, so we would set indirect='IOAdapterChoice'. Note that we rely upon the intervening layer (in this example, IOAdapterChoice) to contain nothing but the target element type - not even . :param kwargs: Optional additional named arguments that may be passed into the wrapper on creation. """ self.root_elem = root_elem if child_class is not None: self.child_class = child_class else: # Default to the ElementWrapper, which should resolve to the # appropriate class type. 
self.child_class = ElementWrapper self.indirect = indirect self.injects = kwargs def __find_elems(self): root_elems = self.root_elem.findall( self.indirect) if self.indirect else [self.root_elem] found = [] for root_elem in root_elems: if (self.child_class is not None and self.child_class is not ElementWrapper): found.extend(root_elem.findall(self.child_class.schema_type)) else: found.extend(list(root_elem)) return found def __getitem__(self, idx): if isinstance(idx, slice): all_elems = self.__find_elems() all_elems = all_elems[idx.start:idx.stop:idx.step] return [self.child_class.wrap(x, **self.injects) for x in all_elems] elem = self.__find_elems()[idx] return self.child_class.wrap(elem, **self.injects) def index(self, value): elems = self.__find_elems() return elems.index(value.element) def __getslice__(self, i, j): elems = self.__find_elems() return [self.child_class.wrap(x, **self.injects) for x in elems[i:j]] def __len__(self, *args, **kwargs): return len(self.__find_elems()) def __iter__(self): elems = self.__find_elems() for elem in elems: yield self.child_class.wrap(elem, **self.injects) def __str__(self): return '[' + ', '.join([str(self.child_class.wrap( elem, **self.injects)) for elem in self.__find_elems()]) + ']' def __repr__(self): return '[' + ', '.join([repr(self.child_class.wrap( elem, **self.injects)) for elem in self.__find_elems()]) + ']' def __contains__(self, item): elems = self.__find_elems() return item.element in elems def extend(self, seq): for elem in seq: self.append(elem) def append(self, elem): self.root_elem.element.append( _indirect_child_elem(elem, self.indirect).element) def remove(self, elem): find_elem = _indirect_child_elem(elem, self.indirect) # Try this way first...if there is a value error, that means # that the identical element isn't here...need to try 'functionally # equivalent' -> slower... try: self.root_elem.remove(find_elem) return except ValueError: pass # Onto the slower path. 
class ActionableList(list):
    """A list that invokes a callback after every supported mutation.

    Lower-level modifications (ex. list[5] = other_elem) are not
    intercepted; only extend, append, remove, insert and pop trigger the
    callback.
    """

    def __init__(self, list_data, action):
        """Create the actionable list.

        :param list_data: The initial list contents.
        :param action: Callable invoked with this list (post-modification)
                       after each supported mutating operation.
        """
        super(ActionableList, self).__init__(list_data)
        self.action = action

    def _mutate(self, operation, *args):
        """Run a list mutation, then fire the callback."""
        result = operation(*args)
        self.action(self)
        return result

    def extend(self, seq):
        return self._mutate(super(ActionableList, self).extend, seq)

    def append(self, elem):
        return self._mutate(super(ActionableList, self).append, elem)

    def remove(self, elem):
        return self._mutate(super(ActionableList, self).remove, elem)

    def insert(self, index, obj):
        return self._mutate(super(ActionableList, self).insert, index, obj)

    def pop(self, index=-1):
        return self._mutate(super(ActionableList, self).pop, index)
8-4-4-4-12) """ if not self.has_metadata: raise AttributeError( _('Cannot set UUID on Wrapper with no Metadata.')) # Step 1: sanitize uuid value s_uuid = str(new_uuid) if s_uuid != pvm_uuid.convert_uuid_to_pvm(s_uuid): raise ValueError(_('uuid value not valid: %s') % new_uuid) # Step 2: (vivify and) set Metadata/Atom/AtomID atom = self._find('Metadata/Atom') atomid = atom.find('AtomID') if atomid is None: atomid = ent.Element('AtomID', self.adapter) atom.append(atomid) atomid.text = s_uuid # Step 3: if an Atom, update the properties['id'] to match try: self.entry.properties['id'] = s_uuid except AttributeError: # No entry (this is an ElementWrapper) - nothing to do. # Note: we don't trap KeyError: if entry.properties is there, but # doesn't have an 'id' key, something is wrong. pass class EntryWrapperGetter(object): """Attribute container with enough information to GET an EntryWrapper. An instance of this class can be used to defer the REST call which fetches a PowerVM object. This will typically be used to initialize a pypowervm.utils.transaction.WrapperTask, or as the first parameter to a method decorated as pypowervm.utils.transaction.entry_transaction, allowing that method to acquire a lock before performing the GET, thus minimizing the probability of out-of-band changes resulting in etag mismatch and requiring a retry. """ def __init__(self, adapter, entry_class, entry_uuid, parent_class=None, parent_uuid=None, xag=None, parent=None): """Create a GET specification for an EntryWrapper. :param adapter: A pypowervm.adapter.Adapter instance through which the GET can be performed. :param entry_class: An EntryWrapper subclass indicating the type of the entry to GET. :param entry_uuid: The string UUID of the entry to GET. :param parent_class: If the target object type is CHILD, specify either the parent parameter or BOTH parent_class and parent_uuid. This param is the EntryWrapper subclass of the ROOT parent object type. 
:param parent_uuid: If the target object type is CHILD, specify either the parent parameter or BOTH parent_class and parent_uuid.this param is the UUID of the ROOT parent object. :param xag: List of extended attribute groups to request on the object. :param parent: If the target object type is CHILD, specify either the parent parameter or BOTH parent_class and parent_uuid. This parameter represents the ROOT parent object. """ def validate_wrapper_type(var): if not issubclass(type(var), type) or not issubclass(var, Wrapper): raise ValueError(_("Must specify a Wrapper subclass.")) self.adapter = adapter validate_wrapper_type(entry_class) self.entry_class = entry_class self.entry_uuid = entry_uuid parent_class, parent_uuid = util.parent_spec(parent, parent_class, parent_uuid) if (parent_class and not parent_uuid) or ( parent_uuid and not parent_class): raise ValueError(_("Must specify both parent class and parent " "UUID, or neither.")) self.parent_class = parent_class self.parent_uuid = parent_uuid self.xag = xag self.cache = None def get(self, refresh=False): """Return the EntryWrapper indicated by this instance. If the EntryWrapper has not yet been retrieved, it is fetched via GET from the REST API. Thereafter, it is cached. Subsequent calls to this method will return the cached copy unless refresh=True, in which case the cached copy is refreshed before returning. :param refresh: (Optional) If True, and the specified EntryWrapper was previously retrieved, it is refreshed before being returned. If False (the default), it is returned without refreshing. If the specified EntryWrapper had not yet been retrieved, this parameter has no effect. :return: The EntryWrapper specified by this EntryWrapperGetter instance. 
""" if self.cache is None: if self.parent_class: root_type = self.parent_class root_id = self.parent_uuid child_type = self.entry_class.schema_type child_id = self.entry_uuid else: root_type = self.entry_class.schema_type root_id = self.entry_uuid child_type = None child_id = None self.cache = self.entry_class.wrap(self.adapter.read( root_type, root_id, child_type=child_type, child_id=child_id, xag=self.xag)) elif refresh: self.cache = self.cache.refresh() return self.cache @property def uuid(self): """Return the UUID of the entry for which this spec was created. This mainly exists so we can ask for wrapper_or_spec.uuid. """ return self.entry_uuid class FeedGetter(EntryWrapperGetter): """Attribute container with enough information to GET an EntryWrapper feed. An instance of this class can be used to defer the REST call which fetches a feed of PowerVM objects (a list of EntryWrapper). This will typically be used to initialize a pypowervm.utils.transaction.FeedTask, allowing the FeedTask to defer the GET as long as possible, thus minimizing the probability of out-of-band changes resulting in etag mismatch and requiring a retry. """ def __init__(self, adapter, entry_class, parent_class=None, parent_uuid=None, xag=None, parent=None): """Create a GET specification for an EntryWrapper feed. :param adapter: A pypowervm.adapter.Adapter instance through which the GET can be performed. :param entry_class: An EntryWrapper subclass indicating the type of the feed to GET. :param parent_class: If the target object type is CHILD, specify either the parent parameter or BOTH parent_class and parent_uuid. This param is the EntryWrapper subclass of the ROOT parent object type. :param parent_uuid: If the target object type is CHILD, specify either the parent parameter or BOTH parent_class and parent_uuid.this param is the UUID of the ROOT parent object. :param xag: List of extended attribute groups to request on the feed. 
:param parent: If the target object type is CHILD, specify either the parent parameter or BOTH parent_class and parent_uuid. This parameter represents the ROOT parent object. """ # Using entry_uuid=None will cause the GET to fetch the feed. super(FeedGetter, self).__init__( adapter, entry_class, None, parent=parent, parent_class=parent_class, parent_uuid=parent_uuid, xag=xag) def get(self, refresh=False, refetch=False): """Return the feed (list of EntryWrappers) indicated by this instance. If the feed has not yet been retrieved, it is fetched via GET from the REST API. Thereafter, it is cached. Subsequent calls to this method will return the cached copy unless refresh or refetch is specified. The refresh option, if True, will cause each entry in the feed to be refreshed if previously cached. The refetch option, if True, will cause the feed to be refetched as a whole. Note: due to the design of the REST server, refetch will generally perform better than refresh. :param refresh: (Optional) If True, and the specified feed was previously retrieved, each entry therein is refreshed before the feed is returned. If the specified feed had not yet been retrieved, this parameter has no effect. If both refresh and refetch are True, refresh takes precedence. :param refetch: (Optional) If True, a fresh GET of the entire feed is performed, regardless of whether the feed was fetched and cached previously. If both refresh and refetch are True, refresh takes precedence. :return: The feed (list of EntryWrappers) specified by this FeedGetter instance. """ # Note: self.cache is the feed (list of EntryWrapper) in the context of # this subclass. Therefore, the superclass's concept of 'refresh' is # no good (it would be trying [ewrap, ...].refresh()). if refresh and self.cache is not None: # Future: parallelize, for what it's worth. new_feed = [ewrap.refresh() for ewrap in self.cache] self.cache = new_feed return self.cache # To refetch, simply wipe the cache before super.get(). 
class UUIDFeedGetter(FeedGetter):
    """Quasi-FeedGetter that builds its "feed" from a list of UUIDs.

    Useful for building FeedTasks when, for example:
    - The FeedTask is operating on an SSP (the VIOSes aren't necessarily
      all in the same feed);
    - The operation is only concerned with one REST object, but a
      WrapperTask is not sufficient.
    """

    def __init__(self, adapter, entry_class, uuid_list, parent_class=None,
                 parent_uuid=None, xag=None, parent=None):
        """Create a UUIDFeedGetter.

        :param adapter: See FeedGetter.
        :param entry_class: See FeedGetter.
        :param uuid_list: Iterable of string UUIDs of the objects with
                          which to populate the quasi-feed.
        :param parent_class: See FeedGetter.
        :param parent_uuid: See FeedGetter.
        :param xag: See FeedGetter.
        :param parent: See FeedGetter.
        """
        super(UUIDFeedGetter, self).__init__(
            adapter, entry_class, parent=parent, parent_class=parent_class,
            parent_uuid=parent_uuid, xag=xag)
        self.uuid_list = uuid_list
        self._create_wrapper_getters()

    def _create_wrapper_getters(self):
        # One EntryWrapperGetter per UUID; together they are the quasi-feed.
        self.wrapper_getters = [
            EntryWrapperGetter(
                self.adapter, self.entry_class, one_uuid,
                parent_class=self.parent_class, parent_uuid=self.parent_uuid,
                xag=self.xag)
            for one_uuid in self.uuid_list]

    def get(self, refresh=False, refetch=False):
        """Get the individual wrappers for each UUID as a 'feed'.

        :param refresh: See FeedGetter.get.
        :param refetch: See FeedGetter.get.
        """
        if refetch:
            # Rebuild the wrapper getters, discarding anything already
            # fetched.
            self._create_wrapper_getters()
        # Populate the quasi-feed from the individual wrapper getters.
        # Future: parallelize, for what it's worth.
        return [getter.get(refresh=refresh)
                for getter in self.wrapper_getters]
class JobStatus(object):
    """Enumeration of Job status strings reported by the REST server."""
    NOT_ACTIVE = 'NOT_STARTED'
    RUNNING = 'RUNNING'
    COMPLETED_OK = 'COMPLETED_OK'
    COMPLETED_WITH_WARNINGS = 'COMPLETED_WITH_WARNINGS'
    COMPLETED_WITH_ERROR = 'COMPLETED_WITH_ERROR'


class PollAndDeleteThread(threading.Thread):
    """Background thread: wait for a Job to finish, then delete it."""

    def __init__(self, job, sensitive):
        """Create the polling thread.

        :param job: The Job wrapper to monitor and clean up.
        :param sensitive: If True, mask the Job payload in the logs.
        """
        super(PollAndDeleteThread, self).__init__()
        self.job = job
        self.sensitive = sensitive

    def run(self):
        # Poll (indefinitely) until the Job leaves RUNNING, then clean up.
        self.job.poll_while_status([JobStatus.RUNNING], 0, self.sensitive)
        self.job.delete_job()
        # The Job is gone either way; but a failure is still worth logging.
        if self.job.job_status != JobStatus.COMPLETED_OK:
            failure = pvmex.JobRequestFailed(
                operation_name=self.job.op,
                error=self.job.get_job_message())
            LOG.error(failure.args[0])


class CancelJobThread(threading.Thread):
    """Background thread: wait for a cancelled Job to stop, then delete it."""

    def __init__(self, job, sensitive):
        """Create the cancel-monitoring thread.

        :param job: The Job wrapper being cancelled.
        :param sensitive: If True, mask the Job payload in the logs.
        """
        super(CancelJobThread, self).__init__()
        self.job = job
        self.sensitive = sensitive

    def run(self):
        # timeout=0 polls indefinitely until the Job terminates.
        self.job._monitor_job(timeout=0, sensitive=self.sensitive)
        self.job.delete_job()
:returns: String containing the job ID """ return self._get_val_str(_JOB_ID) @property def job_status(self): """Gets the job status string. :returns: String containing the job status """ return self._get_val_str(_JOB_STATUS) def get_job_resp_exception_msg(self, default=''): """Gets the job message string from the ResponseException. :returns: String containing the job message or default (defaults to empty string) if not found """ job_message = self._get_val_str(_JOB_MESSAGE, default) if job_message: # See if there is a stack trace to log stack_trace = self._get_val_str(_JOB_STACKTRACE, default) if stack_trace: LOG.error(pvmex.JobRequestFailed(operation_name=self.op, error=stack_trace)) return job_message def get_job_results_message(self, default=''): """Gets the job result message string. :returns: String containing the job result message or default (defaults to empty string) if not found """ message = default parm_names = self._get_vals(_JOB_RESULTS_NAME) parm_values = self._get_vals(_JOB_RESULTS_VALUE) for i in range(len(parm_names)): if parm_names[i] == 'result': message = parm_values[i] break return message def get_job_results_as_dict(self, default=None): """Gets the job results as a dictionary. :returns: Dictionary with result parm names and parm values as key, value pairs. """ results = default if default else {} parm_names = self._get_vals(_JOB_RESULTS_NAME) parm_values = self._get_vals(_JOB_RESULTS_VALUE) for i in range(len(parm_names)): results[parm_names[i]] = parm_values[i] return results def get_job_message(self, default=''): """Gets the job message string. It checks job results message first, if results message is not found, it checks for a ResponseException message. If neither is found, it returns the default. 
:returns: String containing the job message or default (defaults to empty string) if not found """ message = self.get_job_results_message(default=default) if not message: message = self.get_job_resp_exception_msg(default=default) return message def run_job(self, uuid, job_parms=None, timeout=CONF.pypowervm_job_request_timeout, sensitive=False, synchronous=True): """Invokes and polls a job. Adds job parameters to the job element if specified and calls the create_job method. It then monitors the job for completion and sends a JobRequestFailed exception if it did not complete successfully. :param uuid: uuid of the target :param job_parms: list of JobParamters to add :param timeout: maximum number of seconds for job to complete :param sensitive: If True, mask the Job payload in the logs. :param synchronous: If True (the default), wait for the Job to complete or time out. If False, return as soon as the Job starts. Note that this may still involve polling (if the Job is waiting in queue to start), and may still time out (if the Job hasn't started within the requested timeout.) :raise JobRequestFailed: if the job did not complete successfully. :raise JobRequestTimedOut: if the job timed out. """ if job_parms: self.add_job_parameters_to_existing(*job_parms) try: self.entry = self.adapter.create_job( self.entry.element, self._get_val_str(_JOB_GROUP_NAME), uuid, sensitive=sensitive).entry except pvmex.Error as exc: LOG.exception(exc) raise pvmex.JobRequestFailed(operation_name=self.op, error=exc) timed_out = self._monitor_job( timeout=timeout, sensitive=sensitive, synchronous=synchronous) if timed_out: try: self.cancel_job() except pvmex.JobRequestFailed as e: LOG.warning(six.text_type(e)) exc = pvmex.JobRequestTimedOut( operation_name=self.op, seconds=timeout) LOG.error(exc.args[0]) raise exc if not synchronous: # _monitor_job spawned a subthread that will delete_job when done. 
return self.delete_job() if self.job_status != JobStatus.COMPLETED_OK: exc = pvmex.JobRequestFailed( operation_name=self.op, error=self.get_job_message('')) LOG.error(exc.args[0]) raise exc def poll_while_status(self, statuses, timeout, sensitive): """Poll the Job as long as its status is in the specified list. :param statuses: Iterable of JobStatus enum values. This method continues to poll the Job as long as its status is in the specified list, or until the timeout is reached (whichever comes first). :param timeout: Maximum number of seconds to keep checking job status. If zero, poll indefinitely. :param sensitive: If True, mask the Job payload in the logs. :return: timed_out: True if the timeout was reached before the Job left the specified set of states. """ start_time = time.time() iteration_count = 1 while self.job_status in statuses: elapsed_time = time.time() - start_time if timeout: # wait up to timeout seconds if elapsed_time > timeout: return True # Log a warning every 5 minutes if not iteration_count % 300: msg = _("Job %(job_id)s monitoring for %(time)i seconds.") LOG.warning(msg, {'job_id': self.job_id, 'time': elapsed_time}) time.sleep(1) self.entry = self.adapter.read_job( self.job_id, sensitive=sensitive).entry iteration_count += 1 return False def _monitor_job(self, timeout=CONF.pypowervm_job_request_timeout, sensitive=False, synchronous=True): """Polls a job. Waits on a job until it is no longer running. If a timeout is given, it times out in the given amount of time. :param timeout: maximum number of seconds to keep checking job status :param sensitive: If True, mask the Job payload in the logs. :param synchronous: If True (the default), wait for the Job to complete or time out. If False, return as soon as the Job starts. Note that this may still involve polling (if the Job is waiting in queue to start), and may still time out (if the Job hasn't started within the requested timeout.) 
If synchronous=True, the caller must delete the Job (self.delete_job()); if False, this method spawns a subthread that deletes it when it finishes. :returns timed_out: boolean True if timed out waiting for job completion """ if synchronous: return self.poll_while_status( [JobStatus.RUNNING, JobStatus.NOT_ACTIVE], timeout, sensitive) # Asynchronous: wait for the Job to start, then spawn a thread to wait # (indefinitely) for it to finish, and delete it when done. if self.poll_while_status([JobStatus.NOT_ACTIVE], timeout, sensitive): return True PollAndDeleteThread(self, sensitive).start() return False def cancel_job(self, sensitive=False): """Cancels and deletes incomplete/running jobs. This method spawns a thread to monitor the job being cancelled and delete it. :param sensitive: If True, payload will be hidden in the logs """ job_id = self.job_id msg = _("Issuing cancel request for job %(job_id)s. Will poll the " "job indefinitely for termination.") LOG.warning(msg, {'job_id': job_id}) try: self.adapter.update(None, None, root_type=_JOBS, root_id=job_id, suffix_type='cancel') except pvmex.Error as exc: LOG.exception(exc) CancelJobThread(self, sensitive).start() def delete_job(self): """Cleans this Job off of the REST server, if it is completed. :raise JobRequestFailed: if the Job is detected to be running. """ if self.job_status == JobStatus.RUNNING: error = (_("Job %s not deleted. Job is in running state.") % self.job_id) LOG.error(error) raise pvmex.Error(error) try: self.adapter.delete(_JOBS, self.job_id) except pvmex.Error as exc: LOG.exception(exc) pypowervm-1.1.24/pypowervm/wrappers/event.py0000664000175000017500000001270213571367171020660 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Wrappers and related artifacts around /rest/api/uom/Event.

Events are produced semi-synchronously by the REST server.  Event.get()
returns immediately if there are events waiting to be retrieved; otherwise
it blocks for up to ten seconds, and returns an empty feed if no events were
produced in that time.  It is therefore practical to poll Event.get in a
hard loop with no client-side sleeping.

An Event has a type (Event.etype), a data field (Event.data), and a detail
field (Event.detail).  For *_URI types, data holds the REST URI of the
object triggering the event and detail may provide more granular
information.  Events arrive in the order they are produced.  The special
types CACHE_CLEARED and MISSING_EVENTS mean the client should refetch any
objects of interest before processing subsequent events.

The REST server tracks which client has seen which events via an
application ID (the `appid` argument to the get method).  Repeated requests
with the same application ID receive a single, duplicate-free stream of
events; two clients using different application IDs each receive the same
stream.

A custom event may be pushed to the server: build it with Event.bld(),
supplying any desired data/detail values, and invoke .create() on the
result.  It appears to all active listeners as a CUSTOM_CLIENT_EVENT.
"""

import pypowervm.wrappers.entry_wrapper as ewrap

# Element names within an Event entry
_E_SCHEMA_TYPE = 'Event'
_E_TYPE = 'EventType'
_E_ID = 'EventID'
_E_DATA = 'EventData'
_E_DETAIL = 'EventDetail'
# Required child element ordering
_E_EL_ORDER = (_E_TYPE, _E_ID, _E_DATA, _E_DETAIL)


class EventType(object):
    """Enumeration of event types (from EventType.Enum)."""
    INVALID_URI = 'INVALID_URI'
    CACHE_CLEARED = 'CACHE_CLEARED'
    MISSING_EVENTS = 'MISSING_EVENTS'
    ADD_URI = 'ADD_URI'
    MODIFY_URI = 'MODIFY_URI'
    DELETE_URI = 'DELETE_URI'
    NEW_CLIENT = 'NEW_CLIENT'
    HIDDEN_URI = 'HIDDEN_URI'
    VISIBLE_URI = 'VISIBLE_URI'
    CUSTOM_CLIENT_EVENT = 'CUSTOM_CLIENT_EVENT'


@ewrap.EntryWrapper.pvm_type(_E_SCHEMA_TYPE, child_order=_E_EL_ORDER)
class Event(ewrap.EntryWrapper):

    @classmethod
    def get(cls, adapter, appid):
        """Retrieve the latest Event feed for a given application ID.

        Note: This request may block for a finite amount of time (on the
        order of 10s) while the server is waiting for new events to occur.

        :param adapter: pypowervm.adapter.Adapter for REST API
                        communication.
        :param appid: A hex string identifying the unique consumer.
                      Consumers pulling Event feeds will see the same events
                      duplicated in each request that uses a different
                      appid.  To see a single stream of unique events, a
                      consumer should make repeated requests with the same
                      appid.
        :return: Feed of Event EntryWrapper objects (may be empty).
        """
        qparms = [('QUEUE_CLIENTKEY_METHOD', 'USE_APPLICATIONID'),
                  ('QUEUE_APPLICATIONID', appid)]
        return super(Event, cls).get(adapter, xag=[], add_qp=qparms)

    @classmethod
    def bld(cls, adapter, data, detail):
        """Construct a custom Event.

        Invoke .create() on the resulting Event to broadcast it to active
        listeners.

        :param adapter: pypowervm.adapter.Adapter for REST API
                        communication.
        :param data: Any desired string for the Event's 'data' field.  May
                     be None.
        :param detail: Any desired string for the Event's 'detail' field.
                       May be None.
        :return: An Event wrapper suitable for sending to the REST server
                 via the .create() method.
        """
        evt = cls._bld(adapter)
        if data is not None:
            evt.set_parm_value(_E_DATA, data)
        if detail is not None:
            evt.set_parm_value(_E_DETAIL, detail)
        return evt

    @property
    def etype(self):
        """The Event type, one of the EventType enum values."""
        return self._get_val_str(_E_TYPE)

    @property
    def eid(self):
        """Unique sequence identifier of this Event."""
        return self._get_val_str(_E_ID)

    @property
    def data(self):
        """Event data; for *_URI EventType, the URI of the affected object."""
        return self._get_val_str(_E_DATA)

    @property
    def detail(self):
        """Custom Event detail; semantics dependent on type & data."""
        return self._get_val_str(_E_DETAIL)

    def __str__(self):
        fmt = "Event(id=%s, type=%s, data=%s, detail=%s)"
        return fmt % (self.eid, self.etype, self.data, self.detail)
"""Base classes, enums, and constants shared by LPAR and VIOS EntryWrappers.""" from pypowervm import const from pypowervm.i18n import _ import pypowervm.util as u import pypowervm.wrappers.entry_wrapper as ewrap import pypowervm.wrappers.iocard as card # Base Partition (_BP) _BP_ALLOW_PERF_DATA_COLL = 'AllowPerformanceDataCollection' _BP_ASSOC_PROF = 'AssociatedPartitionProfile' _BP_AVAIL_PRIORITY = 'AvailabilityPriority' _BP_CURR_BSR_ARRAYS = 'CurrentAllocatedBarrierSynchronizationRegisterArrays' _BP_CURRENT_PROC_MODE = 'CurrentProcessorCompatibilityMode' _BP_PROFILE_SYNC = 'CurrentProfileSync' _BP_CURR_SECURE_BOOT = 'CurrentSecureBoot' _BP_HOSTNAME = 'Hostname' _BP_BOOTABLE = 'IsBootable' _BP_CALL_HOME = 'IsCallHomeEnabled' _BP_CONN_MONITORING = 'IsConnectionMonitoringEnabled' _BP_OP_IN_PROGRESS = 'IsOperationInProgress' _BP_REDUNDANT_ERR_PATH_REP = 'IsRedundantErrorPathReportingEnabled' _BP_TIME_REF = 'IsTimeReferencePartition' _BP_ATTN_LED = 'IsVirtualServiceAttentionLEDOn' _BP_TRUSTED_PLATFORM = 'IsVirtualTrustedPlatformModuleEnabled' _BP_KEYLOCK_POS = 'KeylockPosition' _BP_LOGICAL_SERIAL_NUM = 'LogicalSerialNumber' _BP_OS_VER = 'OperatingSystemVersion' _BP_CAPABILITIES = 'PartitionCapabilities' _BP_ID = 'PartitionID' _BP_IO_CFG = 'PartitionIOConfiguration' _BP_MEM_CFG = 'PartitionMemoryConfiguration' _BP_NAME = 'PartitionName' _BP_PROC_CFG = 'PartitionProcessorConfiguration' _BP_PROFS = 'PartitionProfiles' _BP_STATE = 'PartitionState' _BP_TYPE = 'PartitionType' _BP_UUID = 'PartitionUUID' _BP_PENDING_PROC_MODE = 'PendingProcessorCompatibilityMode' _BP_PENDING_SECURE_BOOT = 'PendingSecureBoot' _BP_PROC_POOL = 'ProcessorPool' _BP_PROG_DATA_REMAIN = 'ProgressPartitionDataRemaining' _BP_PROG_DATA_TOTAL = 'ProgressPartitionDataTotal' _BP_PROG_STATE = 'ProgressState' _BP_RMC_STATE = 'ResourceMonitoringControlState' _BP_RMC_IP = 'ResourceMonitoringIPAddress' _BP_VAL_INT_PERF = 'ValidInteractivePerformance' _BP_ASSOC_SYSTEM = 'AssociatedManagedSystem' _BP_SRIOV_ETH = 
'SRIOVEthernetLogicalPorts' _BP_SRIOV_ROCE = 'SRIOVRoCELogicalPorts' _BP_SRIOV_FC_ETH = 'SRIOVFibreChannelOverEthernetLogicalPorts' _BP_CNAS = 'ClientNetworkAdapters' _BP_HOST_ETH = 'HostEthernetAdapterLogicalPorts' _BP_MAC_PREF = 'MACAddressPrefix' _BP_SVC_PARTITION = 'IsServicePartition' _BP_MGMT_CAP = 'PowerVMManagementCapable' _BP_REF_CODE = 'ReferenceCode' _BP_REF_CODE_FULL = 'ReferenceCodeFull' _BP_MGT_PARTITION = 'IsManagementPartition' _BP_AUTO_START = 'AutoStart' _BP_BOOT_MODE = 'BootMode' _BP_NVRAM = 'PartitionNVRAM' _BP_UPTIME = 'Uptime' _BP_DISABLE_SECURE_BOOT = 'DisableSecureBoot' _BP_ASSOC_GROUPS = 'AssociatedGroups' _BP_POWER_ON_WITH_HYP = 'PowerOnWithHypervisor' _BP_ASSOC_TASKS = 'AssociatedTasks' _BP_DESC = 'Description' BP_EL_ORDER = ( _BP_ALLOW_PERF_DATA_COLL, _BP_ASSOC_PROF, _BP_AVAIL_PRIORITY, _BP_CURR_BSR_ARRAYS, _BP_CURRENT_PROC_MODE, _BP_PROFILE_SYNC, _BP_HOSTNAME, _BP_BOOTABLE, _BP_CALL_HOME, _BP_CONN_MONITORING, _BP_OP_IN_PROGRESS, _BP_REDUNDANT_ERR_PATH_REP, _BP_TIME_REF, _BP_ATTN_LED, _BP_TRUSTED_PLATFORM, _BP_KEYLOCK_POS, _BP_LOGICAL_SERIAL_NUM, _BP_OS_VER, _BP_CAPABILITIES, _BP_ID, _BP_IO_CFG, _BP_MEM_CFG, _BP_NAME, _BP_PROC_CFG, _BP_PROFS, _BP_STATE, _BP_TYPE, _BP_UUID, _BP_PENDING_PROC_MODE, _BP_PROC_POOL, _BP_PROG_DATA_REMAIN, _BP_PROG_DATA_TOTAL, _BP_PROG_STATE, _BP_RMC_STATE, _BP_RMC_IP, _BP_VAL_INT_PERF, _BP_ASSOC_SYSTEM, _BP_SRIOV_ETH, _BP_SRIOV_ROCE, _BP_SRIOV_FC_ETH, _BP_CNAS, _BP_HOST_ETH, _BP_MAC_PREF, _BP_SVC_PARTITION, _BP_MGMT_CAP, _BP_REF_CODE, _BP_REF_CODE_FULL, _BP_MGT_PARTITION, _BP_AUTO_START, _BP_BOOT_MODE, _BP_NVRAM, _BP_UPTIME, _BP_DISABLE_SECURE_BOOT, _BP_PENDING_SECURE_BOOT, _BP_CURR_SECURE_BOOT, _BP_ASSOC_GROUPS, _BP_POWER_ON_WITH_HYP, _BP_ASSOC_TASKS, _BP_DESC ) # Partition Capabilities (_CAP) _CAP_DLPAR_IO_CAPABLE = 'DynamicLogicalPartitionIOCapable' _CAP_DLPAR_MEM_CAPABLE = 'DynamicLogicalPartitionMemoryCapable' _CAP_DLPAR_PROC_CAPABLE = 'DynamicLogicalPartitionProcessorCapable' _CAP_INTRUSION_DETECT_CAPABLE 
= 'InternalAndExternalIntrusionDetectionCapable' _CAP_RMC_OS_SHUTDOWN_CAPABLE = ('ResourceMonitoringControlOperatingSystem' 'ShutdownCapable') _CAP_EL_ORDER = (_CAP_DLPAR_IO_CAPABLE, _CAP_DLPAR_MEM_CAPABLE, _CAP_DLPAR_PROC_CAPABLE, _CAP_INTRUSION_DETECT_CAPABLE, _CAP_RMC_OS_SHUTDOWN_CAPABLE,) # Processor Configuration (_PC) _PC_DED_PROC_CFG = 'DedicatedProcessorConfiguration' _PC_HAS_DED_PROCS = 'HasDedicatedProcessors' _PC_SHR_PROC_CFG = 'SharedProcessorConfiguration' _PC_SHARING_MODE = 'SharingMode' _PC_CURR_HAS_DED_PROCS = 'CurrentHasDedicatedProcessors' _PC_CURR_SHARING_MODE = 'CurrentSharingMode' _PC_CURR_DED_PROC_CFG = 'CurrentDedicatedProcessorConfiguration' _PC_RUN_HAS_DED_PROCS = 'RuntimeHasDedicatedProcessors' _PC_RUN_SHARING_MODE = 'RuntimeSharingMode' _PC_CURR_SHR_PROC_CFG = 'CurrentSharedProcessorConfiguration' _PC_EL_ORDER = (_PC_DED_PROC_CFG, _PC_HAS_DED_PROCS, _PC_SHR_PROC_CFG, _PC_SHARING_MODE, _PC_CURR_HAS_DED_PROCS, _PC_CURR_SHARING_MODE, _PC_CURR_DED_PROC_CFG, _PC_RUN_HAS_DED_PROCS, _PC_RUN_SHARING_MODE, _PC_CURR_SHR_PROC_CFG) # Shared Processor Configuration (_SPC) _SPC_DES_PROC_UNIT = 'DesiredProcessingUnits' _SPC_DES_VIRT_PROC = 'DesiredVirtualProcessors' _SPC_MAX_PROC_UNIT = 'MaximumProcessingUnits' _SPC_MAX_VIRT_PROC = 'MaximumVirtualProcessors' _SPC_MIN_PROC_UNIT = 'MinimumProcessingUnits' _SPC_MIN_VIRT_PROC = 'MinimumVirtualProcessors' _SPC_SHARED_PROC_POOL_ID = 'SharedProcessorPoolID' _SPC_UNCAPPED_WEIGHT = 'UncappedWeight' _SPC_ALLOC_VIRT_PROC = 'AllocatedVirtualProcessors' _SPC_CURR_MAX_PROC_UNIT = 'CurrentMaximumProcessingUnits' _SPC_CURR_MIN_PROC_UNIT = 'CurrentMinimumProcessingUnits' _SPC_CURR_PROC_UNIT = 'CurrentProcessingUnits' _SPC_CURR_SHARED_PROC_POOL_ID = 'CurrentSharedProcessorPoolID' _SPC_CURR_UNCAPPED_WEIGHT = 'CurrentUncappedWeight' _SPC_CURR_MIN_VIRT_PROC = 'CurrentMinimumVirtualProcessors' _SPC_CURR_MAX_VIRT_PROC = 'CurrentMaximumVirtualProcessors' _SPC_RUN_PROC_UNIT = 'RuntimeProcessingUnits' _SPC_RUN_UNCAPPED_WEIGHT = 
'RuntimeUncappedWeight' _SPC_EL_ORDER = (_SPC_DES_PROC_UNIT, _SPC_DES_VIRT_PROC, _SPC_MAX_PROC_UNIT, _SPC_MAX_VIRT_PROC, _SPC_MIN_PROC_UNIT, _SPC_MIN_VIRT_PROC, _SPC_SHARED_PROC_POOL_ID, _SPC_UNCAPPED_WEIGHT, _SPC_ALLOC_VIRT_PROC, _SPC_CURR_MAX_PROC_UNIT, _SPC_CURR_MIN_PROC_UNIT, _SPC_CURR_PROC_UNIT, _SPC_CURR_SHARED_PROC_POOL_ID, _SPC_CURR_UNCAPPED_WEIGHT, _SPC_CURR_MIN_VIRT_PROC, _SPC_CURR_MAX_VIRT_PROC, _SPC_RUN_PROC_UNIT, _SPC_RUN_UNCAPPED_WEIGHT) # Dedicated Processor Configuration (_DPC) _DPC_DES_PROCS = 'DesiredProcessors' _DPC_MAX_PROCS = 'MaximumProcessors' _DPC_MIN_PROCS = 'MinimumProcessors' # Partition Memory Configuration (_MEM) _MEM_PROF_AME_ENABLED = 'ActiveMemoryExpansionEnabled' _MEM_AMS_ENABLED = 'ActiveMemorySharingEnabled' _MEM_BSR_ARRAY_CT = 'BarrierSynchronizationRegisterArrayCount' _MEM_DES_ENT = 'DesiredEntitledMemory' _MEM_DES_HUGE_PAGE_CT = 'DesiredHugePageCount' _MEM_DES = 'DesiredMemory' _MEM_EXP_FACTOR = 'ExpansionFactor' _MEM_PPT_RATIO = 'PhysicalPageTableRatio' _MEM_HW_PG_TBL_RATIO = 'HardwarePageTableRatio' _MEM_MAN_ENT_MODE_ENABLED = 'ManualEntitledModeEnabled' _MEM_MAX_HUGE_PG_CT = 'MaximumHugePageCount' _MEM_MAX = 'MaximumMemory' _MEM_WT = 'MemoryWeight' _MEM_MIN_HUGE_PG_CT = 'MinimumHugePageCount' _MEM_MIN = 'MinimumMemory' _MEM_PRI_PGING_SVC_PART = 'PrimaryPagingServicePartition' _MEM_SEC_PGING_SVC_PART = 'SecondaryPagingServicePartition' _MEM_AUTO_ENT_MEM_ENABLED = 'AutoEntitledMemoryEnabled' _MEM_CURR_BSR_ARRAYS = 'CurrentBarrierSynchronizationRegisterArrays' _MEM_CURR_ENT = 'CurrentEntitledMemory' _MEM_CURR_EXP_FACT = 'CurrentExpansionFactor' _MEM_CURR_PPT_RATIO = 'CurrentPhysicalPageTableRatio' _MEM_CURR_HW_PG_TBL_RATIO = 'CurrentHardwarePageTableRatio' _MEM_CURR_HUGE_PG_CT = 'CurrentHugePageCount' _MEM_CURR_MAX_HUGE_PG_CT = 'CurrentMaximumHugePageCount' _MEM_CURR_MAX = 'CurrentMaximumMemory' _MEM_CUR = 'CurrentMemory' _MEM_CURR_MEM_WT = 'CurrentMemoryWeight' _MEM_CURR_MIN_HUGE_PG_CT = 'CurrentMinimumHugePageCount' 
_MEM_CURR_MIN = 'CurrentMinimumMemory' _MEM_CURR_PGING_SVC_PART = 'CurrentPagingServicePartition' _MEM_EXP_HW_ACC_ENABLED = 'MemoryExpansionHardwareAccessEnabled' _MEM_ENC_HW_ACC_ENABLED = 'MemoryEncryptionHardwareAccessEnabled' _MEM_AME_ENABLED = 'MemoryExpansionEnabled' _MEM_RELEASABLE = 'MemoryReleaseable' _MEM_TO_RELEASE = 'MemoryToRelease' _MEM_RED_ERR_PATH_REP_ENABLED = 'RedundantErrorPathReportingEnabled' _MEM_REQ_MIN_FOR_MAX = 'RequiredMinimumForMaximum' _MEM_RUNT_ENT = 'RuntimeEntitledMemory' _MEM_RUNT_EXP_FACT = 'RuntimeExpansionFactor' _MEM_RUNT_HUGE_PG_CT = 'RuntimeHugePageCount' _MEM_RUNT = 'RuntimeMemory' _MEM_RUNT_WT = 'RuntimeMemoryWeight' _MEM_RUNT_MIN = 'RuntimeMinimumMemory' _MEM_SHARED_MEM_ENABLED = 'SharedMemoryEnabled' _MEM_EL_ORDER = ( _MEM_PROF_AME_ENABLED, _MEM_AMS_ENABLED, _MEM_BSR_ARRAY_CT, _MEM_DES_ENT, _MEM_DES_HUGE_PAGE_CT, _MEM_DES, _MEM_EXP_FACTOR, _MEM_PPT_RATIO, _MEM_HW_PG_TBL_RATIO, _MEM_MAN_ENT_MODE_ENABLED, _MEM_MAX_HUGE_PG_CT, _MEM_MAX, _MEM_WT, _MEM_MIN_HUGE_PG_CT, _MEM_MIN, _MEM_PRI_PGING_SVC_PART, _MEM_SEC_PGING_SVC_PART, _MEM_AUTO_ENT_MEM_ENABLED, _MEM_CURR_BSR_ARRAYS, _MEM_CURR_ENT, _MEM_CURR_EXP_FACT, _MEM_CURR_PPT_RATIO, _MEM_CURR_HW_PG_TBL_RATIO, _MEM_CURR_HUGE_PG_CT, _MEM_CURR_MAX_HUGE_PG_CT, _MEM_CURR_MAX, _MEM_CUR, _MEM_CURR_MEM_WT, _MEM_CURR_MIN_HUGE_PG_CT, _MEM_CURR_MIN, _MEM_CURR_PGING_SVC_PART, _MEM_EXP_HW_ACC_ENABLED, _MEM_ENC_HW_ACC_ENABLED, _MEM_AME_ENABLED, _MEM_RELEASABLE, _MEM_TO_RELEASE, _MEM_RED_ERR_PATH_REP_ENABLED, _MEM_REQ_MIN_FOR_MAX, _MEM_RUNT_ENT, _MEM_RUNT_EXP_FACT, _MEM_RUNT_HUGE_PG_CT, _MEM_RUNT, _MEM_RUNT_WT, _MEM_RUNT_MIN, _MEM_SHARED_MEM_ENABLED) # Partition I/O Configuration (_IO) IO_CFG_ROOT = _BP_IO_CFG _IO_MAX_SLOTS = 'MaximumVirtualIOSlots' _IO_TIO = 'TaggedIO' # Tagged I/O (_TIO) _TIO_ALT_CONSOLE = 'AlternateConsole' _TIO_ALT_LOAD_SRC = 'AlternateLoadSource' _TIO_CONSOLE = 'Console' _TIO_LOAD_SRC = 'LoadSource' _TIO_OP_CONSOLE = 'OperationsConsole' _TIO_EL_ORDER = (_TIO_ALT_CONSOLE, 
_TIO_ALT_LOAD_SRC, _TIO_CONSOLE, _TIO_LOAD_SRC, _TIO_OP_CONSOLE) # Constants for the I/O Slot Configuration IO_SLOTS_ROOT = 'ProfileIOSlots' IO_SLOT_ROOT = 'ProfileIOSlot' _IO_SLOT_REQ = 'IsRequired' # Constants for the Associated I/O Slot ASSOC_IO_SLOT_ROOT = 'AssociatedIOSlot' _ASSOC_IO_SLOT_BUS_GRP = 'BusGroupingRequired' _ASSOC_IO_SLOT_DESC = 'Description' _ASSOC_IO_SLOT_FEAT_CODES = 'FeatureCodes' _ASSOC_IO_SLOT_PHYS_LOC = 'IOUnitPhysicalLocation' _ASSOC_IO_SLOT_ADPT_ID = 'PCAdapterID' _ASSOC_IO_SLOT_PCI_CLASS = 'PCIClass' _ASSOC_IO_SLOT_PCI_DEV_ID = 'PCIDeviceID' _ASSOC_IO_SLOT_PCI_SUBSYS_DEV_ID = 'PCISubsystemDeviceID' _ASSOC_IO_SLOT_PCI_MFG_ID = 'PCIManufacturerID' _ASSOC_IO_SLOT_PCI_REV_ID = 'PCIRevisionID' _ASSOC_IO_SLOT_PCI_VENDOR_ID = 'PCIVendorID' _ASSOC_IO_SLOT_SUBSYS_VENDOR_ID = 'PCISubsystemVendorID' _ASSOC_IO_SLOT_DRC_INDEX = 'SlotDynamicReconfigurationConnectorIndex' _ASSOC_IO_SLOT_DRC_NAME = 'SlotDynamicReconfigurationConnectorName' # Constants for generic I/O Adapter IO_ADPT_ROOT = 'IOAdapter' RELATED_IO_ADPT_ROOT = 'RelatedIOAdapter' _IO_SLOT_ORDER = (ASSOC_IO_SLOT_ROOT, _IO_SLOT_REQ) _AIO_ORDER = (_ASSOC_IO_SLOT_BUS_GRP, _ASSOC_IO_SLOT_DESC, _ASSOC_IO_SLOT_FEAT_CODES, _ASSOC_IO_SLOT_PHYS_LOC, _ASSOC_IO_SLOT_ADPT_ID, _ASSOC_IO_SLOT_PCI_CLASS, _ASSOC_IO_SLOT_PCI_DEV_ID, _ASSOC_IO_SLOT_PCI_SUBSYS_DEV_ID, _ASSOC_IO_SLOT_PCI_MFG_ID, _ASSOC_IO_SLOT_PCI_REV_ID, _ASSOC_IO_SLOT_PCI_VENDOR_ID, _ASSOC_IO_SLOT_SUBSYS_VENDOR_ID, RELATED_IO_ADPT_ROOT, _ASSOC_IO_SLOT_DRC_INDEX, _ASSOC_IO_SLOT_DRC_NAME) IO_PFC_ADPT_ROOT = 'PhysicalFibreChannelAdapter' _IO_ADPT_ID = 'AdapterID' _IO_ADPT_DESC = 'Description' _IO_ADPT_DEV_NAME = 'DeviceName' _IO_ADPT_DEV_TYPE = 'DeviceType' _IO_ADPT_DYN_NAME = 'DynamicReconfigurationConnectorName' _IO_ADPT_PHYS_LOC = 'PhysicalLocation' _IO_ADPT_UDID = 'UniqueDeviceID' PFC_PORT_WWPN = card.PFC_PORT_WWPN PFC_PORTS_ROOT = card.PFC_PORTS_ROOT PFC_PORT_ROOT = card.PFC_PORT_ROOT IOAdapter = card.IOAdapter PhysFCAdapter = 
# Re-exported aliases from the iocard module (backward compatibility).
PhysFCAdapter = card.PhysFCAdapter
PhysFCPort = card.PhysFCPort


class SharingMode(object):
    """Shared Processor sharing modes.

    Subset of LogicalPartitionProcessorSharingModeEnum.
    """
    CAPPED = 'capped'
    UNCAPPED = 'uncapped'
    ALL_VALUES = (CAPPED, UNCAPPED)


class DedicatedSharingMode(object):
    """Dedicated Processor sharing modes.

    Subset of LogicalPartitionProcessorSharingModeEnum.
    """
    # Values below (including the 'proces' spelling) match the REST schema
    # exactly and must not be "corrected".
    SHARE_IDLE_PROCS = 'sre idle proces'
    SHARE_IDLE_PROCS_ACTIVE = 'sre idle procs active'
    SHARE_IDLE_PROCS_ALWAYS = 'sre idle procs always'
    KEEP_IDLE_PROCS = 'keep idle procs'
    ALL_VALUES = (SHARE_IDLE_PROCS, SHARE_IDLE_PROCS_ACTIVE,
                  SHARE_IDLE_PROCS_ALWAYS, KEEP_IDLE_PROCS)


class LPARState(object):
    """State of a given LPAR.

    From LogicalPartitionStateEnum.
    """
    ERROR = 'error'
    NOT_ACTIVATED = 'not activated'
    # NOT_AVAILBLE (sic): misspelled name retained for API compatibility.
    NOT_AVAILBLE = 'not available'
    OPEN_FIRMWARE = 'open firmware'
    RUNNING = 'running'
    SHUTTING_DOWN = 'shutting down'
    STARTING = 'starting'
    MIGRATING_NOT_ACTIVE = 'migrating not active'
    MIGRATING_RUNNING = 'migrating running'
    HARDWARE_DISCOVERY = 'hardware discovery'
    SUSPENDED = 'suspended'
    SUSPENDING = 'suspending'
    RESUMING = 'resuming'
    UNKNOWN = 'Unknown'


class LPARType(object):
    """Subset of LogicalPartitionEnvironmentEnum."""
    OS400 = 'OS400'
    AIXLINUX = 'AIX/Linux'
    VIOS = 'Virtual IO Server'


class LPARCompat(object):
    """LPAR compatibility modes.

    From LogicalPartitionProcessorCompatibilityModeEnum.
    """
    DEFAULT = 'default'
    POWER6 = 'POWER6'
    POWER6_PLUS = 'POWER6_Plus'
    POWER6_PLUS_ENHANCED = 'POWER6_Plus_Enhanced'
    POWER7 = 'POWER7'
    POWER8 = 'POWER8'
    POWER8_ENHANCED = 'POWER8_Enhanced'
    POWER9_BASE = 'POWER9_Base'
    POWER9 = 'POWER9'
    POWER9_ENHANCED = 'POWER9_Enhanced'
    ALL_VALUES = (DEFAULT, POWER6, POWER6_PLUS, POWER6_PLUS_ENHANCED,
                  POWER7, POWER8, POWER8_ENHANCED, POWER9_BASE, POWER9,
                  POWER9_ENHANCED)


class RMCState(object):
    """Various RMC States.

    From ResourceMonitoringControlStateEnum.
    """
    ACTIVE = 'active'
    INACTIVE = 'inactive'
    NONE = 'none'
    UNKNOWN = 'unknown'
    BUSY = 'busy'


class BootMode(object):
    """Mirror of PartitionBootMode.Enum.

    Valid values for LPAR.bootmode/VIOS.bootmode.  Not to be confused with
    pypowervm.tasks.power.BootMode.

    Example usage:
        lwrap.bootmode = BootMode.NORM
        lwrap.update()
    """
    NORM = 'Normal'
    SMS = 'System_Management_Services'
    DD = 'Diagnostic_With_Default_Boot_List'
    DS = 'Diagnostic_With_Stored_Boot_List'
    OF = 'Open_Firmware'
    PBL = 'Petitboot'
    UNAVAILABLE = 'Unavailable'
    DEFAULT = 'Default'
    UNKNOWN = 'Unknown'
    ALL_VALUES = (NORM, SMS, DD, DS, OF, PBL, UNAVAILABLE, DEFAULT, UNKNOWN)


class KeylockPos(object):
    """Mirror of KeylockPosition.Enum.

    Valid values for LPAR.keylock_pos/VIOS.keylock_pos.  Not to be confused
    with pypowervm.tasks.power.KeylockPos.

    Example usage:
        lwrap.keylock_pos = KeylockPos.MANUAL
        lwrap.update()
    """
    MANUAL = 'manual'
    NORMAL = 'normal'
    UNKNOWN = 'unknown'
    ALL_VALUES = (MANUAL, NORMAL, UNKNOWN)


class _DlparCapable(object):
    """Mixin providing DLPAR modifiability checks for partitions."""

    def _can_modify(self, dlpar_cap, cap_desc):
        """Checks to determine if the partition can be modified.

        :param dlpar_cap: The appropriate DLPAR attribute to validate.  Only
                          used if system is active.
        :param cap_desc: A translated string indicating the DLPAR
                         capability.
        :return capable: True if HW can be added/removed.  False otherwise.
        :return reason: A translated message that will indicate why it was
                        not capable of modification.  If capable is True,
                        the reason will be None.
        """
        # A powered-off partition is always modifiable.
        if self.state == LPARState.NOT_ACTIVATED:
            return True, None
        # A live partition needs RMC (unless it's the mgmt partition) ...
        if self.rmc_state != RMCState.ACTIVE and not self.is_mgmt_partition:
            return False, _('Partition does not have an active RMC '
                            'connection.')
        # ... and the specific DLPAR capability.
        if not dlpar_cap:
            return False, _('Partition does not have an active DLPAR '
                            'capability for %s.') % cap_desc
        return True, None

    def can_modify_io(self):
        """Determines if a partition is capable of adding/removing I/O HW.

        :return capable: True if HW can be added/removed.  False otherwise.
        :return reason: A translated message that will indicate why it was
                        not capable of modification.  If capable is True,
                        the reason will be None.
        """
        return self._can_modify(self.capabilities.io_dlpar, _('I/O'))

    def can_modify_mem(self):
        """Determines if a partition is capable of adding/removing Memory.

        :return capable: True if memory can be added/removed.  False
                         otherwise.
        :return reason: A translated message that will indicate why it was
                        not capable of modification.  If capable is True,
                        the reason will be None.
        """
        return self._can_modify(self.capabilities.mem_dlpar, _('Memory'))

    def can_modify_proc(self):
        """Determines if a partition is capable of adding/removing procs.

        :return capable: True if procs can be added/removed.  False
                         otherwise.
        :return reason: A translated message that will indicate why it was
                        not capable of modification.  If capable is True,
                        the reason will be None.
        """
        return self._can_modify(self.capabilities.proc_dlpar,
                                _('Processors'))
@ewrap.Wrapper.base_pvm_type
class BasePartition(ewrap.EntryWrapper, _DlparCapable):
    """Base class for Logical Partition (LPAR) & Virtual I/O Server (VIOS).

    This corresponds to the abstract BasePartition object in the PowerVM
    schema.
    """

    search_keys = dict(name=_BP_NAME, id=_BP_ID)

    @classmethod
    def _bld_base(cls, adapter, name, mem_cfg, proc_cfg, env, io_cfg=None):
        """Creates a BasePartition wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param name: The name of the partition
        :param mem_cfg: The memory configuration wrapper
        :param proc_cfg: The processor configuration wrapper
        :param env: The type of partition, taken from LPARType
        :param io_cfg: The I/O configuration wrapper
        :returns: New BasePartition wrapper
        """
        part = super(BasePartition, cls)._bld(adapter)
        if io_cfg:
            part.io_config = io_cfg
        part.mem_config = mem_cfg
        part.name = name
        part.proc_config = proc_cfg
        part._env(env)
        return part

    @property
    def state(self):
        """See LPARState.

        e.g. 'not activated', 'running', 'migrating running', etc.
        """
        return self._get_val_str(_BP_STATE)

    @property
    def name(self):
        """Short name (not ID, MTMS, or hostname)."""
        return self._get_val_str(_BP_NAME)

    @name.setter
    def name(self, val):
        self.set_parm_value(_BP_NAME, val)

    @property
    def id(self):
        """Short ID (not UUID)."""
        return self._get_val_int(_BP_ID)

    def _id(self, value):
        """Set ID (not UUID).  Only settable on creation of the partition."""
        self.set_parm_value(_BP_ID, int(value))

    @property
    def env(self):
        """See the LPARType Enumeration.

        Should usually be 'AIX/Linux' for LPAR.  'Virtual IO Server' should
        only happen for VIOS.
        """
        return self._get_val_str(_BP_TYPE)

    def _env(self, val):
        self.set_parm_value(_BP_TYPE, val)

    @property
    def partition_uuid(self):
        return self._get_val_str(_BP_UUID)

    @property
    def assoc_sys_uuid(self):
        """UUID of the associated ManagedSystem."""
        link = self.get_href(_BP_ASSOC_SYSTEM, one_result=True)
        if not link:
            return None
        return u.get_req_path_uuid(link, preserve_case=True)

    @property
    def rmc_state(self):
        """See RMCState.

        e.g. 'active', 'inactive', 'busy', etc.
        """
        return self._get_val_str(_BP_RMC_STATE)

    @property
    def rmc_ip(self):
        """IP address used for RMC communication, as a string."""
        return self._get_val_str(_BP_RMC_IP)

    @property
    def operating_system(self):
        """String representing the OS and version, or 'Unknown'."""
        return self._get_val_str(_BP_OS_VER, 'Unknown')

    @property
    def ref_code(self):
        return self._get_val_str(_BP_REF_CODE)

    @property
    def ref_code_full(self):
        return self._get_val_str(_BP_REF_CODE_FULL)

    @property
    def avail_priority(self):
        return self._get_val_int(_BP_AVAIL_PRIORITY, 0)

    @avail_priority.setter
    def avail_priority(self, value):
        self.set_parm_value(_BP_AVAIL_PRIORITY, value)

    @property
    def profile_sync(self):
        """True if profile sync is 'On', False otherwise (default 'Off')."""
        return self._get_val_str(_BP_PROFILE_SYNC, default='Off') == 'On'

    @profile_sync.setter
    def profile_sync(self, value):
        # Accept a bool for convenience; the REST field wants 'On'/'Off'.
        if isinstance(value, bool):
            value = 'On' if value else 'Off'
        self.set_parm_value(_BP_PROFILE_SYNC, value)

    @property
    def proc_compat_mode(self):
        """*Current* processor compatibility mode.

        See LPARCompat.  E.g. 'POWER7', 'POWER7_Plus', 'POWER8', etc.
        """
        return self._get_val_str(_BP_CURRENT_PROC_MODE)

    @property
    def pending_proc_compat_mode(self):
        """Pending processor compatibility mode.

        See LPARCompat.  E.g. 'POWER7', 'POWER7_Plus', 'POWER8', etc.
        """
        return self._get_val_str(_BP_PENDING_PROC_MODE)

    @proc_compat_mode.setter
    def proc_compat_mode(self, value):
        """Sets *PENDING* proc compat mode.

        Note that corresponding getter retrieves the *CURRENT* proc compat
        mode.
        """
        self.set_parm_value(_BP_PENDING_PROC_MODE, value)

    @property
    def is_mgmt_partition(self):
        """Is this the management partition?  Default False if absent."""
        return self._get_val_bool(_BP_MGT_PARTITION)

    @property
    def is_service_partition(self):
        """Is this the service partition?  Default False if absent."""
        return self._get_val_bool(_BP_SVC_PARTITION)

    @is_service_partition.setter
    def is_service_partition(self, value):
        """Set if this is the service partition."""
        self.set_parm_value(_BP_SVC_PARTITION, u.sanitize_bool_for_api(value))

    @property
    def keylock_pos(self):
        """Keylock position - see KeylockPos enumeration."""
        return self._get_val_str(_BP_KEYLOCK_POS)

    @keylock_pos.setter
    def keylock_pos(self, value):
        """Keylock position - see KeylockPos enumeration."""
        if value not in KeylockPos.ALL_VALUES:
            raise ValueError(_("Invalid KeylockPos '%s'.") % value)
        self.set_parm_value(_BP_KEYLOCK_POS, value)

    @property
    def bootmode(self):
        """Boot mode - one of the BootMode enum values."""
        return self._get_val_str(_BP_BOOT_MODE)

    @bootmode.setter
    def bootmode(self, value):
        if value not in BootMode.ALL_VALUES:
            raise ValueError(_("Invalid BootMode '%s'.") % value)
        self.set_parm_value(_BP_BOOT_MODE, value)

    @property
    def disable_secure_boot(self):
        return self._get_val_bool(_BP_DISABLE_SECURE_BOOT)

    @disable_secure_boot.setter
    def disable_secure_boot(self, value):
        self.set_parm_value(
            _BP_DISABLE_SECURE_BOOT, u.sanitize_bool_for_api(value),
            attrib=const.ATTR_KSV150)

    @property
    def pending_secure_boot(self):
        """The Pending Secure Boot Policy Value.

        This parameter can be set while the LPAR is running.  It will be
        the policy used when the LPAR is next booted.
        """
        return self._get_val_int(_BP_PENDING_SECURE_BOOT, default=0)

    @pending_secure_boot.setter
    def pending_secure_boot(self, value):
        self.set_parm_value(
            _BP_PENDING_SECURE_BOOT, value, attrib=const.ATTR_KSV180)

    @property
    def current_secure_boot(self):
        """The Secure Boot Policy Value.

        The secure boot value will determine what level of enforcement will
        take place when the LPAR is booted.  The following are the values
        and their interpretations:
        * 0: Secure boot disabled
        * 1: Secure boot enabled (log only)
        * 2: Secure boot enabled and enforced
        * 3-9: Secure boot enabled and enforced.  Firmware or OS may take
          additional measures.
        """
        return self._get_val_int(_BP_CURR_SECURE_BOOT, default=0)

    @property
    def allow_perf_data_collection(self):
        return self._get_val_bool(_BP_ALLOW_PERF_DATA_COLL)

    @allow_perf_data_collection.setter
    def allow_perf_data_collection(self, value):
        self.set_parm_value(_BP_ALLOW_PERF_DATA_COLL,
                            u.sanitize_bool_for_api(value))

    @property
    def capabilities(self):
        return PartitionCapabilities.wrap(self._find(_BP_CAPABILITIES))

    @property
    def io_config(self):
        """The Partition I/O Configuration."""
        return PartitionIOConfiguration.wrap(self._find(_BP_IO_CFG))

    @io_config.setter
    def io_config(self, io_cfg):
        """The Partition I/O Configuration for the LPAR."""
        elem = self._find_or_seed(_BP_IO_CFG)
        # TODO(efried): All instances of _find_or_seed + element.replace
        # should probably be inject instead
        self.element.replace(elem, io_cfg.element)

    @property
    def mem_config(self):
        """The Partition Memory Configuration for the LPAR."""
        return PartitionMemoryConfiguration.wrap(self._find(_BP_MEM_CFG))

    @mem_config.setter
    def mem_config(self, mem_cfg):
        """The Partition Memory Configuration for the LPAR."""
        elem = self._find_or_seed(_BP_MEM_CFG)
        self.element.replace(elem, mem_cfg.element)

    @property
    def proc_config(self):
        """The Partition Processor Configuration for the LPAR."""
        return PartitionProcessorConfiguration.wrap(self._find(_BP_PROC_CFG))

    @proc_config.setter
    def proc_config(self, proc_config):
        """The Partition Processor Configuration for the LPAR."""
        elem = self._find_or_seed(_BP_PROC_CFG)
        self.element.replace(elem, proc_config.element)

    @ewrap.Wrapper.xag_property(const.XAG.NVRAM)
    def nvram(self):
        return self._get_val_str(_BP_NVRAM)

    @nvram.setter
    def nvram(self, nvram):
        self.set_parm_value(
            _BP_NVRAM, nvram,
            attrib=u.xag_attrs(const.XAG.NVRAM, base=const.ATTR_KSV130))

    @property
    def uptime(self):
        """Integer time since partition boot, in seconds."""
        return self._get_val_int(_BP_UPTIME)
@ewrap.ElementWrapper.pvm_type(_BP_CAPABILITIES, has_metadata=True,
                               child_order=_CAP_EL_ORDER)
class PartitionCapabilities(ewrap.ElementWrapper):
    """See LogicalPartitionCapabilities."""

    @property
    def io_dlpar(self):
        # True if I/O resources can be added/removed while running (DLPAR).
        return self._get_val_bool(_CAP_DLPAR_IO_CAPABLE)

    @property
    def mem_dlpar(self):
        # True if memory can be added/removed while running (DLPAR).
        return self._get_val_bool(_CAP_DLPAR_MEM_CAPABLE)

    @property
    def proc_dlpar(self):
        # True if processors can be added/removed while running (DLPAR).
        return self._get_val_bool(_CAP_DLPAR_PROC_CAPABLE)


@ewrap.ElementWrapper.pvm_type(_BP_PROC_CFG, has_metadata=True,
                               child_order=_PC_EL_ORDER)
class PartitionProcessorConfiguration(ewrap.ElementWrapper):
    """Represents the partitions Processor Configuration.

    Comprised of either the shared or dedicated processor config.
    """

    @classmethod
    def bld_shared(cls, adapter, proc_unit, proc,
                   sharing_mode=SharingMode.UNCAPPED, uncapped_weight=128,
                   min_proc_unit=None, max_proc_unit=None,
                   min_proc=None, max_proc=None, proc_pool=0):
        """Builds a Shared Processor configuration wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param proc_unit: Amount of desired proc units (float)
        :param proc: Number of virtual processors (int)
        :param sharing_mode: Sharing mode of the processors (uncapped)
        :param uncapped_weight: Uncapped weight of the processors (0-255)
        :param min_proc_unit: Minimum proc units, default to proc unit value
        :param max_proc_unit: Maximum proc units, default to proc unit value
        :param min_proc: Minimum processors, default to proc value
        :param max_proc: Maximum processors, default to proc value
        :param proc_pool: The shared processor pool for the lpar, defaults to 0
        :returns: Processor Config with shared processors
        """
        proc_cfg = super(PartitionProcessorConfiguration, cls)._bld(adapter)
        # Mark as shared before embedding the shared-processor sub-config.
        proc_cfg._has_dedicated(False)
        sproc = SharedProcessorConfiguration.bld(
            adapter, proc_unit, proc, uncapped_weight=uncapped_weight,
            min_proc_unit=min_proc_unit, max_proc_unit=max_proc_unit,
            min_proc=min_proc, max_proc=max_proc, proc_pool=proc_pool)
        proc_cfg._shared_proc_cfg(sproc)
        proc_cfg.sharing_mode = sharing_mode
        return proc_cfg

    @classmethod
    def bld_dedicated(cls, adapter, proc, min_proc=None, max_proc=None,
                      sharing_mode=DedicatedSharingMode.SHARE_IDLE_PROCS):
        """Builds a Dedicated Processor configuration wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param proc: Number of virtual processors (int)
        :param min_proc: Minimum processors, default to proc value
        :param max_proc: Maximum processors, default to proc value
        :param sharing_mode: Sharing mode of the processors, 'sre idle proces'
        :returns: Processor Config with dedicated processors
        """
        proc_cfg = super(PartitionProcessorConfiguration, cls)._bld(adapter)
        dproc = DedicatedProcessorConfiguration.bld(
            adapter, proc, min_proc=min_proc, max_proc=max_proc)
        proc_cfg._dedicated_proc_cfg(dproc)
        # Mark as dedicated after embedding the dedicated sub-config.
        proc_cfg._has_dedicated(True)
        proc_cfg.sharing_mode = sharing_mode
        return proc_cfg

    @property
    def has_dedicated(self):
        """Returns boolean True if dedicated, False if shared or not found."""
        return self._get_val_bool(_PC_HAS_DED_PROCS)

    def _has_dedicated(self, val):
        """Expects 'true' (string) for dedicated or 'false' for shared."""
        self.set_parm_value(_PC_HAS_DED_PROCS, u.sanitize_bool_for_api(val))

    @property
    def sharing_mode(self):
        # One of the SharingMode (shared) or DedicatedSharingMode values.
        return self._get_val_str(_PC_SHARING_MODE)

    @sharing_mode.setter
    def sharing_mode(self, value):
        self.set_parm_value(_PC_SHARING_MODE, value)

    @property
    def shared_proc_cfg(self):
        """Returns the Shared Processor Configuration."""
        return SharedProcessorConfiguration.wrap(
            self._find(_PC_SHR_PROC_CFG))

    def _shared_proc_cfg(self, spc):
        # Private setter: replaces (or seeds) the shared-proc sub-element.
        elem = self._find_or_seed(_PC_SHR_PROC_CFG)
        self.element.replace(elem, spc.element)

    @property
    def dedicated_proc_cfg(self):
        """Returns the Dedicated Processor Configuration."""
        return DedicatedProcessorConfiguration.wrap(
            self._find(_PC_DED_PROC_CFG))

    def _dedicated_proc_cfg(self, dpc):
        # Private setter: replaces (or seeds) the dedicated-proc sub-element.
        elem = self._find_or_seed(_PC_DED_PROC_CFG)
        self.element.replace(elem, dpc.element)


@ewrap.ElementWrapper.pvm_type(_BP_MEM_CFG, has_metadata=True,
                               child_order=_MEM_EL_ORDER)
class PartitionMemoryConfiguration(ewrap.ElementWrapper):
    """Represents the partitions Memory Configuration."""

    @classmethod
    def bld(cls, adapter, mem, min_mem=None, max_mem=None):
        """Creates the ParitionMemoryConfiguration.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param mem: The amount of memory for the partition in MB
        :param min_mem: The minimum amount of memory in MB. Defaults to
                        the mem param
        :param max_mem: The maximum amount of memory in MB. Defaults to
                        the mem param
        :returns: The memory configuration wrapper.
        """
        # min/max default to the desired value when not supplied.
        if min_mem is None:
            min_mem = mem
        if max_mem is None:
            max_mem = mem

        cfg = super(PartitionMemoryConfiguration, cls)._bld(adapter)
        cfg.desired = mem
        cfg.max = max_mem
        cfg.min = min_mem

        return cfg

    @property
    def current(self):
        # Read-only: currently-assigned memory, in MB.
        return self._get_val_int(_MEM_CUR)

    @property
    def desired(self):
        return self._get_val_int(_MEM_DES)

    @desired.setter
    def desired(self, mem):
        self.set_parm_value(_MEM_DES, str(mem))

    @property
    def max(self):
        return self._get_val_int(_MEM_MAX)

    @max.setter
    def max(self, mem):
        self.set_parm_value(_MEM_MAX, str(mem))

    @property
    def min(self):
        return self._get_val_int(_MEM_MIN)

    @min.setter
    def min(self, mem):
        self.set_parm_value(_MEM_MIN, str(mem))

    @property
    def shared_enabled(self):
        # The default is None instead of False so that the caller
        # can know if the value is not set
        return self._get_val_bool(_MEM_SHARED_MEM_ENABLED, None)

    @property
    def ame_enabled(self):
        # True if Active Memory Expansion is enabled.
        return self._get_val_bool(_MEM_AME_ENABLED)

    @property
    def exp_factor(self):
        """The Active Memory Expansion Factor

        The expansion factor represents the target memory multiplier.
        e.g. An LPAR with EF = 2 which has 4 GB of memory will have a target
        expansion memory of 8 GB.
        """
        return self._get_val_float(_MEM_EXP_FACTOR, default=0)

    @exp_factor.setter
    def exp_factor(self, exp_factor):
        """The Active Memory Expansion Factor

        :param exp_factor: The expansion factor value. Setting this to 0 will
                           turn/keep AME off. The valid values are
                           1.0 <= x <= 10.0 up to 2 decimal places.
        """
        self.set_parm_value(_MEM_EXP_FACTOR,
                            u.sanitize_float_for_api(exp_factor))

    @property
    def ppt_ratio(self):
        """The Physical Page Table Ratio

        The physical page table ratio represents the ratio of a VM's maximum
        memory to the size of its physical page table. The PPT is used by the
        platform to maintain the translation of the VM's physical to virtual
        memory addresses during mobility operations (i.e. LPM).

        The ppt_ratio is represented as 1:2^N where accepted values for N
        range from 6 to 12. If the ratio is 1:4096 for a VM with 64 GB of
        maximum memory, it would have a PPT of 16 MB (64 GB / 4096 = 16 MB).
        """
        return self._get_val_int(_MEM_PPT_RATIO)

    @ppt_ratio.setter
    def ppt_ratio(self, ppt_ratio):
        """The Physical Page Table Ratio

        :param ppt_ratio: The ppt ratio value.
        """
        self.set_parm_value(_MEM_PPT_RATIO, ppt_ratio)


@ewrap.ElementWrapper.pvm_type(_PC_SHR_PROC_CFG, has_metadata=True,
                               child_order=_SPC_EL_ORDER)
class SharedProcessorConfiguration(ewrap.ElementWrapper):
    """Represents the partition's Shared Processor Configuration."""

    @classmethod
    def bld(cls, adapter, proc_unit, proc, uncapped_weight=None,
            min_proc_unit=None, max_proc_unit=None,
            min_proc=None, max_proc=None, proc_pool=0):
        """Builds a Shared Processor configuration wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param proc_unit: Amount of desired proc units (float)
        :param proc: Number of virtual processors (int)
        :param uncapped_weight: Uncapped weight of the processors, 0-255
        :param min_proc_unit: Minimum proc units, default to proc unit value
        :param max_proc_unit: Maximum proc units, default to proc unit value
        :param min_proc: Minimum processors, default to proc value
        :param max_proc: Maximum processors, default to proc value
        :param proc_pool: The shared processor pool for the lpar, defaults to 0
        :returns: Processor Config with shared processors
        """
        # Set defaults if not specified
        if min_proc_unit is None:
            min_proc_unit = proc_unit
        if max_proc_unit is None:
            max_proc_unit = proc_unit
        if min_proc is None:
            min_proc = proc
        if max_proc is None:
            max_proc = proc

        sproc = super(SharedProcessorConfiguration, cls)._bld(adapter)

        sproc.desired_units = proc_unit
        sproc.desired_virtual = proc
        sproc.max_units = max_proc_unit
        sproc.max_virtual = max_proc
        sproc.min_units = min_proc_unit
        sproc.min_virtual = min_proc
        sproc.pool_id = proc_pool
        # Uncapped weight is only meaningful for uncapped sharing modes;
        # omit the element entirely when the caller did not provide one.
        if uncapped_weight is not None:
            sproc.uncapped_weight = uncapped_weight

        return sproc

    @property
    def desired_units(self):
        return self._get_val_float(_SPC_DES_PROC_UNIT)

    @desired_units.setter
    def desired_units(self, val):
        self.set_parm_value(_SPC_DES_PROC_UNIT, u.sanitize_float_for_api(val))

    @property
    def max_units(self):
        return self._get_val_float(_SPC_MAX_PROC_UNIT)

    @max_units.setter
    def max_units(self, val):
        self.set_parm_value(_SPC_MAX_PROC_UNIT, u.sanitize_float_for_api(val))

    @property
    def min_units(self):
        return self._get_val_float(_SPC_MIN_PROC_UNIT)

    @min_units.setter
    def min_units(self, val):
        self.set_parm_value(_SPC_MIN_PROC_UNIT, u.sanitize_float_for_api(val))

    @property
    def desired_virtual(self):
        return self._get_val_int(_SPC_DES_VIRT_PROC)

    @desired_virtual.setter
    def desired_virtual(self, val):
        self.set_parm_value(_SPC_DES_VIRT_PROC, val)

    @property
    def max_virtual(self):
        return self._get_val_int(_SPC_MAX_VIRT_PROC)

    @max_virtual.setter
    def max_virtual(self, val):
        self.set_parm_value(_SPC_MAX_VIRT_PROC, val)

    @property
    def min_virtual(self):
        return self._get_val_int(_SPC_MIN_VIRT_PROC)

    @min_virtual.setter
    def min_virtual(self, val):
        self.set_parm_value(_SPC_MIN_VIRT_PROC, val)

    @property
    def pool_id(self):
        # Defaults to 0 (the system default shared processor pool).
        return self._get_val_int(_SPC_SHARED_PROC_POOL_ID, 0)

    @pool_id.setter
    def pool_id(self, val):
        self.set_parm_value(_SPC_SHARED_PROC_POOL_ID, val)

    @property
    def uncapped_weight(self):
        return self._get_val_int(_SPC_UNCAPPED_WEIGHT, 0)

    @uncapped_weight.setter
    def uncapped_weight(self, val):
        self.set_parm_value(_SPC_UNCAPPED_WEIGHT, val)


@ewrap.ElementWrapper.pvm_type(_PC_DED_PROC_CFG, has_metadata=True)
class DedicatedProcessorConfiguration(ewrap.ElementWrapper):
    """Represents the partition's Dedicated Processor Configuration."""

    @classmethod
    def bld(cls, adapter, proc, min_proc=None, max_proc=None):
        """Builds a Dedicated Processor configuration wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param proc: Number of virtual processors (int)
        :param min_proc: Minimum processors, default to proc value
        :param max_proc: Maximum processors, default to proc value
        :returns: Processor Config with dedicated processors
        """
        # Set defaults if not specified
        if min_proc is None:
            min_proc = proc
        if max_proc is None:
            max_proc = proc

        dproc = super(DedicatedProcessorConfiguration, cls)._bld(adapter)

        dproc.desired = proc
        dproc.max = max_proc
        dproc.min = min_proc

        return dproc

    @property
    def desired(self):
        return self._get_val_int(_DPC_DES_PROCS, 0)

    @desired.setter
    def desired(self, value):
        self.set_parm_value(_DPC_DES_PROCS, value)

    @property
    def max(self):
        return self._get_val_int(_DPC_MAX_PROCS, 0)

    @max.setter
    def max(self, value):
        self.set_parm_value(_DPC_MAX_PROCS, value)

    @property
    def min(self):
        return self._get_val_int(_DPC_MIN_PROCS, 0)

    @min.setter
    def min(self, value):
        self.set_parm_value(_DPC_MIN_PROCS, value)


@ewrap.ElementWrapper.pvm_type('PartitionIOConfiguration', has_metadata=True)
class PartitionIOConfiguration(ewrap.ElementWrapper):
    """Represents the partitions Dedicated IO Configuration.

    Comprised of I/O Slots. There are two types of IO slots. Those dedicated
    to physical hardware (io_slots) and those that get used by virtual
    hardware.
    """

    @classmethod
    def bld(cls, adapter, max_virt_slots, io_slots=None):
        """Builds a Partition IO configuration wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param max_virt_slots: Number of virtual slots (int)
        :param io_slots: I/O slots to assign to the LPAR (list of IOSlot).
        :returns: Partition IO configuration wrapper
        """
        cfg = super(PartitionIOConfiguration, cls)._bld(adapter)
        cfg.max_virtual_slots = max_virt_slots
        if io_slots is not None:
            cfg.io_slots = io_slots
        return cfg

    @property
    def max_virtual_slots(self):
        """The maximum number of virtual slots.

        Slots are used for every VirtualScsiServerAdapter, TrunkAdapter, etc...
        """
        return self._get_val_int(_IO_MAX_SLOTS)

    @max_virtual_slots.setter
    def max_virtual_slots(self, value):
        self.set_parm_value(_IO_MAX_SLOTS, value)

    @property
    def io_slots(self):
        """The physical I/O Slots.

        Each slot will have hardware associated with it.
        """
        es = ewrap.WrapperElemList(self._find_or_seed(IO_SLOTS_ROOT), IOSlot)
        return es

    @io_slots.setter
    def io_slots(self, val):
        self.replace_list(IO_SLOTS_ROOT, val)

    @property
    def tagged_io(self):
        """IBMi only - tagged I/O attributes of the I/O configuration."""
        tio = self._find(_IO_TIO)
        # None when the partition has no TaggedIO element (non-IBMi).
        return TaggedIO.wrap(tio) if tio else None

    @tagged_io.setter
    def tagged_io(self, tio):
        self.inject(tio.element)


@ewrap.ElementWrapper.pvm_type('TaggedIO', has_metadata=True,
                               child_order=_TIO_EL_ORDER)
class TaggedIO(ewrap.ElementWrapper):
    """IBMi only - tagged I/O attributes of the I/O configuration."""

    @classmethod
    def bld(cls, adapter, load_src='0', console='HMC', alt_load_src='NONE'):
        """Builds a Partition TaggedIO wrapper.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param load_src: Load source to use
        :param console: Console to use for IBMi
        :param alt_load_src: Alternate load source to use
        :returns: Partition TaggedIO wrapper
        """
        cfg = super(TaggedIO, cls)._bld(adapter)
        cfg.load_src = load_src
        cfg.console = console
        cfg.alt_load_src = alt_load_src
        return cfg

    @property
    def alt_load_src(self):
        """Value may or may not be an integer - always returned as string."""
        return self._get_val_str(_TIO_ALT_LOAD_SRC)

    @alt_load_src.setter
    def alt_load_src(self, value):
        self.set_parm_value(_TIO_ALT_LOAD_SRC, value)

    @property
    def console(self):
        """Value may or may not be an integer - always returned as string."""
        return self._get_val_str(_TIO_CONSOLE)

    @console.setter
    def console(self, value):
        self.set_parm_value(_TIO_CONSOLE, value)

    @property
    def load_src(self):
        """Value may or may not be an integer - always returned as string."""
        return self._get_val_str(_TIO_LOAD_SRC)

    @load_src.setter
    def load_src(self, value):
        self.set_parm_value(_TIO_LOAD_SRC, value)


@ewrap.ElementWrapper.pvm_type('ProfileIOSlot', has_metadata=True,
                               child_order=_IO_SLOT_ORDER)
class IOSlot(ewrap.ElementWrapper):
    """An I/O Slot represents a device bus on the system.

    It may contain a piece of hardware within it.
    """

    @classmethod
    def bld(cls, adapter, bus_grp_required, drc_index, required=False):
        """Build a new IOSlot wrapper with all required parameters.

        :returns: A new IOSlot wrapper.
        """
        new_slot = super(IOSlot, cls)._bld(adapter)
        new_slot.required = required

        # Build out the AssociatedIOSlot
        assoc_io_slot = cls.AssociatedIOSlot._bld_new(adapter,
                                                      bus_grp_required,
                                                      drc_index)

        # Inject the AssociatedIOSlot into this wrapper
        new_slot.inject(assoc_io_slot.element)

        return new_slot

    @ewrap.ElementWrapper.pvm_type(ASSOC_IO_SLOT_ROOT, has_metadata=True,
                                   child_order=_AIO_ORDER)
    class AssociatedIOSlot(ewrap.ElementWrapper):
        """Internal class. Hides the nested AssociatedIOSlot from parent.

        Every ProfileIOSlot contains one AssociatedIOSlot. If both are exposed
        at the API level, the user would have to go from:
         - lpar -> partition i/o config -> i/o slot -> associated i/o slot
           -> i/o data

        Since every i/o slot has a single Associated I/O Slot (unless said
        I/O slot has no associated I/O), then we can just hide this from
        the user.

        We still keep the structure internally, but makes the API easier to
        consume.
        """

        @classmethod
        def _bld_new(cls, adapter, bus_grp_required, drc_index):
            """Build a new AssociatedIOSlot wrapper.

            This will not typically be called outside of the IOSlot.bld
            class method.

            :returns: A new AssociatedIOSlot wrapper.
            """
            new_slot = super(IOSlot.AssociatedIOSlot, cls)._bld(adapter)
            new_slot._bus_grp_required(bus_grp_required)
            new_slot._drc_index(drc_index)
            return new_slot

        @property
        def bus_grp_required(self):
            return self._get_val_bool(_ASSOC_IO_SLOT_BUS_GRP)

        def _bus_grp_required(self, val):
            # Private setter; serializes the boolean for the API.
            self.set_parm_value(_ASSOC_IO_SLOT_BUS_GRP,
                                u.sanitize_bool_for_api(val))

        @property
        def description(self):
            return self._get_val_str(_ASSOC_IO_SLOT_DESC)

        @property
        def phys_loc(self):
            return self._get_val_str(_ASSOC_IO_SLOT_PHYS_LOC)

        @property
        def pc_adpt_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_ADPT_ID)

        @property
        def pci_class(self):
            return self._get_val_int(_ASSOC_IO_SLOT_PCI_CLASS)

        @property
        def pci_dev_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_PCI_DEV_ID)

        @property
        def pci_subsys_dev_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_PCI_SUBSYS_DEV_ID)

        @property
        def pci_mfg_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_PCI_MFG_ID)

        @property
        def pci_rev_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_PCI_REV_ID)

        @property
        def pci_vendor_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_PCI_VENDOR_ID)

        @property
        def pci_subsys_vendor_id(self):
            return self._get_val_int(_ASSOC_IO_SLOT_SUBSYS_VENDOR_ID)

        @property
        def drc_index(self):
            return self._get_val_int(_ASSOC_IO_SLOT_DRC_INDEX)

        def _drc_index(self, val):
            # Private setter used by _bld_new.
            self.set_parm_value(_ASSOC_IO_SLOT_DRC_INDEX, val)

        @property
        def drc_name(self):
            return self._get_val_str(_ASSOC_IO_SLOT_DRC_NAME)

        @property
        def io_adapter(self):
            """Jumps over the 'Related IO Adapter' element direct to the I/O.

            This is another area where the schema has a two step jump that the
            API can avoid. This method skips over the RelatedIOAdapter and
            jumps right to the IO Adapter.

            Return values are either the generic IOAdapter or the
            PhysFCAdapter.
            """
            # The child can be either an IO Adapter or a PhysFCAdapter.
            # Need to check for both...
            io_adpt_root = self._find(
                u.xpath(RELATED_IO_ADPT_ROOT, IOAdapter.schema_type))
            if io_adpt_root is not None:
                return IOAdapter.wrap(io_adpt_root)

            # Didn't have the generic...check for non-generic.
            io_adpt_root = self._find(
                u.xpath(RELATED_IO_ADPT_ROOT, PhysFCAdapter.schema_type))
            if io_adpt_root is not None:
                return PhysFCAdapter.wrap(io_adpt_root)

            return None

    @property
    def required(self):
        return self._get_val_bool(_IO_SLOT_REQ)

    @required.setter
    def required(self, val):
        self.set_parm_value(_IO_SLOT_REQ, u.sanitize_bool_for_api(val))

    def __get_prop(self, func):
        """Thin wrapper to get the Associated I/O Slot and get a property."""
        elem = self._find(ASSOC_IO_SLOT_ROOT)
        if elem is None:
            return None

        # Build the Associated IO Slot, find the function and execute it.
        assoc_io_slot = self.AssociatedIOSlot.wrap(elem)
        return getattr(assoc_io_slot, func)

    # The following properties simply delegate to the (hidden) nested
    # AssociatedIOSlot via __get_prop; each returns None if that element
    # is absent.

    @property
    def bus_grp_required(self):
        return self.__get_prop('bus_grp_required')

    @property
    def description(self):
        return self.__get_prop('description')

    @property
    def phys_loc(self):
        return self.__get_prop('phys_loc')

    @property
    def pc_adpt_id(self):
        return self.__get_prop('pc_adpt_id')

    @property
    def pci_class(self):
        return self.__get_prop('pci_class')

    @property
    def pci_dev_id(self):
        return self.__get_prop('pci_dev_id')

    @property
    def pci_subsys_dev_id(self):
        return self.__get_prop('pci_subsys_dev_id')

    @property
    def pci_mfg_id(self):
        return self.__get_prop('pci_mfg_id')

    @property
    def pci_rev_id(self):
        return self.__get_prop('pci_rev_id')

    @property
    def pci_vendor_id(self):
        return self.__get_prop('pci_vendor_id')

    @property
    def pci_subsys_vendor_id(self):
        return self.__get_prop('pci_subsys_vendor_id')

    @property
    def drc_index(self):
        return self.__get_prop('drc_index')

    @property
    def drc_name(self):
        return self.__get_prop('drc_name')

    @property
    def adapter(self):
        """DEPRECATED - use 'io_adapter' method instead."""
        import warnings
        warnings.warn(
            _("IOSlot.adapter is deprecated! Use IOSlot.io_adapter instead."),
            DeprecationWarning)
        return self.io_adapter

    @property
    def io_adapter(self):
        """Returns the physical I/O Adapter for this slot.

        This will be one of two types. Either a generic I/O Adapter or a
        Physical Fibre Channel Adapter (PhysFCAdapter).
        """
        return self.__get_prop('io_adapter')
pypowervm-1.1.24/pypowervm/wrappers/shared_proc_pool.py0000664000175000017500000000603113571367171023057 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""SharedProcPool, the EntryWrapper for SharedProcessorPool."""

from oslo_log import log as logging

import pypowervm.util as u
import pypowervm.wrappers.entry_wrapper as ewrap

LOG = logging.getLogger(__name__)

# Display name of the system default shared processor pool (pool ID 0).
DEFAULT_POOL_DISPLAY_NAME = 'DefaultPool'

# Shared Processor Pool Constants
# (Schema element tag names within the SharedProcessorPool entry.)
_POOL_ID = 'PoolID'
_CURR_RSRV_PROC_UNITS = 'CurrentReservedProcessingUnits'
_ASSIGNED_PARTITIONS = 'AssignedPartitions'
_MAX_PROC_UNITS = 'MaximumProcessingUnits'
_PEND_RSRV_PROC_UNITS = 'PendingReservedProcessingUnits'
_AVAL_PROC_UNITS = 'AvailableProcUnits'
_POOL_NAME = 'PoolName'

# Required child-element ordering for the SharedProcessorPool payload.
_SHARED_EL_ORDER = (_ASSIGNED_PARTITIONS, _CURR_RSRV_PROC_UNITS,
                    _MAX_PROC_UNITS, _PEND_RSRV_PROC_UNITS, _POOL_ID,
                    _AVAL_PROC_UNITS, _POOL_NAME)


@ewrap.EntryWrapper.pvm_type('SharedProcessorPool',
                             child_order=_SHARED_EL_ORDER)
class SharedProcPool(ewrap.EntryWrapper):
    # EntryWrapper for the SharedProcessorPool REST object.

    @property
    def id(self):
        """Integer shared processor pool ID."""
        return self._get_val_int(_POOL_ID, default=0)

    @property
    def curr_rsrv_proc_units(self):
        """Floating point number of reserved processing units."""
        return self._get_val_float(_CURR_RSRV_PROC_UNITS, 0)

    @property
    def is_default(self):
        """If true, is the default processor pool."""
        return self.id == 0

    @property
    def name(self):
        """The name of the processor pool."""
        return self._get_val_str(_POOL_NAME)

    @name.setter
    def name(self, value):
        self.set_parm_value(_POOL_NAME, value)

    @property
    def max_proc_units(self):
        """Floating point number of the max processing units."""
        return self._get_val_float(_MAX_PROC_UNITS, 0)

    @max_proc_units.setter
    def max_proc_units(self, value):
        # Floats must be normalized before being sent to the API.
        self.set_parm_value(_MAX_PROC_UNITS, u.sanitize_float_for_api(value))

    @property
    def pend_rsrv_proc_units(self):
        """Floating point number of pending reserved proc units."""
        return self._get_val_float(_PEND_RSRV_PROC_UNITS, 0)

    @pend_rsrv_proc_units.setter
    def pend_rsrv_proc_units(self, value):
        self.set_parm_value(_PEND_RSRV_PROC_UNITS,
                            u.sanitize_float_for_api(value))

    @property
    def avail_proc_units(self):
        """Returns the available proc units in the pool.

        If the default pool, will return 0.
        """
        return self._get_val_float(_AVAL_PROC_UNITS, 0)
pypowervm-1.1.24/pypowervm/wrappers/iocard.py0000664000175000017500000013105113571367171020777 0ustar neoneo00000000000000# Copyright 2016, 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers, constants, and helpers around IOAdapter and its children.""" import pypowervm.const as pc import pypowervm.util as u import pypowervm.wrappers.entry_wrapper as ewrap # Constants for generic I/O Adapter IO_ADPT_ROOT = 'IOAdapter' _IO_ADPT_ID = 'AdapterID' _IO_ADPT_DESC = 'Description' _IO_ADPT_DEV_NAME = 'DeviceName' _IO_ADPT_DEV_TYPE = 'DeviceType' _IO_ADPT_DYN_NAME = 'DynamicReconfigurationConnectorName' _IO_ADPT_PHYS_LOC = 'PhysicalLocation' _IO_ADPT_UDID = 'UniqueDeviceID' _IO_ADPT_CHOICE = 'IOAdapterChoice' # SR-IOV Adapter constants _SRIOV_ADAPTER_ID = 'SRIOVAdapterID' _SRIOV_ADAPTER_MODE = 'AdapterMode' _SRIOV_ADAPTER_PERSONALITY = 'Personality' _SRIOV_ADAPTER_STATE = 'AdapterState' _SRIOV_CONVERGED_ETHERNET_PHYSICAL_PORTS = 'ConvergedEthernetPhysicalPorts' _SRIOV_ETHERNET_PHYSICAL_PORTS = 'EthernetPhysicalPorts' _ROCE_SRIOV_PHYSICAL_PORTS = 'SRIOVRoCEPhysicalPorts' # SR-IOV physical port constants _SRIOVPP_CFG_SPEED = 'ConfiguredConnectionSpeed' _SRIOVPP_CFG_FLOWCTL = 'ConfiguredFlowControl' _SRIOVPP_CFG_MTU = 'ConfiguredMTU' _SRIOVPP_CFG_OPTIONS = 'ConfiguredOptions' _SRIOVPP_CFG_SWMODE = 'ConfiguredPortSwitchMode' _SRIOVPP_CURR_SPEED = 'CurrentConnectionSpeed' _SRIOVPP_CURR_OPTIONS = 'CurrentOptions' _SRIOVPP_LBL = 'Label' _SRIOVPP_LOC_CODE = 'LocationCode' _SRIOVPP_MAX_DIAG_LPS = 'MaximumDiagnosticsLogicalPorts' _SRIOVPP_MAX_PROM_LPS = 'MaximumPromiscuousLogicalPorts' _SRIOVPP_ID = 'PhysicalPortID' _SRIOVPP_CAPABILITIES = 'PortCapabilities' _SRIOVPP_TYPE = 'PortType' _SRIOVPP_LP_LIMIT = 'PortLogicalPortLimit' _SRIOVPP_SUBLBL = 'SubLabel' _SRIOVPP_SUPP_SPEEDS = 'SupportedConnectionSpeeds' _SRIOVPP_SUPP_MTUS = 'SupportedMTUs' _SRIOVPP_SUPP_OPTIONS = 'SupportedOptions' _SRIOVPP_SUPP_PRI_ACL = 'SupportedPriorityAccessControlList' _SRIOVPP_LINK_STATUS = 'LinkStatus' _SRIOVPP_DEF_PORTVF_CFG = 'DefaultPortVFConfigurationOption' _SRIOVPP_SEL_PORTVF_CFG = 'SelectedPortVFConfigurationOption' _SRIOVPP_SUPP_PORTVF_CFG = 
'SupportedPortVFConfigurationOptions' _SRIOVPP_ALLOC_CAPACITY = 'AllocatedCapacity' _SRIOVPP_CFG_MAX_ETHERNET_LPS = 'ConfiguredMaxEthernetLogicalPorts' _SRIOVPP_CFG_ETHERNET_LPS = 'ConfiguredEthernetLogicalPorts' _SRIOVPP_MAX_PVID = 'MaximumPortVLANID' _SRIOVPP_MAX_VLAN_ID = 'MaximumVLANID' _SRIOVPP_MIN_ETHERNET_CAPACITY_GRAN = 'MinimumEthernetCapacityGranularity' _SRIOVPP_MIN_PVID = 'MinimumPortVLANID' _SRIOVPP_MIN_VLAN_ID = 'MinimumVLANID' _SRIOVPP_MAX_SUPP_ETHERNET_LPS = 'MaxSupportedEthernetLogicalPorts' _SRIOVPP_MAX_ALLOW_ETH_VLANS = 'MaximumAllowedEthVLANs' _SRIOVPP_MAX_ALLOW_ETH_MACS = 'MaximumAllowedEthMACs' _SRIOVPP_SUPP_VLAN_RESTR = 'SupportedVLANRestrictions' _SRIOVPP_SUPP_MAC_RESTR = 'SupportedMACRestrictions' _SRIOVPP_CFG_MX_FCOE_LPS = 'ConfiguredMaxFiberChannelOverEthernetLogicalPorts' _SRIOVPP_DEF_FCTARG_BACK_DEV = 'DefaultFiberChannelTargetsForBackingDevice' _SRIOVPP_DEF_FTARG_NBACK_DEV = 'DefaultFiberChannelTargetsForNonBackingDevice' _SRIOVPP_CFG_FCOE_LPS = 'ConfiguredFiberChannelOverEthernetLogicalPorts' _SRIOVPP_MIN_FCOE_CAPACITY_GRAN = 'MinimumFCoECapacityGranularity' _SRIOVPP_FC_TARGET_ROUNDING_VALUE = 'FiberChannelTargetsRoundingValue' _SRIOVPP_MX_SUPP_FCOE_LPS = 'MaxSupportedFiberChannelOverEthernetLogicalPorts' _SRIOVPP_MAX_FC_TARGETS = 'MaximumFiberChannelTargets' _SRIOVPP_EL_ORDER = ( _SRIOVPP_CFG_SPEED, _SRIOVPP_CFG_FLOWCTL, _SRIOVPP_CFG_MTU, _SRIOVPP_CFG_OPTIONS, _SRIOVPP_CFG_SWMODE, _SRIOVPP_CURR_SPEED, _SRIOVPP_CURR_OPTIONS, _SRIOVPP_LBL, _SRIOVPP_LOC_CODE, _SRIOVPP_MAX_DIAG_LPS, _SRIOVPP_MAX_PROM_LPS, _SRIOVPP_ID, _SRIOVPP_CAPABILITIES, _SRIOVPP_TYPE, _SRIOVPP_LP_LIMIT, _SRIOVPP_SUBLBL, _SRIOVPP_SUPP_SPEEDS, _SRIOVPP_SUPP_MTUS, _SRIOVPP_SUPP_OPTIONS, _SRIOVPP_SUPP_PRI_ACL, _SRIOVPP_LINK_STATUS, _SRIOVPP_DEF_PORTVF_CFG, _SRIOVPP_SEL_PORTVF_CFG, _SRIOVPP_SUPP_PORTVF_CFG) _SRIOVEPP_EL_ORDER = _SRIOVPP_EL_ORDER + ( _SRIOVPP_ALLOC_CAPACITY, _SRIOVPP_CFG_MAX_ETHERNET_LPS, _SRIOVPP_CFG_ETHERNET_LPS, _SRIOVPP_MAX_PVID, _SRIOVPP_MAX_VLAN_ID, 
_SRIOVPP_MIN_ETHERNET_CAPACITY_GRAN, _SRIOVPP_MIN_PVID, _SRIOVPP_MIN_VLAN_ID, _SRIOVPP_MAX_SUPP_ETHERNET_LPS, _SRIOVPP_MAX_ALLOW_ETH_VLANS, _SRIOVPP_MAX_ALLOW_ETH_MACS, _SRIOVPP_SUPP_VLAN_RESTR, _SRIOVPP_SUPP_MAC_RESTR) _SRIOVCPP_EL_ORDER = _SRIOVEPP_EL_ORDER + ( _SRIOVPP_CFG_MX_FCOE_LPS, _SRIOVPP_DEF_FCTARG_BACK_DEV, _SRIOVPP_DEF_FTARG_NBACK_DEV, _SRIOVPP_CFG_FCOE_LPS, _SRIOVPP_MIN_FCOE_CAPACITY_GRAN, _SRIOVPP_FC_TARGET_ROUNDING_VALUE, _SRIOVPP_MX_SUPP_FCOE_LPS, _SRIOVPP_MAX_FC_TARGETS) # SR-IOV logical port constants _SRIOVLP_CFG_ID = 'ConfigurationID' _SRIOVLP_ID = 'LogicalPortID' _SRIOVLP_ADPT_ID = 'AdapterID' _SRIOVLP_DRC_NAME = 'DynamicReconfigurationConnectorName' _SRIOVLP_IS_FUNC = 'IsFunctional' _SRIOVLP_IS_PROMISC = 'IsPromiscous' # [sic] _SRIOVLP_IS_DIAG = 'IsDiagnostic' _SRIOVLP_IS_DEBUG = 'IsDebug' _SRIOVLP_IS_HUGE_DMA = 'IsHugeDMA' _SRIOVLP_DEV_NAME = 'DeviceName' _SRIOVLP_CFG_CAPACITY = 'ConfiguredCapacity' _SRIOVLP_CFG_MAX_CAPACITY = 'ConfiguredMaxCapacity' _SRIOVLP_PPORT_ID = 'PhysicalPortID' _SRIOVLP_PVID = 'PortVLANID' _SRIOVLP_LOC_CODE = 'LocationCode' _SRIOVLP_TUNING_BUF_ID = 'TuningBufferID' _SRIOVLP_VNIC_PORT_USAGE = 'VNICPortUsage' _SRIOVLP_ASSOC_LPARS = 'AssociatedLogicalPartitions' _SRIOVLP_ALLOWED_MACS = 'AllowedMACAddresses' _SRIOVLP_MAC = 'MACAddress' _SRIOVLP_CUR_MAC = 'CurrentMACAddress' _SRIOVLP_8021Q_ALLOW_PRI = 'IEEE8021QAllowablePriorities' _SRIOVLP_8021Q_PRI = 'IEEE8021QPriority' _SRIOVLP_MAC_FLAGS = 'MACAddressFlags' _SRIOVLP_NUM_ALLOWED_VLANS = 'NumberOfAllowedVLANs' _SRIOVLP_ALLOWED_VLANS = 'AllowedVLANs' _SRIOVLP_EL_ORDER = ( _SRIOVLP_CFG_ID, _SRIOVLP_ID, _SRIOVLP_ADPT_ID, _SRIOVLP_DRC_NAME, _SRIOVLP_IS_FUNC, _SRIOVLP_IS_PROMISC, _SRIOVLP_IS_DIAG, _SRIOVLP_IS_DEBUG, _SRIOVLP_IS_HUGE_DMA, _SRIOVLP_DEV_NAME, _SRIOVLP_CFG_CAPACITY, _SRIOVLP_CFG_MAX_CAPACITY, _SRIOVLP_PPORT_ID, _SRIOVLP_PVID, _SRIOVLP_LOC_CODE, _SRIOVLP_TUNING_BUF_ID, _SRIOVLP_VNIC_PORT_USAGE, _SRIOVLP_ASSOC_LPARS, _SRIOVLP_ALLOWED_MACS, _SRIOVLP_MAC, 
_SRIOVLP_CUR_MAC, _SRIOVLP_8021Q_ALLOW_PRI, _SRIOVLP_8021Q_PRI, _SRIOVLP_MAC_FLAGS, _SRIOVLP_NUM_ALLOWED_VLANS, _SRIOVLP_ALLOWED_VLANS) # Top-level VNIC properties _VNIC_DED = 'VirtualNICDedicated' _VNIC_ADP_TYPE = 'AdapterType' _VNIC_DRC_NAME = 'DynamicReconfigurationConnectorName' _VNIC_LOC_CODE = 'LocationCode' _VNIC_LPAR_ID = 'LocalPartitionID' _VNIC_REQ_ADP = 'RequiredAdapter' _VNIC_VARIED_ON = 'VariedOn' _VNIC_USE_NEXT_AVAIL_SLOT = 'UseNextAvailableSlotID' _VNIC_USE_NEXT_AVAIL_HIGH_SLOT = 'UseNextAvailableHighSlotID' _VNIC_SLOT_NUM = 'VirtualSlotNumber' _VNIC_ENABLED = 'Enabled' _VNIC_DETAILS = 'Details' _VNIC_BACK_DEVS = 'AssociatedBackingDevices' _VNIC_EL_ORDER = ( _VNIC_DED, _VNIC_ADP_TYPE, _VNIC_DRC_NAME, _VNIC_LOC_CODE, _VNIC_LPAR_ID, _VNIC_REQ_ADP, _VNIC_VARIED_ON, _VNIC_USE_NEXT_AVAIL_SLOT, _VNIC_USE_NEXT_AVAIL_HIGH_SLOT, _VNIC_SLOT_NUM, _VNIC_ENABLED, _VNIC_DETAILS, _VNIC_BACK_DEVS) # Properties for _VNICDetails (schema: VirtualNICDetails.Type) _VNICD_PVID = 'PortVLANID' _VNICD_PVID_PRI = 'PortVLANIDPriority' _VNICD_ALLOWED_VLANS = 'AllowedVLANIDs' _VNICD_MAC = 'MACAddress' _VNICD_ALLOWED_OS_MACS = 'AllowedOperatingSystemMACAddresses' _VNICD_OS_DEV_NAME = 'OSDeviceName' _VNICD_DES_MODE = 'DesiredMode' _VNICD_DES_CAP_PCT = 'DesiredCapacityPercentage' _VNICD_AUTO_FB = 'AutoFailBack' _VNICD_IP_ADDR = 'IPAddress' _VNICD_SUBNET_MASK = 'SubnetMask' _VNICD_GATEWAY = 'Gateway' _VNICD_EL_ORDER = ( _VNICD_PVID, _VNICD_PVID_PRI, _VNICD_ALLOWED_VLANS, _VNICD_MAC, _VNICD_ALLOWED_OS_MACS, _VNICD_OS_DEV_NAME, _VNICD_DES_MODE, _VNICD_DES_CAP_PCT, _VNICD_DES_CAP_PCT) # Properties for VNICBackDev (schema: VirtualNICSRIOVBackingDevice) _VNICBD_CHOICE = 'VirtualNICBackingDeviceChoice' _VNICBD = 'VirtualNICSRIOVBackingDevice' _VNICBD_DEV_TYP = 'DeviceType' _VNICBD_VIOS = 'AssociatedVirtualIOServer' _VNICBD_SWITCH = 'AssociatedVirtualNICSwitch' _VNICBD_VNIC = 'AssociatedVirtualNICDedicated' _VNICBD_ACTIVE = 'IsActive' _VNICBD_STATUS = 'Status' _VNICBD_FAILOVER_PRI = 
'FailOverPriority' _VNICBD_ACTION = 'BackingDeviceAction' _VNICBD_SRIOV_ADP_ID = 'RelatedSRIOVAdapterID' _VNICBD_CUR_CAP_PCT = 'CurrentCapacityPercentage' _VNICBD_MAX_CAP_PCT = 'MaxCapacityPercentage' _VNICBD_PPORT_ID = 'RelatedSRIOVPhysicalPortID' _VNICBD_LPORT = 'RelatedSRIOVLogicalPort' _VNICBD_DES_CAP_PCT = 'DesiredCapacityPercentage' _VNIC_BD_DES_MAX_CAP_PCT = 'DesiredMaxCapacityPercentage' # For building the VIOS HREF. (Would have liked to use pypowervm.wrappers. # virtual_io_server.VIOS.schema_type, but circular import.) _VIOS = 'VirtualIOServer' _VNICBD_EL_ORDER = ( _VNICBD_DEV_TYP, _VNICBD_VIOS, _VNICBD_SWITCH, _VNICBD_VNIC, _VNICBD_ACTIVE, _VNICBD_STATUS, _VNICBD_FAILOVER_PRI, _VNICBD_ACTION, _VNICBD_SRIOV_ADP_ID, _VNICBD_CUR_CAP_PCT, _VNICBD_MAX_CAP_PCT, _VNICBD_PPORT_ID, _VNICBD_LPORT, _VNICBD_DES_CAP_PCT, _VNIC_BD_DES_MAX_CAP_PCT) # Physical Fibre Channel Port Constants _PFC_PORT_LOC_CODE = 'LocationCode' _PFC_PORT_NAME = 'PortName' _PFC_PORT_UDID = 'UniqueDeviceID' PFC_PORT_WWPN = 'WWPN' _PFC_PORT_AVAILABLE_PORTS = 'AvailablePorts' _PFC_PORT_TOTAL_PORTS = 'TotalPorts' PFC_PORTS_ROOT = 'PhysicalFibreChannelPorts' PFC_PORT_ROOT = 'PhysicalFibreChannelPort' class SRIOVAdapterMode(object): """Enumeration for SR-IOV adapter modes (from SRIOVAdapterMode.Enum).""" SRIOV = 'Sriov' DEDICATED = 'Dedicated' FORCE_DEDICATED = 'ForceDedicated' UNKNOWN = 'unknown' class SRIOVAdapterPersonality(object): """Enum for SR-IOV adapter personalities (from SRIOVPersonality.Enum). 
""" NO_QOS = "NO_QOS" MIN_CAPACITY = "MIN_CAPACITY" MAX_MIN_CAPACITY = "MAX_MIN_CAPACITY" UNKNOWN = "UNKNOWN" class SRIOVAdapterState(object): """Enumeration for SR-IOV adapter states (from SRIOVAdapterState.Enum).""" INITIALIZING = 'Initializing' NOT_CONFIG = 'NotConfigured' POWERED_OFF = 'PoweredOff' POWERING_OFF = 'PoweringOff' RUNNING = 'Running' DUMPING = 'Dumping' FAILED = 'Failed' MISSING = 'Missing' MISMATCH = 'PCIEIDMismatch' class SRIOVSpeed(object): """Enumeration for SR-IOV speed (from SRIOVConnectionSpeed.Enum).""" E10M = 'E10Mbps' E100M = 'E100Mbps' E1G = 'E1Gbps' E10G = 'E10Gbps' E40G = 'E40Gbps' E100G = 'E100Gbps' AUTO = 'Auto' UNKNOWN = 'Unknown' class SRIOVPPMTU(object): """SR-IOV Phys Port Max Transmission Unit (SRIOVPhysicalPortMTU.Enum).""" E1500 = "E_1500" E9000 = "E_9000" UNKNOWN = 'Unknown' class VNICBackDevStatus(object): """Enumeration of possible VNIC backing device statuses.""" OPERATIONAL = 'OPERATIONAL' POWERED_OFF = 'POWERED_OFF' LINK_DOWN = 'LINK_DOWN' NETWORK_ERROR = 'NETWORK_ERROR' UNRESPONSIVE = 'UNRESPONSIVE' ADAPTER_ERROR = 'ADAPTER_ERROR' UNKNOWN = 'UNKNOWN' class VNICPortUsage(object): """Enumeration of possible VNIC port usages.""" NOT_VNIC = 'NOT_VNIC' DEDICATED_VNIC = 'DEDICATED_VNIC' SHARED_VNIC = 'SHARED_VNIC' @ewrap.ElementWrapper.pvm_type(IO_ADPT_ROOT, has_metadata=True) class IOAdapter(ewrap.ElementWrapper): """A generic IO Adapter. This is a device plugged in to the system. The location code indicates where it is plugged into the system. 
""" @property def id(self): """The adapter system id.""" return self._get_val_str(_IO_ADPT_ID) @property def description(self): return self._get_val_str(_IO_ADPT_DESC) @property def dev_name(self): return self._get_val_str(_IO_ADPT_DEV_NAME) @property def dev_type(self): return self._get_val_str(_IO_ADPT_DEV_TYPE) @property def drc_name(self): return self._get_val_str(_IO_ADPT_DYN_NAME) @property def phys_loc_code(self): return self._get_val_str(_IO_ADPT_PHYS_LOC) @property def udid(self): return self._get_val_str(_IO_ADPT_UDID) @ewrap.ElementWrapper.pvm_type('PhysicalFibreChannelAdapter', has_metadata=True) class PhysFCAdapter(IOAdapter): """A Physical Fibre Channel I/O Adapter. Extends the generic I/O Adapter, but provides port detail as well. The adapter has a set of Physical Fibre Channel Ports (PhysFCPort). """ @property def fc_ports(self): """A set of Physical Fibre Channel Ports. The set of PhysFCPort's that are attached to this adapter. The data on this should be considered read only. """ es = ewrap.WrapperElemList(self._find_or_seed(PFC_PORTS_ROOT), PhysFCPort) return es @ewrap.ElementWrapper.pvm_type('PhysicalFibreChannelPort', has_metadata=True) class PhysFCPort(ewrap.ElementWrapper): """A Physical Fibre Channel Port.""" @classmethod def bld_ref(cls, adapter, name, ref_tag=None): """Create a wrapper that serves as a reference to a port. This is typically used when another element (ex. Virtual FC Mapping) requires a port to be specified in it. Rather than query to find the port, one can simply be built and referenced as a child element. :param adapter: A pypowervm.adapter.Adapter (for traits, etc.) :param name: The name of the physical FC port. End users need to verify the port name. Typically starts with 'fcs'. :param ref_tag: (Optional, Default=None) If set, override the default 'PhysicalFibreChannelPort' tag/label in the element with the string specified. 
""" port = super(PhysFCPort, cls)._bld(adapter) port._name(name) if ref_tag: port.element.tag = ref_tag return port @property def loc_code(self): return self._get_val_str(_PFC_PORT_LOC_CODE) @property def name(self): return self._get_val_str(_PFC_PORT_NAME) def _name(self, value): return self.set_parm_value(_PFC_PORT_NAME, value) @property def udid(self): return self._get_val_str(_PFC_PORT_UDID) @property def wwpn(self): return self._get_val_str(PFC_PORT_WWPN) @property def npiv_available_ports(self): return self._get_val_int(_PFC_PORT_AVAILABLE_PORTS, 0) @property def npiv_total_ports(self): return self._get_val_int(_PFC_PORT_TOTAL_PORTS, 0) @ewrap.ElementWrapper.pvm_type('SRIOVAdapter', has_metadata=True) class SRIOVAdapter(IOAdapter): """The SR-IOV adapters for this system.""" @property def sriov_adap_id(self): """Not to be confused with the 'id' property (IOAdapter.AdapterID).""" return self._get_val_int(_SRIOV_ADAPTER_ID) @property def mode(self): return self._get_val_str(_SRIOV_ADAPTER_MODE) @mode.setter def mode(self, value): self.set_parm_value(_SRIOV_ADAPTER_MODE, value) @property def personality(self): return self._get_val_str(_SRIOV_ADAPTER_PERSONALITY) @property def state(self): return self._get_val_str(_SRIOV_ADAPTER_STATE) def _convergedphysicalports(self): """Retrieve all Converged physical ports.""" elem = self._find(_SRIOV_CONVERGED_ETHERNET_PHYSICAL_PORTS) if elem is None: return None return ewrap.WrapperElemList(elem, child_class=SRIOVConvPPort) def _rocephysicalports(self): """Retrieve all RoCE adapter physical ethernet ports.""" elem = self._find(_ROCE_SRIOV_PHYSICAL_PORTS) if elem is None: return None return ewrap.WrapperElemList(elem, child_class=SRIOVRoCEPPort) def _ethernetphysicalports(self): """Retrieve all Ethernet physical ports.""" elem = self._find(_SRIOV_ETHERNET_PHYSICAL_PORTS) if elem is None: return None return ewrap.WrapperElemList(elem, child_class=SRIOVEthPPort) @property def phys_ports(self): """Retrieve Combined list of all 
physical ports. Returns a list of converged and ethernet physical ports. This list is not modifiable, cannot insert or remove items from it, however, individual item can be updated. For example, label and sublabels can be updated. """ allports = [] cports = self._convergedphysicalports() # Physical ports for RoCE adapters is part of different schema rports = self._rocephysicalports() eports = self._ethernetphysicalports() for c in cports or []: allports.append(c) for r in rports or []: allports.append(r) for e in eports or []: allports.append(e) # Set the ports' backpointers to this SRIOVAdapter for pport in allports: pport._sriov_adap = self return allports @ewrap.ElementWrapper.pvm_type('SRIOVEthernetPhysicalPort', has_metadata=True, child_order=_SRIOVEPP_EL_ORDER) class SRIOVEthPPort(ewrap.ElementWrapper): """The SR-IOV Ethernet Physical port.""" def __init__(self): super(SRIOVEthPPort, self).__init__() # This must be set by the instantiating SRIOVAdapter. self._sriov_adap = None @property def sriov_adap(self): """Backpointer to the SRIOVAdapter owning this physical port.""" if self._sriov_adap is None: raise NotImplementedError("Developer error: SRIOVAdapter pointer " "not set!") return self._sriov_adap @property def sriov_adap_id(self): """The integer sriov_adap_id of the SRIOVAdapter owning this port.""" return self.sriov_adap.sriov_adap_id @property def label(self): return self._get_val_str(_SRIOVPP_LBL) @label.setter def label(self, value): self.set_parm_value(_SRIOVPP_LBL, value) @property def loc_code(self): return self._get_val_str(_SRIOVPP_LOC_CODE) @property def port_id(self): return self._get_val_int(_SRIOVPP_ID) @property def sublabel(self): return self._get_val_str(_SRIOVPP_SUBLBL) @sublabel.setter def sublabel(self, value): self.set_parm_value(_SRIOVPP_SUBLBL, value) @property def link_status(self): return self._get_val_bool(_SRIOVPP_LINK_STATUS) @property def cfg_max_lps(self): return self._get_val_int(_SRIOVPP_CFG_MAX_ETHERNET_LPS) 
@ewrap.ElementWrapper.pvm_type('SRIOVEthernetPhysicalPort',
                               has_metadata=True,
                               child_order=_SRIOVEPP_EL_ORDER)
class SRIOVEthPPort(ewrap.ElementWrapper):
    """The SR-IOV Ethernet Physical port."""

    def __init__(self):
        super(SRIOVEthPPort, self).__init__()
        # Populated by the owning SRIOVAdapter (see SRIOVAdapter.phys_ports).
        self._sriov_adap = None

    @property
    def sriov_adap(self):
        """Backpointer to the SRIOVAdapter owning this physical port."""
        if self._sriov_adap is None:
            raise NotImplementedError("Developer error: SRIOVAdapter pointer "
                                      "not set!")
        return self._sriov_adap

    @property
    def sriov_adap_id(self):
        """The integer sriov_adap_id of the SRIOVAdapter owning this port."""
        return self.sriov_adap.sriov_adap_id

    @property
    def label(self):
        return self._get_val_str(_SRIOVPP_LBL)

    @label.setter
    def label(self, value):
        self.set_parm_value(_SRIOVPP_LBL, value)

    @property
    def loc_code(self):
        return self._get_val_str(_SRIOVPP_LOC_CODE)

    @property
    def port_id(self):
        return self._get_val_int(_SRIOVPP_ID)

    @property
    def sublabel(self):
        return self._get_val_str(_SRIOVPP_SUBLBL)

    @sublabel.setter
    def sublabel(self, value):
        self.set_parm_value(_SRIOVPP_SUBLBL, value)

    @property
    def link_status(self):
        return self._get_val_bool(_SRIOVPP_LINK_STATUS)

    @property
    def cfg_max_lps(self):
        return self._get_val_int(_SRIOVPP_CFG_MAX_ETHERNET_LPS)

    @cfg_max_lps.setter
    def cfg_max_lps(self, value):
        self.set_parm_value(_SRIOVPP_CFG_MAX_ETHERNET_LPS, value)

    @property
    def cfg_lps(self):
        return self._get_val_int(_SRIOVPP_CFG_ETHERNET_LPS)

    @property
    def min_granularity(self):
        """The minimum granularity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_SRIOVPP_MIN_ETHERNET_CAPACITY_GRAN)

    @property
    def supp_max_lps(self):
        return self._get_val_int(_SRIOVPP_MAX_SUPP_ETHERNET_LPS)

    @property
    def allocated_capacity(self):
        """The allocated capacity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_SRIOVPP_ALLOC_CAPACITY)

    @property
    def curr_speed(self):
        return self._get_val_str(_SRIOVPP_CURR_SPEED)

    @property
    def mtu(self):
        """Result should be a SRIOVPPMTU value."""
        return self._get_val_str(_SRIOVPP_CFG_MTU)

    @mtu.setter
    def mtu(self, val):
        """Input val should be a SRIOVPPMTU value."""
        self.set_parm_value(_SRIOVPP_CFG_MTU, val)

    @property
    def switch_mode(self):
        """Result should be a network.VSwitchMode value."""
        return self._get_val_str(_SRIOVPP_CFG_SWMODE)

    @switch_mode.setter
    def switch_mode(self, val):
        """Input val should be a network.VSwitchMode value."""
        self.set_parm_value(_SRIOVPP_CFG_SWMODE, val)

    @property
    def flow_ctl(self):
        return self._get_val_bool(_SRIOVPP_CFG_FLOWCTL)

    @flow_ctl.setter
    def flow_ctl(self, val):
        self.set_parm_value(_SRIOVPP_CFG_FLOWCTL, u.sanitize_bool_for_api(val))


@ewrap.ElementWrapper.pvm_type('SRIOVConvergedNetworkAdapterPhysicalPort',
                               has_metadata=True,
                               child_order=_SRIOVCPP_EL_ORDER)
class SRIOVConvPPort(SRIOVEthPPort):
    """The SR-IOV Converged Physical port."""
    pass


@ewrap.ElementWrapper.pvm_type('SRIOVRoCEPhysicalPort',
                               has_metadata=True,
                               child_order=_SRIOVCPP_EL_ORDER)
class SRIOVRoCEPPort(SRIOVEthPPort):
    """The SR-IOV RoCE Physical Ethernet port."""
    pass
@ewrap.EntryWrapper.pvm_type('SRIOVEthernetLogicalPort',
                             child_order=_SRIOVLP_EL_ORDER)
class SRIOVEthLPort(ewrap.EntryWrapper):
    """The SR-IOV Ethernet Logical port."""

    @classmethod
    def bld(cls, adapter, sriov_adap_id, pport_id, pvid=None, mac=None,
            allowed_vlans=u.VLANList.ALL, allowed_macs=u.MACList.ALL,
            is_promisc=False, cfg_capacity=None, max_capacity=None):
        """Create a wrapper used to create a logical port on the server.

        :param adapter: A pypowervm.adapter.Adapter (for traits, etc.)
        :param sriov_adap_id: Corresponds to SRIOVAdapter.SRIOVAdapterID,
                              *not* SRIOVAdapter.AdapterID
        :param pport_id: The physical port ID this logical port is part of.
        :param pvid: The port VLAN identifier for this logical port.  Any
                     untagged traffic passing through this port will have
                     this VLAN tag added.
        :param mac: The MAC address to assign to the logical port.
        :param allowed_vlans: An integer list of VLANS allowed on this
                              logical port.  Specify
                              pypowervm.util.VLANList.ALL to allow all VLANs
                              or .NONE to allow no VLANs on this logical
                              port.  Default: ALL.
        :param allowed_macs: List of string MAC addresses allowed on this
                             logical port.  Specify
                             pypowervm.util.MACList.ALL to allow all MAC
                             addresses, or .NONE to allow no MAC addresses
                             on this logical port.  Default: ALL.
        :param is_promisc: Set to True if using the logical port for bridging
                           (e.g. SEA, OVS, etc.); False if assigning directly
                           to an LPAR.  Only one logical port per physical
                           port may be promiscuous.
        :param cfg_capacity: The configured capacity of the logical port as a
                             percentage.  This represents the minimum
                             bandwidth this logical port will receive, as a
                             percentage of bandwidth available from the
                             physical port.  The valid values are
                             0.0 <= x <= 1.0 up to 2 decimal places.  This
                             will be interpreted as a percentage, where
                             0.02 == 2%.
        :param max_capacity: The configured max capacity of the logical port
                             as a percentage.  This represents the maximum
                             bandwidth this logical port will receive.  The
                             valid values are 0.0 <= x <= 1.0 up to 2 decimal
                             places.  This will be interpreted as a
                             percentage, where 0.02 == 2%.
        """
        lport = super(SRIOVEthLPort, cls)._bld(adapter)
        lport._sriov_adap_id(sriov_adap_id)
        lport._pport_id(pport_id)
        if pvid is not None:
            lport.pvid = pvid
        lport.allowed_vlans = allowed_vlans
        if mac is not None:
            lport._mac(mac)
        lport.allowed_macs = allowed_macs
        lport._is_promisc(is_promisc)
        if cfg_capacity is not None:
            lport._cfg_capacity(cfg_capacity)
        if max_capacity is not None:
            lport._cfg_max_capacity(max_capacity)
        return lport

    @property
    def lport_id(self):
        return self._get_val_int(_SRIOVLP_ID)

    @property
    def sriov_adap_id(self):
        return self._get_val_int(_SRIOVLP_ADPT_ID)

    def _sriov_adap_id(self, value):
        self.set_parm_value(_SRIOVLP_ADPT_ID, value)

    @property
    def is_promisc(self):
        return self._get_val_bool(_SRIOVLP_IS_PROMISC)

    def _is_promisc(self, value):
        self.set_parm_value(_SRIOVLP_IS_PROMISC,
                            u.sanitize_bool_for_api(value))

    @property
    def dev_name(self):
        return self._get_val_str(_SRIOVLP_DEV_NAME)

    @property
    def cfg_capacity(self):
        """The configured capacity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_SRIOVLP_CFG_CAPACITY)

    def _cfg_capacity(self, value):
        """Set the configured capacity.

        :param value: The configured capacity value.  The valid values are
                      0.0 <= x <= 1.0 up to 2 decimal places.  This will be
                      interpreted as a percentage, where 0.02 == 2%.
        """
        self.set_parm_value(_SRIOVLP_CFG_CAPACITY,
                            u.sanitize_percent_for_api(value))

    @property
    def cfg_max_capacity(self):
        """The configured maximum capacity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_SRIOVLP_CFG_MAX_CAPACITY)

    def _cfg_max_capacity(self, value):
        """Set the configured maximum capacity.

        :param value: The configured max capacity value.  The valid values
                      are 0.0 <= x <= 1.0 up to 2 decimal places.  This will
                      be interpreted as a percentage, where 0.02 == 2%.
        """
        self.set_parm_value(_SRIOVLP_CFG_MAX_CAPACITY,
                            u.sanitize_percent_for_api(value))

    @property
    def pport_id(self):
        """The physical port short ID."""
        return self._get_val_int(_SRIOVLP_PPORT_ID)

    def _pport_id(self, value):
        """Internal setter for the physical port short ID."""
        self.set_parm_value(_SRIOVLP_PPORT_ID, value)

    @property
    def pvid(self):
        return self._get_val_int(_SRIOVLP_PVID)

    @pvid.setter
    def pvid(self, value):
        self.set_parm_value(_SRIOVLP_PVID, value)

    @property
    def allowed_vlans(self):
        raw = self._get_val_str(_SRIOVLP_ALLOWED_VLANS)
        if raw is None:
            return None
        return u.VLANList.unmarshal(raw)

    @allowed_vlans.setter
    def allowed_vlans(self, vlans):
        self.set_parm_value(_SRIOVLP_ALLOWED_VLANS, u.VLANList.marshal(vlans))

    @property
    def mac(self):
        """MAC address of the format XXXXXXXXXXXX (12 uppercase hex digits).

        This is the MAC address "burned into" the logical port.  The actual
        MAC address on the interface (cur_mac) may be this value or the value
        set from within the OS on the VM.
        """
        return self._get_val_str(_SRIOVLP_MAC)

    def _mac(self, value):
        self.set_parm_value(_SRIOVLP_MAC, u.sanitize_mac_for_api(value))

    @property
    def allowed_macs(self):
        raw = self._get_val_str(_SRIOVLP_ALLOWED_MACS)
        if raw is None:
            return None
        return u.MACList.unmarshal(raw)

    @allowed_macs.setter
    def allowed_macs(self, maclist):
        self.set_parm_value(_SRIOVLP_ALLOWED_MACS, u.MACList.marshal(maclist))

    @property
    def cur_mac(self):
        """MAC address of the format XXXXXXXXXXXX (12 uppercase hex digits).

        This is the real value set on the interface, possibly by the VM's OS.

        Note that some SR-IOV cards are broken and don't report the
        OS-assigned value correctly.  In such cases, cur_mac will report the
        same as mac.
        """
        return self._get_val_str(_SRIOVLP_CUR_MAC)

    @property
    def loc_code(self):
        return self._get_val_str(_SRIOVLP_LOC_CODE)

    @property
    def vnic_port_usage(self):
        return self._get_val_str(_SRIOVLP_VNIC_PORT_USAGE)
""" return self._get_val_str(_SRIOVLP_CUR_MAC) @property def loc_code(self): return self._get_val_str(_SRIOVLP_LOC_CODE) @property def vnic_port_usage(self): return self._get_val_str(_SRIOVLP_VNIC_PORT_USAGE) @ewrap.EntryWrapper.pvm_type(_VNIC_DED, child_order=_VNIC_EL_ORDER) class VNIC(ewrap.EntryWrapper): """A dedicated, possibly-redundant Virtual NIC.""" @classmethod def bld(cls, adapter, pvid=None, slot_num=None, allowed_vlans=u.VLANList.ALL, mac_addr=None, allowed_macs=u.MACList.ALL, back_devs=None): """Build a new VNIC wrapper suitable for .create() A VNIC is a CHILD object on a LogicalPartition. Usage models: vnic = VNIC.bld(...) vnic.back_devs.append(back_dev1) ... or vnic = VNIC.bld(..., back_devs=[back_dev1, back_dev2, ...]) then vnic.create(parent=lpar_wrap) :param adapter: pypowervm.adapter.Adapter for REST API communication. :param pvid: Port VLAN ID for this vNIC. If not specified, the vNIC's traffic is untagged. :param slot_num: Desired virtual slot number on the owning LPAR. :param allowed_vlans: An integer list of VLANS allowed on this vNIC. Specify pypowervm.util.VLANList.ALL to allow all VLANs or .NONE to allow no VLANs on this vNIC. Default: ALL. :param mac_addr: MAC address for the vNIC. :param allowed_macs: List of string MAC addresses allowed on this vNIC. Specify pypowervm.util.MACList.ALL to allow all MAC addresses, or .NONE to allow no MAC addresses on this vNIC. Default: ALL. :param back_devs: List of VNICBackDev wrappers each indicating a combination of VIOS, SR-IOV adapter and physical port on which to create the VF for the backing device. See VNICBackDev.bld. If not specified to bld, at least one must be added before the VNIC can be created. :return: A new VNIC wrapper. 
""" vnic = super(VNIC, cls)._bld(adapter) if slot_num is not None: vnic._slot(slot_num) else: vnic._use_next_avail_slot_id = True vnic._details = _VNICDetails._bld_new( adapter, pvid=pvid, allowed_vlans=allowed_vlans, mac_addr=mac_addr, allowed_macs=allowed_macs) if back_devs: vnic.back_devs = back_devs return vnic @property def drc_name(self): return self._get_val_str(_VNIC_DRC_NAME) @property def lpar_id(self): """The integer ID, not UUID, of the LPAR owning this VNIC.""" return self._get_val_int(_VNIC_LPAR_ID) @property def slot(self): return self._get_val_int(_VNIC_SLOT_NUM) def _slot(self, val): self.set_parm_value(_VNIC_SLOT_NUM, val) @property def _use_next_avail_slot_id(self): """Use next available (high) slot ID, true or false.""" unasi_field = (_VNIC_USE_NEXT_AVAIL_HIGH_SLOT if self.traits.has_high_slot else _VNIC_USE_NEXT_AVAIL_SLOT) return self._get_val_bool(unasi_field) @_use_next_avail_slot_id.setter def _use_next_avail_slot_id(self, unasi): """Use next available (high) slot ID. 
:param unasi: Boolean value to set (True or False) """ unasi_field = (_VNIC_USE_NEXT_AVAIL_HIGH_SLOT if self.traits.has_high_slot else _VNIC_USE_NEXT_AVAIL_SLOT) self.set_parm_value(unasi_field, u.sanitize_bool_for_api(unasi)) @property def pvid(self): """The integer port VLAN ID, or None if the vNIC has no PVID.""" return self._details.pvid @pvid.setter def pvid(self, val): self._details.pvid = val @property def allowed_vlans(self): return self._details.allowed_vlans @allowed_vlans.setter def allowed_vlans(self, vlans): self._details.allowed_vlans = vlans @property def mac(self): """MAC address of the format XXXXXXXXXXXX (12 uppercase hex digits).""" return self._details.mac def _mac(self, val): self._details._mac(val) @property def allowed_macs(self): return self._details.allowed_macs @allowed_macs.setter def allowed_macs(self, maclist): self._details.allowed_macs = maclist @property def capacity(self): """The capacity (float, 0.0-1.0) of the active backing logical port.""" return self._details.capacity @property def _details(self): return _VNICDetails.wrap(self._find_or_seed(_VNIC_DETAILS)) @_details.setter def _details(self, val): self.element.replace(self._find_or_seed(_VNIC_DETAILS), val.element) @property def back_devs(self): return ewrap.WrapperElemList(self._find_or_seed(_VNIC_BACK_DEVS), child_class=VNICBackDev, indirect=_VNICBD_CHOICE) @back_devs.setter def back_devs(self, new_devs): self.replace_list(_VNIC_BACK_DEVS, new_devs, indirect=_VNICBD_CHOICE) @property def auto_pri_failover(self): return self._details.auto_pri_failover @auto_pri_failover.setter def auto_pri_failover(self, val): self._details.auto_pri_failover = val @ewrap.Wrapper.xag_property(pc.XAG.ADV) def ip_address(self): """Returns the IP Address of the network interface.""" return self._details.ip_address @ewrap.Wrapper.xag_property(pc.XAG.ADV) def subnet_mask(self): """Returns the subnet mask of the network interface.""" return self._details.subnet_mask 
@ewrap.ElementWrapper.pvm_type(_VNIC_DETAILS, has_metadata=True,
                               child_order=_VNICD_EL_ORDER)
class _VNICDetails(ewrap.ElementWrapper):
    """The 'Details' sub-element of a VirtualNICDedicated."""

    @classmethod
    def _bld_new(cls, adapter, pvid=None, allowed_vlans=u.VLANList.ALL,
                 mac_addr=None, allowed_macs=u.MACList.ALL):
        """Create a new _VNICDetails wrapper suitable for insertion into a VNIC.

        Not to be called outside of VNIC.bld().

        :param adapter: pypowervm.adapter.Adapter for REST API communication.
        :param pvid: Port VLAN ID for this vNIC.  If not specified, the
                     vNIC's traffic is untagged.
        :param allowed_vlans: An integer list of VLANS allowed on this vNIC.
                              Specify pypowervm.util.VLANList.ALL to allow
                              all VLANs or .NONE to allow no VLANs on this
                              vNIC.  Default: ALL.
        :param mac_addr: MAC address for the vNIC.
        :param allowed_macs: List of string MAC addresses allowed on this
                             vNIC.  Specify pypowervm.util.MACList.ALL to
                             allow all MAC addresses, or .NONE to allow no
                             MAC addresses on this vNIC.  Default: ALL.
        :return: A new _VNICDetails wrapper.
        """
        vnicd = super(_VNICDetails, cls)._bld(adapter)
        if pvid is not None:
            vnicd.pvid = pvid
        vnicd.allowed_vlans = allowed_vlans
        if mac_addr is not None:
            vnicd._mac(mac_addr)
        vnicd.allowed_macs = allowed_macs
        return vnicd

    @property
    def pvid(self):
        """The integer port VLAN ID, or None if the vNIC has no PVID."""
        return self._get_val_int(_VNICD_PVID)

    @pvid.setter
    def pvid(self, val):
        self.set_parm_value(_VNICD_PVID, val)

    @property
    def allowed_vlans(self):
        raw = self._get_val_str(_VNICD_ALLOWED_VLANS)
        if raw is None:
            return None
        return u.VLANList.unmarshal(raw)

    @allowed_vlans.setter
    def allowed_vlans(self, vlans):
        self.set_parm_value(_VNICD_ALLOWED_VLANS, u.VLANList.marshal(vlans))

    @property
    def mac(self):
        """MAC address of the format XXXXXXXXXXXX (12 uppercase hex digits)."""
        return self._get_val_str(_VNICD_MAC)

    def _mac(self, val):
        self.set_parm_value(_VNICD_MAC, u.sanitize_mac_for_api(val))

    @property
    def allowed_macs(self):
        raw = self._get_val_str(_VNICD_ALLOWED_OS_MACS)
        if raw is None:
            return None
        return u.MACList.unmarshal(raw)

    @allowed_macs.setter
    def allowed_macs(self, maclist):
        self.set_parm_value(_VNICD_ALLOWED_OS_MACS, u.MACList.marshal(maclist))

    @property
    def capacity(self):
        """The capacity (float, 0.0-1.0) of the active backing logical port."""
        return self._get_val_percent(_VNICD_DES_CAP_PCT)

    @property
    def auto_pri_failover(self):
        return self._get_val_bool(_VNICD_AUTO_FB)

    @auto_pri_failover.setter
    def auto_pri_failover(self, val):
        self.set_parm_value(_VNICD_AUTO_FB, u.sanitize_bool_for_api(val))

    @ewrap.Wrapper.xag_property(pc.XAG.ADV)
    def ip_address(self):
        """Returns the IP Address of the network interface.

        Typical format would be: 255.255.255.255 (IPv4) and
        ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (IPv6) or other short forms
        of IPv6 address
        """
        return self._get_val_str(_VNICD_IP_ADDR)

    @ewrap.Wrapper.xag_property(pc.XAG.ADV)
    def subnet_mask(self):
        """Returns the subnet mask of the network interface.

        Typical format would be: 255.255.255.0 (IPv4) and
        ffff:ffff:ffff:ffff:: (IPv6) or other forms of IPv6 address
        """
        return self._get_val_str(_VNICD_SUBNET_MASK)

    @ewrap.Wrapper.xag_property(pc.XAG.ADV)
    def gateway(self):
        """Returns the gateway of the network interface.

        Typical format would be: 10.0.0.1 (IPv4) and cafe::1 (IPv6) or other
        forms of IPv6 address
        """
        return self._get_val_str(_VNICD_GATEWAY)
@ewrap.ElementWrapper.pvm_type(_VNICBD, has_metadata=True,
                               child_order=_VNICBD_EL_ORDER)
class VNICBackDev(ewrap.ElementWrapper):
    """SR-IOV backing device for a vNIC."""

    @classmethod
    def bld(cls, adapter, vios_uuid, sriov_adap_id, pport_id, capacity=None,
            failover_pri=None, max_capacity=None):
        """Create a new VNICBackDev, suitable for inclusion in a VNIC wrapper.

        :param adapter: pypowervm.adapter.Adapter for REST API communication.
        :param vios_uuid: String UUID of the Virtual I/O Server to host the
                          vNIC server for this backing device.
        :param sriov_adap_id: Integer SR-IOV Adapter ID of the SR-IOV adapter
                              owning the physical port on which the backing
                              VF is to be created:
                              SRIOVAdapter.sriov_adap_id.
        :param pport_id: Integer physical port ID of SR-IOV physical port on
                         which the VF is to be created:
                         SRIOVEthPPort.port_id
        :param capacity: Float value between 0.0 and 1.0 indicating the
                         minimum fraction of the physical port's bandwidth
                         allocated to traffic over this backing device.  Must
                         be a multiple of SRIOVEthPPort.min_granularity for
                         the physical port indicated by pport_id.  If not
                         specified, SRIOVEthPPort.min_granularity is used by
                         the platform.
        :param failover_pri: Positive integer value representing the failover
                             priority of this backing device.
        :param max_capacity: Float value between 0.0 and 1.0 indicating the
                             maximum fraction of the physical port's
                             bandwidth allocated to traffic over this backing
                             device.
        :return: A new VNICBackDev, suitable for inclusion in a VNIC wrapper.
        """
        bdev = super(VNICBackDev, cls)._bld(adapter)
        # TODO(IBM): Verify that this can be ManagedSystem-less
        bdev._vios_href(adapter.build_href(_VIOS, vios_uuid, xag=[]))
        bdev._sriov_adap_id(sriov_adap_id)
        bdev._pport_id(pport_id)
        if capacity is not None:
            bdev._capacity(capacity)
        if failover_pri is not None:
            bdev.failover_pri = failover_pri
        if max_capacity is not None:
            bdev._max_capacity(max_capacity)
        return bdev

    @property
    def vios_href(self):
        return self.get_href(_VNICBD_VIOS, one_result=True)

    def _vios_href(self, href):
        self.set_href(_VNICBD_VIOS, href)

    @property
    def sriov_adap_id(self):
        return self._get_val_int(_VNICBD_SRIOV_ADP_ID)

    def _sriov_adap_id(self, val):
        self.set_parm_value(_VNICBD_SRIOV_ADP_ID, val)

    @property
    def pport_id(self):
        return self._get_val_int(_VNICBD_PPORT_ID)

    def _pport_id(self, val):
        self.set_parm_value(_VNICBD_PPORT_ID, val)

    @property
    def lport_href(self):
        return self.get_href(_VNICBD_LPORT, one_result=True)

    @property
    def capacity(self):
        """The allocated capacity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_VNICBD_CUR_CAP_PCT)

    def _capacity(self, float_val):
        self.set_parm_value(_VNICBD_CUR_CAP_PCT,
                            u.sanitize_percent_for_api(float_val))

    @property
    def max_capacity(self):
        """The max capacity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_VNICBD_MAX_CAP_PCT)

    def _max_capacity(self, float_val):
        self.set_parm_value(_VNICBD_MAX_CAP_PCT,
                            u.sanitize_percent_for_api(float_val))

    @property
    def desired_max_capacity(self):
        """The desired max capacity in a float-percentage format.

        :return: If the property is say "2.45%", a value of .0245 will be
                 returned.
        """
        return self._get_val_percent(_VNIC_BD_DES_MAX_CAP_PCT)

    @property
    def failover_pri(self):
        """The failover priority value for this backing device.

        :return: A value between 1 and 100, inclusive, with a lower number
                 indicating the higher priority (i.e. the backing device with
                 priority 1 will take precedence over that with priority 2).
        """
        return self._get_val_int(_VNICBD_FAILOVER_PRI)

    @failover_pri.setter
    def failover_pri(self, val):
        self.set_parm_value(_VNICBD_FAILOVER_PRI, val, attrib=pc.ATTR_KSV140)

    @property
    def is_active(self):
        return self._get_val_bool(_VNICBD_ACTIVE)

    @property
    def status(self):
        return self._get_val_str(_VNICBD_STATUS)
@ewrap.ElementWrapper.pvm_type(_IO_ADPT_CHOICE, has_metadata=False)
class LinkAggrIOAdapterChoice(ewrap.ElementWrapper):
    """A free I/O Adapter link aggregation choice.

    Flattens this two step hierarchy to pull the information needed directly
    from the IOAdapter element.
    """

    def _adapter_attr(self, prop_name):
        """Wrap the nested IOAdapter element and read one of its properties."""
        found = self._find('IOAdapter')
        if found is None:
            return None
        return getattr(IOAdapter.wrap(found), prop_name)

    @property
    def id(self):
        return self._adapter_attr('id')

    @property
    def description(self):
        return self._adapter_attr('description')

    @property
    def dev_name(self):
        return self._adapter_attr('dev_name')

    @property
    def dev_type(self):
        return self._adapter_attr('dev_type')

    @property
    def drc_name(self):
        return self._adapter_attr('drc_name')

    @property
    def phys_loc_code(self):
        return self._adapter_attr('phys_loc_code')

    @property
    def udid(self):
        return self._adapter_attr('udid')
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""High-level pythonic abstractions of XML entities returned by PowerVM."""

import collections
import copy
import re

from lxml import etree

from pypowervm import const
from pypowervm import util


class Atom(object):
    """Common base for the Atom feed/entry abstractions."""

    def __init__(self, properties):
        self.properties = properties

    @property
    def uuid(self):
        """The atom 'id' property, or None if absent."""
        try:
            return self.properties['id']
        except KeyError:
            return None

    @property
    def links(self):
        """Get the rel-keyed dict of <link/>s for this Atom, or {} if none."""
        return self.properties.get('links', {})

    @property
    def self_link(self):
        """Get the *first* SELF link, or None if none exists."""
        return self.links.get('SELF', [None])[0]

    @classmethod
    def _process_props(cls, el, props):
        """Fold a single child element's data into the props dict."""
        if re.match('{%s}' % const.ATOM_NS, el.tag):
            # Strip off the atom namespace qualification for easier access.
            param_name = el.tag[el.tag.index('}') + 1:]
        else:
            # Leave qualified anything that is not in the atom namespace.
            param_name = el.tag
        if param_name == 'link':
            # properties['links'][REL] = [href, ...]
            # Note that rel may (legally) be None
            rel = el.get('rel')
            if rel:
                rel = rel.upper()
            href = el.get('href')
            if 'links' not in props:
                props['links'] = collections.defaultdict(list)
            props['links'][rel].append(href)
        elif param_name == 'category':
            props[param_name] = el.get('term')
        elif param_name == '{%s}etag' % const.UOM_NS:
            props['etag'] = el.text
        elif el.text:
            props[param_name] = el.text


class Feed(Atom):
    """Represents an Atom Feed returned from PowerVM."""

    def __init__(self, properties, entries):
        super(Feed, self).__init__(properties)
        self.entries = entries

    def findentries(self, subelem, text):
        """Return the entries whose subelem element has the given text."""
        matches = []
        for entry in self.entries:
            for sub in entry.element.findall(subelem):
                if sub.text == text:
                    matches.append(entry)
                    break
        return matches

    @classmethod
    def unmarshal_atom_feed(cls, feedelem, resp):
        """Factory method producing a Feed object from a parsed ElementTree

        :param feedelem: Parsed ElementTree object representing an atom feed.
        :param resp: The Response from which this Feed was parsed.
        :return: a new Feed object representing the feedelem parameter.
        """
        ret = cls({}, [])
        entry_tag = str(etree.QName(const.ATOM_NS, 'entry'))
        for child in list(feedelem):
            if child.tag == entry_tag:
                # NB: The use of ret.self_link here relies on entries being
                # AFTER the self link in the feed.  (They are in fact last.)
                ret.entries.append(Entry.unmarshal_atom_entry(child, resp))
            elif not list(child):
                cls._process_props(child, ret.properties)
        return ret
""" super(Entry, self).__init__(properties) self.element = Element.wrapelement(element, adapter) def __deepcopy__(self, memo=None): """Produce a deep (except for adapter) copy of this Entry.""" return self.__class__(copy.deepcopy(self.properties, memo=memo), copy.deepcopy(self.element, memo=memo).element, self.adapter) @property def etag(self): return self.properties.get('etag', None) @property def adapter(self): return self.element.adapter @classmethod def unmarshal_atom_entry(cls, entryelem, resp): """Factory method producing an Entry object from a parsed ElementTree :param entryelem: Parsed ElementTree object representing an atom entry. :param resp: The Response containing (the feed containing) the entry. :return: a new Entry object representing the entryelem parameter. """ entryprops = {} element = None for child in list(entryelem): if child.tag == str(etree.QName(const.ATOM_NS, 'content')): # PowerVM API only has one element per entry element = child[0] elif not list(child): cls._process_props(child, entryprops) return cls(entryprops, element, resp.adapter) class Element(object): """Represents an XML element - a utility wrapper around etree.Element.""" def __init__(self, tag, adapter, ns=const.UOM_NS, attrib=None, text='', children=(), cdata=False): # Defaults shouldn't be mutable attrib = attrib if attrib else {} self.element = None if ns: self.element = etree.Element(str(etree.QName(ns, tag)), attrib=attrib) else: self.element = etree.Element(tag, attrib=attrib) if text: self.element.text = etree.CDATA(text) if cdata else text for c in children: # Use a deep copy, else, c.element gets *removed* from its parent # hierarchy (see fourth bullet: http://lxml.de/compatibility.html). # Doing the deepcopy here means the caller doesn't have to worry # about it. 
            self.element.append(copy.deepcopy(c.element))
        self.adapter = adapter

    def __len__(self):
        # Number of direct children of the backing etree element.
        return len(self.element)

    def __getitem__(self, index):
        return Element.wrapelement(self.element[index], self.adapter)

    def __setitem__(self, index, value):
        # NOTE(review): raises ValueError rather than the more conventional
        # TypeError; left as-is since callers may be catching ValueError.
        if not isinstance(value, Element):
            raise ValueError('Value must be of type Element')
        self.element[index] = value.element

    def __delitem__(self, index):
        del self.element[index]

    def __eq__(self, other):
        if other is None:
            return False
        return self._element_equality(self, other)

    def __deepcopy__(self, memo=None):
        """Produce a deep (except for adapter) copy of this Element."""
        # Round-trip through serialization to guarantee a fully detached
        # copy of the backing etree element.
        return self.wrapelement(etree.fromstring(self.toxmlstring()),
                                self.adapter)

    @staticmethod
    def _element_equality(one, two):
        """Tests element equality.

        There is no common mechanism for defining 'equality' in the element
        tree.  This provides a good enough equality that meets the schema
        definition.

        :param one: The first element.  Is the backing element.
        :param two: The second element.  Is the backing element.
        :returns: True if the children, text, attributes and tag are equal.
        """
        # Make sure that the children length is equal
        one_children = list(one)
        two_children = list(two)
        if len(one_children) != len(two_children):
            return False

        if one.text != two.text:
            return False

        if one.tag != two.tag:
            return False

        # Recursively validate
        for one_child in one_children:
            found = util.find_equivalent(one_child, two_children)
            if found is None:
                return False

            # Found a match, remove it as it is no longer a valid match.
            # Its equivalence was validated by the upper block.
            # Consume the match so duplicate children in `one` must each
            # find a distinct counterpart in `two`.
            two_children.remove(found)
        return True

    def __iter__(self):
        """Returns the children as a list of Elements."""
        return iter([Element.wrapelement(i, self.adapter)
                     for i in list(self.element)])

    @classmethod
    def wrapelement(cls, element, adapter):
        """Wrap a raw etree.Element in this class without copying it."""
        if element is None:
            return None
        # create with minimum inputs
        e = cls('element', adapter)
        # assign element over the one __init__ creates
        e.element = element
        return e

    def toxmlstring(self, pretty=False):
        """Produce an XML dump of this Element.

        :param pretty: If True, format the XML in a visually-pleasing
                       manner.
        :return: An XML string representing this Element.
        """
        # To be sure of backward compatibility, don't pass pretty_print=False.
        kwargs = {'pretty_print': True} if pretty else {}
        return etree.tostring(self.element, **kwargs)

    @property
    def tag(self):
        # Local tag name with any XML namespace stripped.
        return etree.QName(self.element.tag).localname

    @tag.setter
    def tag(self, tag):
        # Preserve the existing namespace (if any) while renaming.
        ns = self.namespace
        if ns:
            self.element.tag = etree.QName(ns, tag).text
        else:
            self.element.tag = tag

    @property
    def namespace(self):
        # Namespace URI of this element's tag; '' if unqualified.
        ns = etree.QName(self.element.tag).namespace
        return '' if ns is None else ns

    @namespace.setter
    def namespace(self, ns):
        self.element.tag = etree.QName(ns, self.tag).text

    @property
    def text(self):
        return self.element.text

    @text.setter
    def text(self, text):
        self.element.text = text

    @property
    def attrib(self):
        return self.element.attrib

    @attrib.setter
    def attrib(self, attrib):
        self.element.attrib = attrib

    def get(self, key, default=None):
        """Gets the element attribute named key.

        Returns the attribute value, or default if the attribute was not
        found.
        """
        return self.element.get(key, default)

    def items(self):
        """Returns the element attributes as a sequence of (name, value) pairs.

        The attributes are returned in an arbitrary order.
        """
        return self.element.items()

    def keys(self):
        """Returns the element attribute names as a list.

        The names are returned in an arbitrary order.
""" return self.element.keys() def set(self, key, value): """Set the attribute key on the element to value.""" self.element.set(key, value) def append(self, subelement): """Adds subelement to the end of this element's list of subelements. Note: if subelement is a reference to an element within another XML hierarchy, it will be *removed* from that hierarchy. If you intend to reuse the parent object, you should pass a copy.deepcopy of the subelement to this method. """ # TODO(IBM): We *should* deepcopy to prevent child poaching (see fourth # bullet here: http://lxml.de/compatibility.html) - but this breaks the # world. Figure out why, and fix it. # self.element.append(copy.deepcopy(subelement.element)) self.element.append(subelement.element) def inject(self, subelement, ordering_list=(), replace=True): """Inserts subelement at the correct position in self's children. Uses ordering_list to determine the proper spot at which to insert the specified subelement. :param subelement: The element to inject as a child of this element. :param ordering_list: Iterable of string tag names representing the desired ordering of children for this element. If subelement's tag is not included in this list, the behavior is self.append(subelement). :param replace: If True, and an existing child with subelement's tag is found, it is replaced. If False, subelement is added after the existing child(ren). Note: You probably want to use True only/always when subelement is maxOccurs=1. Conversely, you probably want to use False only/always when subelement is unbounded. If you use True and more than one matching child is found, the last one is replaced. """ def lname(tag): """Localname of a tag (without namespace).""" return etree.QName(tag).localname children = list(self.element) # If no children, just append if not children: self.append(subelement) return # Any children with the subelement's tag? 
        subfound = self.findall(subelement.tag)
        if subfound:
            if replace:
                # Swap the last existing match for the new subelement.
                self.replace(subfound[-1], subelement)
            else:
                # Keep existing matches; slot the new one right after them.
                subfound[-1].element.addnext(subelement.element)
            return

        # Now try to figure out insertion point based on ordering_list.
        # Ignore namespaces.
        ordlist = [lname(field) for field in ordering_list]
        subtag = lname(subelement.element.tag)
        # If subelement's tag is not in the ordering list, append
        if subtag not in ordlist:
            self.append(subelement)
            return

        # Get the tags preceding that of subelement
        pres = ordlist[:ordlist.index(subtag)]
        # Find the first child whose tag is not in that list
        for child in children:
            if lname(child.tag) not in pres:
                # Found the insertion point
                child.addprevious(subelement.element)
                return
        # If we got here, all existing children need to precede subelement.
        self.append(subelement)

    def find(self, match):
        """Finds the first subelement matching match.

        :param match: May be a tag name or path.
        :return: an element instance or None.
        """
        qpath = Element._qualifypath(match, self.namespace)
        e = self.element.find(qpath)
        if e is not None:  # must specify "is not None" here to work
            return Element.wrapelement(e, self.adapter)
        else:
            return None

    def findall(self, match):
        """Finds all matching subelements.

        :param match: May be a tag name or path.
        :return: a list containing all matching elements in document order.
        """
        qpath = Element._qualifypath(match, self.namespace)
        e_iter = self.element.findall(qpath)
        elems = []
        for e in e_iter:
            elems.append(Element.wrapelement(e, self.adapter))
        return elems

    def findtext(self, match, default=None):
        """Finds text for the first subelement matching match.

        :param match: May be a tag name or path.
        :return: the text content of the first matching element, or default
                 if no element was found.  Note that if the matching element
                 has no text content an empty string is returned.
""" qpath = Element._qualifypath(match, self.namespace) text = self.element.findtext(qpath, default) return text if text else default def insert(self, index, subelement): """Inserts subelement at the given position in this element. :raises TypeError: if subelement is not an etree.Element. """ self.element.insert(index, subelement.element) def iter(self, tag=None): """Creates a tree iterator with the current element as the root. The iterator iterates over this element and all elements below it, in document (depth first) order. If tag is not None or '*', only elements whose tag equals tag are returned from the iterator. If the tree structure is modified during iteration, the result is undefined. """ # Determine which iterator to use # etree.Element.getiterator has been deprecated in favor of # etree.Element.iter, but the latter was not added until python 2.7 if hasattr(self.element, 'iter'): lib_iter = self.element.iter else: lib_iter = self.element.getiterator # Fix up the tag value if not tag or tag == '*': qtag = None else: qtag = str(etree.QName(self.namespace, tag)) it = lib_iter(tag=qtag) for e in it: yield Element.wrapelement(e, self.adapter) def replace(self, existing, new_element): """Replaces the existing child Element with the new one.""" self.element.replace(existing.element, new_element.element) def remove(self, subelement): """Removes subelement from the element. Unlike the find* methods this method compares elements based on the instance identity, not on tag value or contents. """ self.element.remove(subelement.element) @staticmethod def _qualifypath(path, ns): if not ns: return path parts = path.split('/') for i in range(len(parts)): if parts[i] and not re.match(r'[\.\*\[\{]', parts[i]): parts[i] = str(etree.QName(ns, parts[i])) return '/'.join(parts) class ElementList(object): """Useful list ops on a list of Element. 
    In a schema where a simpleType element has a multiplicity allowing more
    than one instance within the containing element, this class provides a
    way to treat those instances as a list, to a limited extent.

    For example, given XML like (example tags reconstructed - the original
    docstring's markup was lost in extraction):

        <parent>
            ...(stuff that isn't <foo/>)...
            <foo>one</foo>
            <foo>two</foo>
            <foo>three</foo>
            ...(stuff that isn't <foo/>)...
        </parent>

        fooList = ElementList(parent_element, 'foo')
        len(fooList)
        3
        repr(fooList)
        "['one', 'two', 'three']"
        'two' in fooList
        True
        'four' in fooList
        False
        fooList.append('four')
        repr(fooList)
        "['one', 'two', 'three', 'four']"
        print root.toxmlstring()
        ...(stuff that isn't <foo/>)...
        <foo>one</foo>
        <foo>two</foo>
        <foo>three</foo>
        <foo>four</foo>
        ...(stuff that isn't <foo/>)...
    """

    def __init__(self, root_elem, tag, ordering_list=()):
        """Initialize a new ElementList.

        Note: The current implementation is limited to simple string
        elements.  When inserting new values, they will have the default
        namespace and attrs.

        :param root_elem: The entities.Element representing the parent node
                          containing the elements of interest.
        :param tag: The XML tag of the elements of interest.
        :param ordering_list: Iterable of tag strings indicating the desired
                              overall ordering of the elements within the
                              root_elem.  Used to create the first value in
                              the appropriate spot if the ElementList is
                              initially empty.
        """
        self.root_elem = root_elem
        self.tag = tag
        self.order = ordering_list

    def __find_elems(self):
        """List of entities.Element under self.root_elem with tag self.tag."""
        return self.root_elem.findall(self.tag)

    def __get_values(self):
        """List of the string values within the entities.Element instances."""
        return [elem.text for elem in self.__find_elems()]

    def __create_elem(self, val):
        """Create a new entities.Element suitable for this list.

        :param val: The raw string value for the text content of the new
                    element.  E.g. self.__create_elem('foo') will yield an
                    entities.Element whose text is 'foo' (with the tag this
                    ElementList was initialized with).
        :return: A new entities.Element containing the specified string val.
""" return Element(self.tag, self.root_elem.adapter, text=val) def index(self, val): return self.__get_values().index(val) def __len__(self): return len(self.__find_elems()) def extend(self, val_list): for val in val_list: self.append(val) def __repr__(self): return repr(self.__get_values()) def __contains__(self, val): return val in self.__get_values() def __str__(self): return str(self.__get_values()) def append(self, val): self.root_elem.inject( self.__create_elem(val), ordering_list=self.order, replace=False) def __getitem__(self, idx): return self.__get_values()[idx] def __setitem__(self, idx, value): self.__find_elems()[idx].text = value def __delitem__(self, idx): self.root_elem.remove(self.__find_elems()[idx]) def remove(self, val): self.__delitem__(self.__get_values().index(val)) def __iter__(self): return iter(self.__get_values()) def clear(self): for val in self.__get_values(): self.remove(val) pypowervm-1.1.24/pypowervm/__init__.py0000664000175000017500000000207213571367171017432 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg

# Configuration options consumed throughout pypowervm; registered on the
# global CONF object below so importing pypowervm makes them available.
ibmpowervm_opts = [
    cfg.IntOpt('pypowervm_update_collision_retries',
               default=5,
               help='Number of retries if an update operation failed due to '
                    'collision'),
    cfg.IntOpt('pypowervm_job_request_timeout',
               default=1800,
               help='Default timeout in seconds for PowerVM Job requests.'),
]

CONF = cfg.CONF
CONF.register_opts(ibmpowervm_opts)
pypowervm-1.1.24/pypowervm/traits.py0000664000175000017500000001025513571367171017203 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Identify behavioral traits specific to a PowerVM server type/version."""

import weakref


class APITraits(object):
    """Represents capabilities inherent to the backing API server.

    For instance, depending on a schema version, or the backing REST
    server, there may be different code paths that the user needs to go
    through.

    A key example of this would be the VirtualNetworks.  The HMC requires
    that Virtual Networks be the driver of code paths for various code
    paths.  However, in other API servers, the Virtual Networks are simply
    realized based off the VLANs/virtual switches that adapters are
    currently tied to.

    This class encapsulates the various traits so that tasks and users do
    not have to inspect the header data directly to determine how the API
    should behave.
    """

    def __init__(self, session):
        # Avoid circular references to the session by using a weak reference.
        # Circular references prevent garbage collection.
        self._sess_ref = weakref.ref(session)
        # True iff the management console type is HMC; most traits key off
        # this single distinction.
        self._is_hmc = session.mc_type == 'HMC'

    @property
    def session(self):
        # Get the session through the weak reference.  NOTE: returns None
        # if the Session has already been garbage collected.
        return self._sess_ref()

    @property
    def vnet_aware(self):
        """Indicates whether Virtual Networks are pre-reqs to Network changes.

        Some APIs (such as modifying the SEA or creating a Client Network
        Adapter) require that the VirtualNetwork (or VNet wrapper) be
        pre-created for the operation.  This is typically done when working
        against an HMC.

        This trait will return True if the Virtual Networks need to be
        passed in on NetworkBridge or Client Network Adapter creation, or
        False if the API should directly work with VLANs and Virtual
        Switches.
        """
        return self._is_hmc

    @property
    def has_lpar_profiles(self):
        """Indicates whether the platform manager supports LPAR profiles.

        This trait will return True if LPAR profiles are supported.
        """
        return self._is_hmc

    @property
    def local_api(self):
        """Indicates whether or not the PowerVM API Server is running locally.

        The PowerVM API server in some deployments may be running co-located
        with the pypowervm API.  In those cases, certain optimizations may
        be available (like uploading from a file instead of a pipe).

        This trait is a coarse check to determine, for certain, if the API
        is co-located on the same server.
        """
        # If the file auth is set to true, we must be colocated.  All other
        # routes could be error prone and lead to significant branches of
        # complexity.
        return self.session.use_file_auth

    @property
    def dynamic_pvid(self):
        """Indicates whether a CNA can dynamically modify its PVID."""
        return not self._is_hmc

    @property
    def rmdev_job_available(self):
        """Indicates whether or not the Job API supports RMDev."""
        return not self._is_hmc

    @property
    def has_high_slot(self):
        """Does the API support UseNextAvailableHighSlotID?"""
        return not self._is_hmc

    @property
    def vea_as_ibmi_console(self):
        """Indicates whether the console type of IBMi VM is vea.
        IBMi depends on the trait to determine the console type.  If the
        host is not managed by HMC, the console type of an IBMi VM deployed
        on the host shall be the slot number of its first virtual ethernet
        adapter.  Otherwise, the Console type shall be "HMC".
        """
        return not self._is_hmc
pypowervm-1.1.24/pypowervm/exceptions.py0000664000175000017500000003775713571367171020066 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Module-specific error/exception subclasses."""

import abc

from lxml import etree
import six

from pypowervm import const as c
from pypowervm import entities as ent
from pypowervm.i18n import _


class Error(Exception):
    """Error on PowerVM API Adapter method invocation."""

    def __init__(self, msg, response=None):
        """Initializes Error with optional `response` object."""
        # Stash the Response (if any) so callers can inspect it.
        self.response = response
        self.orig_response = None
        super(Error, self).__init__(msg)


# NOTE(review): shadows the Python builtin ConnectionError; import this
# module qualified (pvmex.ConnectionError) to avoid confusion.
class ConnectionError(Error):
    """Connection Error on PowerVM API Adapter method invocation."""


class SSLError(Error):
    """SSL Error on PowerVM API Adapter method invocation."""


# NOTE(review): shadows the Python builtin TimeoutError; import this
# module qualified (pvmex.TimeoutError) to avoid confusion.
class TimeoutError(Error):
    """Timeout Error on PowerVM API Adapter method invocation."""


class HttpError(Error):
    """HTTP Error on PowerVM API Adapter method invocation."""

    def __init__(self, resp):
        """Initializes HttpError with required `response` object.

        1) Constructs the exception message based on the contents of the
        response parameter.
2) If possible, initializes a 'her_wrap' member which is a pypowervm.wrappers.http_error.HttpError - an EntryWrapper for the payload. Consumers should check this member for None before using. :param resp: pypowervm.adapter.Response containing an response from the REST server. """ self.her_wrap = None reason = resp.reason # Attempt to extract PowerVM API's HttpErrorResponse object. # Since this is an exception path, we use best effort only - don't want # problems here to obscure the real exception. try: root = etree.fromstring(resp.body) if root is not None and root.tag == str(etree.QName(c.ATOM_NS, 'entry')): resp.entry = ent.Entry.unmarshal_atom_entry(root, resp) # Import inline to avoid circular dependencies import pypowervm.wrappers.http_error as he self.her_wrap = he.HttpError.wrap(resp) # Add the message to the reason if it is available. if self.her_wrap.message: reason += ' -- ' + self.her_wrap.message except Exception: pass # Construct the exception message msg = ('HTTP error %(status)s for method %(method)s on path ' '%(path)s: %(reason)s') % dict(status=resp.status, method=resp.reqmethod, path=resp.reqpath, reason=reason) # Initialize the exception super(HttpError, self).__init__(msg, response=resp) class HttpNotFound(HttpError): """HttpError subclass where response.status == c.HTTPStatus.NOT_FOUND.""" pass class HttpUnauth(HttpError): """HttpError where response.status == c.HTTPStatus.UNAUTHORIZED.""" pass class AtomError(Error): """Atom Error on PowerVM API Adapter method invocation.""" def __init__(self, msg, response): """Initializes AtomError with required `response` object.""" super(AtomError, self).__init__(msg, response=response) @six.add_metaclass(abc.ABCMeta) class AbstractMsgFmtError(Error): """Used to raise an exception with a formattable/parameterized message. The subclass must set the msg_fmt class variable. The consumer should instantiate the subclass with **kwargs appropriate to its msg_fmt. 
""" def __init__(self, response=None, **kwa): msg = self.msg_fmt % kwa super(AbstractMsgFmtError, self).__init__(msg, response=response) class UnableToDerivePhysicalPortForNPIV(AbstractMsgFmtError): msg_fmt = _("Unable to derive the appropriate physical FC port for WWPN " "%(wwpn)s. The VIOS Extended Attribute Groups may have been " "insufficient. The VIOS URI for the query was %(vio_uri)s.") class NotFound(AbstractMsgFmtError): msg_fmt = _('Element not found: %(element_type)s %(element)s') class LPARNotFound(AbstractMsgFmtError): msg_fmt = _('LPAR not found: %(lpar_name)s') class AdapterNotFound(AbstractMsgFmtError): msg_fmt = _('Adapter not found') class JobRequestFailed(AbstractMsgFmtError): msg_fmt = _("The '%(operation_name)s' operation failed. %(error)s") class JobRequestTimedOut(JobRequestFailed): msg_fmt = _("The '%(operation_name)s' operation failed. " "Failed to complete the task in %(seconds)d seconds.") class OSShutdownNoRMC(AbstractMsgFmtError): msg_fmt = _("Can not perform OS shutdown on Virtual Machine %(lpar_nm)s " "because its RMC connection is not active.") class VMPowerOffFailure(AbstractMsgFmtError): msg_fmt = _("Failed to power off Virtual Machine %(lpar_nm)s: %(reason)s") class VMPowerOffTimeout(VMPowerOffFailure): msg_fmt = _("Power off of Virtual Machine %(lpar_nm)s timed out after " "%(timeout)d seconds.") class VMPowerOnFailure(AbstractMsgFmtError): msg_fmt = _("Failed to power on Virtual Machine %(lpar_nm)s: %(reason)s") class VMPowerOnTimeout(VMPowerOnFailure): msg_fmt = _("Power on of Virtual Machine %(lpar_nm)s timed out after " "%(timeout)d seconds.") class PvidOfNetworkBridgeError(AbstractMsgFmtError): msg_fmt = _("Unable to remove VLAN %(vlan_id)d as it is the Primary VLAN " "Identifier on a different Network Bridge.") class OrphanVLANFoundOnProvision(AbstractMsgFmtError): msg_fmt = _("Unable to provision VLAN %(vlan_id)d. It appears to be " "contained on device '%(dev_name)s' on Virtual I/O Server " "%(vios)s. 
That device is not connected to any Network " "Bridge (Shared Ethernet Adapter). Please manually remove " "the device or add it to the Network Bridge before " "continuing.") class DuplicateLUNameError(AbstractMsgFmtError): msg_fmt = _("A Logical Unit with name %(lu_name)s already exists on " "Shared Storage Pool %(ssp_name)s.") class UnableToFindFCPortMap(AbstractMsgFmtError): msg_fmt = _("Unable to find a physical port to map a virtual Fibre " "Channel port to. This is due to either a Virtual I/O " "Server being unavailable, or improper port specification " "for the physical Fibre Channel ports.") class ConsoleNotLocal(AbstractMsgFmtError): msg_fmt = _("Unable to start the console to the Virtual Machine. The " "pypowervm API is running in a non-local mode. The console " "can only be deployed when pypowervm is co-located with " "the PowerVM API.") class WrapperTaskNoSubtasks(AbstractMsgFmtError): msg_fmt = _("WrapperTask %(name)s has no subtasks!") class FeedTaskEmptyFeed(AbstractMsgFmtError): msg_fmt = _("FeedTask can't have an empty feed.") class AuthFileReadError(AbstractMsgFmtError): msg_fmt = _("OS denied access to file %(access_file)s.") class AuthFileAccessError(AbstractMsgFmtError): msg_fmt = _("OS encountered an I/O error attempting to read file " "%(access_file)s: %(error)s") class MigrationFailed(AbstractMsgFmtError): msg_fmt = _("The migration task failed. %(error)s") class IBMiLoadSourceNotFound(AbstractMsgFmtError): msg_fmt = _("No load source found for VM %(vm_name)s") class UnableToBuildPG83EncodingMissingParent(AbstractMsgFmtError): msg_fmt = _("Unable to derive the pg83 encoding for hdisk %(dev_name)s. " "The parent_entry attribute is not set. This may be due to " "using a PV obtained through an unsupported property chain. 
" "The PV must be accessed via VIOS.phys_vols, VG.phys_vols, or " "VIOS.scsi_mappings[n].backing_storage.") class SingleMappingNotFoundRemapError(AbstractMsgFmtError): msg_fmt = _("Unable to remap storage element of vSCSI mapping. Expected " "to find exactly one matching mapping, found " "%(num_mappings)d.") class StorageMapExistsRemapError(AbstractMsgFmtError): msg_fmt = _("Unable to remap storage element of vSCSI mapping. A mapping " "for storage element %(stg_name)s already exists to client " "LPAR %(lpar_uuid)s.") class FoundDevMultipleTimes(AbstractMsgFmtError): msg_fmt = _("Found device %(devname)s %(count)d times; expected to find " "it at most once.") class MultipleExceptionsInFeedTask(AbstractMsgFmtError): """Exception concatenating messages in WrappedFailure exceptions. Exception raised when a pypowervm.utils.transaction.FeedTask run raises a tasflow.exceptions.WrappedFailure containing more than one exception. The message string is a concatenation of the message strings of the wrapped exceptions. 
""" def __init__(self, ft_name, wrapped_failure): # In case the caller wants to trap this and get at the WrappedFailure self.wrapped_failure = wrapped_failure self.msg_fmt = _("FeedTask %(ft_name)s experienced multiple " "exceptions:\n\t%(concat_msgs)s") concat_msgs = '\n\t'.join([fail.exception_str for fail in wrapped_failure]) super(MultipleExceptionsInFeedTask, self).__init__( response=None, ft_name=ft_name, concat_msgs=concat_msgs) class ManagementPartitionNotFoundException(AbstractMsgFmtError): """Couldn't find exactly one management partition on the system.""" msg_fmt = _("Expected to find exactly one management partition; found " "%(count)d.") class ThisPartitionNotFoundException(AbstractMsgFmtError): """Couldn't find exactly one partition with the local VM's short ID.""" msg_fmt = _("Expected to find exactly one partition with ID %(lpar_id)d; " "found %(count)d.") class NoDefaultTierFoundOnSSP(AbstractMsgFmtError): """Looked for a default Tier on the SSP, but didn't find it.""" msg_fmt = _("Couldn't find the default Tier on Shared Storage Pool " "%(ssp_name)s.") class InvalidHostForRebuild(AbstractMsgFmtError): pass class InvalidHostForRebuildNoVIOSForUDID(InvalidHostForRebuild): msg_fmt = _("The device with UDID %(udid)s was not found on any of the " "Virtual I/O Servers.") class InvalidHostForRebuildNotEnoughVIOS(InvalidHostForRebuild): msg_fmt = _("There are not enough Virtual I/O Servers to support the " "virtual machine's device with UDID %(udid)s.") class InvalidHostForRebuildFabricsNotFound(InvalidHostForRebuild): msg_fmt = _("The expected fabrics (%(fabrics)s) were not found on any of " "the Virtual I/O Servers.") class InvalidHostForRebuildInvalidIOType(InvalidHostForRebuild): msg_fmt = _("Can not rebuild the virtual machine. 
It is using an I/O " "type of %(io_type)s which is not supported for VM rebuild.") class InvalidHostForRebuildSlotMismatch(InvalidHostForRebuild): msg_fmt = _("The number of VFC slots on the target system " "(%(rebuild_slots)d) does not match the number of slots on " "the client system (%(original_slots)d). Unable to rebuild " "this virtual machine on this system.") class InvalidVirtualNetworkDeviceType(AbstractMsgFmtError): msg_fmt = _("To register the slot information of the network device a " "CNA or VNIC adapter is needed. Instead the following " "was given: %(wrapper)s.") class NotEnoughActiveVioses(AbstractMsgFmtError): msg_fmt = _("There are not enough active Virtual I/O Servers available. " "Expected %(exp)d; found %(act)d.") class ViosNotAvailable(AbstractMsgFmtError): msg_fmt = _("No Virtual I/O Servers are available. Attempted to wait for " "a VIOS to become active for %(wait_time)d seconds. Please " "check the RMC connectivity between the PowerVM NovaLink and " "the Virtual I/O Servers.") class NoRunningSharedSriovAdapters(AbstractMsgFmtError): # sriov_loc_mode_state should be a string comprising one SRIOV adapter per # line, each line comprising the physical location code, the mode, and the # state, separated by ' | '. msg_fmt = _("Could not find any SR-IOV adapters in Sriov mode and Running " "state.\nLocation | Mode | State\n%(sriov_loc_mode_state)s") class InsufficientSRIOVCapacity(AbstractMsgFmtError): msg_fmt = _("Unable to fulfill redundancy requirement of %(red)d. 
Found " "%(found_vfs)d viable backing device(s).") class SystemNotVNICCapable(AbstractMsgFmtError): msg_fmt = _("The Managed System is not vNIC capable.") class NoVNICCapableVIOSes(AbstractMsgFmtError): msg_fmt = _("There are no active vNIC-capable VIOSes.") class VNICFailoverNotSupportedSys(AbstractMsgFmtError): msg_fmt = _("A redundancy of %(red)d was specified, but the Managed " "System is not vNIC failover capable.") class VNICFailoverNotSupportedVIOS(AbstractMsgFmtError): msg_fmt = _("A redundancy of %(red)d was specified, but there are no " "active vNIC failover-capable VIOSes.") class NoMediaRepoVolumeGroupFound(AbstractMsgFmtError): msg_fmt = _("Unable to locate the volume group %(vol_grp)s to store the " "virtual optical media within. Unable to create the " "media repository.") class CantUpdatePPortsInUse(AbstractMsgFmtError): msg_fmt = _("The ManagedSystem update was not attempted because changes " "were requested to one or more SR-IOV physical ports which " "are in use by vNICs.\n%(warnings)s") class VNCBasedTerminalFailedToOpen(AbstractMsgFmtError): msg_fmt = _("Unable to create VNC based virtual terminal: %(err)s") class CacheNotSupportedException(AbstractMsgFmtError): msg_fmt = _("The Adapter cache is not supported.") class InvalidEnumValue(AbstractMsgFmtError): msg_fmt = _("Invalid value '%(value)s' for '%(enum)s'. Valid values are: " "%(valid_values)s") class VIOSNotFound(AbstractMsgFmtError): msg_fmt = _("No VIOS found with name %(vios_name)s.") class VGNotFound(AbstractMsgFmtError): msg_fmt = _("No volume group found with name %(vg_name)s.") class PartitionIsNotIBMi(AbstractMsgFmtError): msg_fmt = _("Partition with name %(part_name)s is not an IBMi partition.") class PanelFunctionRequiresPartition(AbstractMsgFmtError): msg_fmt = _("PanelJob function partition argument is empty.") class InvalidIBMiPanelFunctionOperation(AbstractMsgFmtError): msg_fmt = _("Panel function operation %(op_name)s is invalid. 
" "One of %(valid_ops)s expected.") class ISCSIDiscoveryFailed(AbstractMsgFmtError): msg_fmt = _("ISCSI discovery failed for VIOS %(vios_uuid)s. " "Return code: %(status)s") class ISCSILogoutFailed(AbstractMsgFmtError): """Exception currently unused""" msg_fmt = _("ISCSI Logout failed for VIOS %(vios_uuid)s. " "Return code: %(status)s") class ISCSIRemoveFailed(AbstractMsgFmtError): msg_fmt = _("ISCSI Remove failed for VIOS %(vios_uuid)s. " "Return code: %(status)s") class VstorNotFound(AbstractMsgFmtError): msg_fmt = _("Vstor %(stor_udid)s not found for VIOS %(vios_uuid)s.") pypowervm-1.1.24/pypowervm/i18n.py0000664000175000017500000000220613571367171016451 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Internationalization helpers.""" from oslo_config import cfg import oslo_i18n._message as _msg cfg.CONF.register_opts([ cfg.BoolOpt('translation_lazy_load', default=True, help='Specifies whether to Lazy-Load Translation') ]) def _(msg, domain='pypowervm'): """Shortcut method to return a translated message.""" msg = _msg.Message(msg, domain=domain) # If we are supposed to lazy-load, then return the message object return msg if cfg.CONF.translation_lazy_load else msg.translate() pypowervm-1.1.24/pypowervm/helpers/0000775000175000017500000000000013571367172016763 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/helpers/log_helper.py0000664000175000017500000001266113571367171021462 0ustar neoneo00000000000000# Copyright 2014, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""This Adapter helper logs recent requests/responses on an exception.""" import collections import copy import threading from oslo_log import log as logging import pypowervm.const as c import pypowervm.exceptions as pvmex from pypowervm.i18n import _ LOG = logging.getLogger(__name__) log_store = threading.local() def _init_thread_stg(max_entries): """Sets up the storage for the logs for this thread.""" if not hasattr(log_store, 'powervm_log'): log_store.powervm_log = collections.deque(maxlen=max_entries) def _stash(sensitive, type_, value): """Enters the request or response in the thread log.""" if sensitive: value = '' log_store.powervm_log.append({type_: value}) def _stash_response(sensitive, resp): if resp is not None: logged_resp = dict(resp.__dict__) # Remove feed and entry from the dictionary of response attributes. # This avoids keeping heavy ElementTree objects in memory since # only the string version of the body is dumped for responses. logged_resp.pop('entry', None) logged_resp.pop('feed', None) _stash(sensitive, 'response', logged_resp) def _write_thread_log(): def format_request(req): body = None # Parse the arguments if we're passed a tuple else its a string if isinstance(req, tuple): req_args = req[0] req_kwds = req[1] dump = dict(method=req_args[0], path=req_args[1]) for key in req_kwds: if key == 'body': # special format for body body = req_kwds.get('body') elif key == 'headers': # deep copy the header and change what we can't dump headers = copy.deepcopy(req_kwds.get(key)) if 'X-API-Session' in headers: headers['X-API-Session'] = '' dump[key] = str(headers) else: dump[key] = str(req_kwds.get(key)) else: dump = req # Dump all fields besides the body LOG.info(_('REQUEST: %s') % dump) # Now dump the full body if body is not None: LOG.info(body) def format_response(resp): body = None # Parse the arguments if we're passed a dict else it's a string if isinstance(resp, dict): dump = {} for key in resp: if key == 'body': # special format for body body = 
resp.get('body') else: dump[key] = str(resp.get(key)) else: dump = resp # Dump all fields besides the body first LOG.info(_('RESPONSE: %s') % dump) # Now dump the full body, on the next line, if available if body is not None: LOG.info(body) # Pop each entry out of the log until it's empty try: while True: entry = log_store.powervm_log.popleft() request = entry.get('request') if request is not None: format_request(request) response = entry.get('response') if response is not None: format_response(response) except IndexError: pass def log_helper(func, max_logs=3): """Log recent requests/responses on exception. This helper stashes the requests/responses it sees passing through to thread local storage. If it then sees an exception surfacing, it will write the req/resp logs. :param func: The Adapter request method to call :param max_logs (int): Max number of req/resps to retain at a time This value can only be set once per thread. Once it's set, subsequent calls will ignore the value. """ def is_etag_mismatch(ex): """Is ex an HttpError with status 412 (etag mismatch)?""" return (isinstance(ex, pvmex.HttpError) and ex.response.status == c.HTTPStatus.ETAG_MISMATCH) def log_req_resp(*args, **kwds): # Set aside storage for a req/resp pair _init_thread_stg(max_entries=(max_logs * 2)) # See if this request has sensitive data sensitive = kwds.get('sensitive', False) # Log the request before the call _stash(sensitive, 'request', (args, kwds)) try: # Call the request() response = func(*args, **kwds) except pvmex.Error as e: _stash_response(sensitive, e.response) # Now dump the log and raise the exception. # Special case for 412 (etag mismatch) - don't dump. 
def vios_busy_retry_helper(func, max_retries=3, delay=5):
    """Retry an Adapter request when the target resource reports busy.

    :param func: The Adapter request method to invoke.
    :param max_retries: Maximum number of retry attempts.
    :param delay: Base number of seconds to wait between attempts; the
                  actual wait grows linearly with the attempt count.
    """

    def _is_retryable(err_wrap):
        """Return True if the wrapped HTTP error warrants a retry."""
        # Anything other than an HttpError wrapper can never be retried.
        if not isinstance(err_wrap, he.HttpError):
            return False
        # Retry when the VIOS is explicitly busy, or on a 503.  The
        # service unavailable can occur on child objects when a VIOS is
        # busy.
        return (err_wrap.is_vios_busy() or
                err_wrap.status == c.HTTPStatus.SERVICE_UNAVAILABLE)

    def _request_with_retries(*args, **kwargs):
        attempt = 0
        while True:
            try:
                # Issue the request; success returns immediately.
                return func(*args, **kwargs)
            except pvmex.Error as exc:
                # Out of attempts - surface the failure.
                if attempt >= max_retries:
                    raise
                resp = exc.response
                if resp and resp.body and resp.entry:
                    wrap = ew.EntryWrapper.wrap(resp.entry)
                    if _is_retryable(wrap):
                        attempt += 1
                        # Back off, scaling the wait with the attempt
                        # count.  SLEEP is the module-level time.sleep
                        # alias (kept patchable for unit test).
                        SLEEP(delay * attempt)
                        continue
                # Not a retryable 'VIOS busy' condition; re-raise.
                raise

    return _request_with_retries
""" def wrapper(*args, **kwds): retries = 0 while True: try: # Call the request() resp = func(*args, **kwds) except pvmex.Error as e: resp = e.response # See if the system was busy if resp.body is not None: resp_body = str(resp.body) if any(code in resp_body for code in BUSY_ERR_CODES): retries += 1 if retries <= max_retries: # Wait a few seconds before trying again time.sleep(5 * retries) continue raise e else: return resp return wrapper pypowervm-1.1.24/pypowervm/tasks/0000775000175000017500000000000013571367172016446 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tasks/ibmi.py0000664000175000017500000001371313571367171017744 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tasks around IBMi VM changes.""" from oslo_config import cfg from oslo_log import log as logging import pypowervm.const as c import pypowervm.exceptions as pvmex from pypowervm import i18n import pypowervm.tasks.scsi_mapper as pvm_smap import pypowervm.tasks.vfc_mapper as pvm_vfcmap import pypowervm.wrappers.base_partition as pvm_bp from pypowervm.wrappers import job import pypowervm.wrappers.logical_partition as pvm_lpar from pypowervm.wrappers import virtual_io_server as pvm_vios LOG = logging.getLogger(__name__) # TODO(IBM) translation _LI = i18n._ def update_ibmi_settings(adapter, lpar_w, boot_type): """Update TaggedIO, Keylock postion and IPL Source of IBMi VM. 
class IBMiPanelOperations(object):
    """Panel function operation names accepted by start_panel_job."""
    DUMPRESTART = 'dumprestart'
    DSTON = 'dston'
    RETRYDUMP = 'retrydump'
    REMOTEDSTOFF = 'remotedstoff'
    REMOTEDSTON = 'remotedston'
    IOPRESET = 'iopreset'
    IOPDUMP = 'iopdump'
    CONSOLESERVICE = 'consoleservice'

    ALL_VALUES = (DUMPRESTART, DSTON, RETRYDUMP, REMOTEDSTOFF,
                  REMOTEDSTON, IOPRESET, IOPDUMP, CONSOLESERVICE)


CONF = cfg.CONF

# Job template suffix and parameter key for the PanelFunction Job.
IBMI_PANEL_JOB_SUFFIX = 'PanelFunction'
IBMI_PARAM_KEY = 'operation'


def start_panel_job(part, opt=None,
                    timeout=CONF.pypowervm_job_request_timeout,
                    synchronous=True):
    """Run an IBMi Panel job operation.

    :param part: Partition (LPAR or VIOS) wrapper indicating the partition
                 to run the panel function against.
    :param opt: One of the IBMiPanelOperations enum values to run.
    :param timeout: value in seconds for specifying how long to wait for
                    the Job to complete.
    :param synchronous: If True, this method will not return until the Job
                        completes (whether success or failure) or times
                        out.  If False, this method will return as soon as
                        the Job has started on the server (that is,
                        achieved any state beyond NOT_ACTIVE).  Note that
                        timeout is still possible in this case.
    """
    # Validate the partition and the requested operation up front.
    if not part:
        raise pvmex.PanelFunctionRequiresPartition()

    if opt not in IBMiPanelOperations.ALL_VALUES:
        raise pvmex.InvalidIBMiPanelFunctionOperation(
            op_name=opt,
            valid_ops=', '.join(IBMiPanelOperations.ALL_VALUES))

    # Panel functions only apply to IBMi (OS/400) partitions.
    if part.env != pvm_bp.LPARType.OS400:
        raise pvmex.PartitionIsNotIBMi(part_name=part.name)

    # Fetch the PanelFunction Job template for this partition.
    panel_job = job.Job.wrap(part.adapter.read(
        part.schema_type, part.uuid, suffix_type=c.SUFFIX_TYPE_DO,
        suffix_parm=IBMI_PANEL_JOB_SUFFIX))

    # Run the Job, letting any exceptions raise up to the caller.
    panel_job.run_job(
        part.uuid,
        job_parms=[job.Job.create_job_parameter(IBMI_PARAM_KEY, opt)],
        timeout=timeout, synchronous=synchronous)
""" import collections import copy import pickle import six import warnings from pypowervm import exceptions as pvm_ex from pypowervm.i18n import _ from pypowervm import util as pvm_util from pypowervm.utils import lpar_builder as lb from pypowervm.wrappers import iocard as ioc from pypowervm.wrappers import managed_system as sys from pypowervm.wrappers import network as net from pypowervm.wrappers import storage as stor class IOCLASS(object): """Enumeration of differently-handled I/O classes.""" VFC = 'VFC' LU = stor.LU.__name__ VDISK = stor.VDisk.__name__ VOPT = stor.VOptMedia.__name__ PV = stor.PV.__name__ CNA = net.CNA.__name__ MGMT_CNA = 'MGMT' + net.CNA.__name__ VNIC = ioc.VNIC.__name__ class SlotMapStore(object): """Save/fetch slot-to-I/O topology for an LPAR. This class should be extended by something that can interact with a persistent storage device to implement the save, load, and delete methods. The slot metadata is used during a rebuild operation (e.g. Remote Restart) to ensure that the client devices are in the same slots on the target. Typically this map is constructed on the source system and then saved. It is loaded on the target system and used to rebuild an LPARthe same way. """ def __init__(self, inst_key, load=True): """Load (or create) a SlotMapStore for a given LPAR. :param inst_key: Unique key (e.g. LPAR UUID) by which the slot map for a given LPAR is referenced in storage. :param load: If True (the default), the load method is invoked to retrieve existing data from backing storage. If False (e.g. if the caller knows there's nothing in the backing store for inst_key; or deliberately wants to replace it), this instance is initialized with an empty topology map. 
""" self.inst_key = inst_key self._vswitch_map = None map_str = self.load() if load else None # Deserialize or initialize self._slot_topo = pickle.loads(map_str) if map_str else {} # Save a copy of the topology so we can tell when it has changed self._loaded_topo = copy.deepcopy(self._slot_topo) @property def serialized(self): """Internal use only. Do not override. Do not invoke.""" # Used by the save method to serialize the slot map data to an opaque # value to write to external storage. # Use py2/3-compatible protocol. return pickle.dumps(self.topology, protocol=2) def load(self): """Internal use only. Do not override. Do not invoke.""" # Used by __init__ to bootstrap this instance with saved data, if any. return self._load(self.inst_key) def _load(self, key): """Subclass implementation to load a slot map for an LPAR from storage. The subclass must implement this method to retrieve the slot map - an opaque data blob - from a storage back-end, where it was stored keyed on 'key'. :param key: Unique key to associate with this instance in the storage back-end. :return: Opaque data blob loaded from storage. If no value exists in storage for this key, this method should return None. """ return None def save(self): """Save this slot map to storage, if needed.""" # Only save if needed. if self._slot_topo != self._loaded_topo: self._save(self.inst_key, self.serialized) # Update the saved version of the loaded topology so a subsequent # save (without intervening changes) can condition properly. self._loaded_topo = copy.deepcopy(self._slot_topo) def _save(self, key, blob): """Subclass implementation to write this slot map to storage. The subclass must implement this method to save 'blob' - an opaque data blob - to a storage back-end, keyed via 'key'. The object must be retrievable subsequently via the same key. If the back-end already contains a value for that key, this method must overwrite it. 
This method need not implement save-only-if-changed logic; that is handled by the 'save' method. :param key: Unique key to associate with this instance in the storage back-end. :param blob: Opaque data blob to save to the storage back-end. This value is not to be introspected, as its format may change without notice. """ pass def delete(self): """Remove the back-end storage for this slot map.""" self._delete(self.inst_key) def _delete(self, key): """Subclass implementation to delete this slot map from storage. The subclass must implement this method to remove the opaque data string associated with 'key' from the storage back-end. :param key: Unique key to associate with this instance in the storage back-end. """ pass def register_max_vslots(self, max_vslots): """Register the maximum number of virtual slots on the source LPAR. :param max_vslots: The maximum number of virtual slots on the LPAR. """ self._slot_topo['_max_vslots'] = max_vslots def register_vnet(self, vnet_w): """Register the slot number for a CNA or VNIC. :param vnet_w: Either a CNA wrapper or a VNIC wrapper. :raises: InvalidVirtualNetworkDeviceType: If the wrapper passed in is not a CNA or VNIC this will be raised. """ if isinstance(vnet_w, net.CNA): cna_map = self._vswitch_id2name(vnet_w.adapter) self._reg_slot(IOCLASS.CNA, vnet_w.mac, vnet_w.slot, extra_spec=cna_map[vnet_w.vswitch_id]) elif isinstance(vnet_w, ioc.VNIC): self._reg_slot(IOCLASS.VNIC, vnet_w.mac, vnet_w.slot) else: raise pvm_ex.InvalidVirtualNetworkDeviceType(wrapper=vnet_w) def drop_vnet(self, vnet_w): """Drops the slot number for a CNA or VNIC. :param vnet_w: Either a CNA wrapper or a VNIC wrapper. :raises: InvalidVirtualNetworkDeviceType: If the wrapper passed in is not a CNA or VNIC this will be raised. 
""" if isinstance(vnet_w, net.CNA): self._drop_slot(IOCLASS.CNA, vnet_w.mac, vnet_w.slot) elif isinstance(vnet_w, ioc.VNIC): self._drop_slot(IOCLASS.VNIC, vnet_w.mac, vnet_w.slot) else: raise pvm_ex.InvalidVirtualNetworkDeviceType(wrapper=vnet_w) def register_cna(self, cna): """Register the slot and switch topology of a client network adapter. :deprecated: Use register_vnet instead. :param cna: CNA EntryWrapper to register. """ warnings.warn(_("The register_cna method is deprecated! Please use " "the register_vnet method."), DeprecationWarning) self.register_vnet(cna) def drop_cna(self, cna): """Drops the client network adapter from the slot topology. :deprecated: Use drop_vnet instead. :param cna: CNA EntryWrapper to drop. """ warnings.warn(_("The drop_cna method is deprecated! Please use " "the drop_vnet method."), DeprecationWarning) self.drop_vnet(cna) def register_vfc_mapping(self, vfcmap, fab): """Incorporate the slot topology associated with a VFC mapping. :param vfcmap: VFCMapping ElementWrapper representing the mapping to be incorporated. :param fab: The fabric name associated with the mapping. """ self._reg_slot(IOCLASS.VFC, fab, vfcmap.server_adapter.lpar_slot_num) def drop_vfc_mapping(self, vfcmap, fab): """Drops the client network adapter from the slot topology. :param vfcmap: VFCMapping ElementWrapper representing the mapping to be removed. :param fab: The fabric name associated with the mapping. """ self._drop_slot(IOCLASS.VFC, fab, vfcmap.server_adapter.lpar_slot_num) @staticmethod def _parse_vscsi_mapping(vscsimap): """Splits out a VSCSIMapping object for use in the internal slot map. :param vscsimap: A VSCSIMapping ElementWrapper to process. :return: The backing storage element, which may be any of the storage types supported by VSCSIMapping. None if the vscsimap lacks either backing storage or server adapter. :return: The stg_key (storage key) appropriate to the backing storage type. See the topology @property. 
None if the vscsimap lacks either backing storage or server adapter. :return: The integer client slot number to which the VSCSIMapping is attached. None if the vscsimap lacks either backing storage or server adapter. :return: The extra_spec (extra specification) value appropriate to the backing storage type. See the topology @property. None if the vscsimap lacks either backing storage or server adapter. """ # Must have backing storage and a server adapter to register if any(attr is None for attr in (vscsimap.backing_storage, vscsimap.server_adapter)): return None, None, None, None extra_spec = None bstor = vscsimap.backing_storage cslot = vscsimap.server_adapter.lpar_slot_num stg_key = bstor.udid if isinstance(bstor, stor.VOptMedia): # Virtual Optical Media - The IDs will be different on the # target. Using the UDID as a key (above) will at least allow us # to determine how many VIOSes should have VOptMedia devices. For # remote rebuild, assuming the consumer uses consistent image # naming, we can use the extra_spec to identify which of multiple # VOpts we should pick up. extra_spec = bstor.name else: # For shared storage (PV/LU) and for local (VDISK), we need to # make sure the LUA (Logical Unit Address) of the device is # preserved on the target. This informs things like boot order. extra_spec = vscsimap.target_dev.lua return bstor, stg_key, cslot, extra_spec def register_vscsi_mapping(self, vscsimap): """Incorporate the slot topology associated with a VSCSI mapping. :param vscsimap: VSCSIMapping ElementWrapper to be incorporated into the slot topology. """ bstor, stg_key, cslot, extra_spec = self._parse_vscsi_mapping(vscsimap) if bstor: self._reg_slot(bstor.__class__.__name__, stg_key, cslot, extra_spec=extra_spec) def drop_vscsi_mapping(self, vscsimap): """Drops the vscsi mapping from the slot topology. :param vscsimap: VSCSIMapping ElementWrapper to be removed from the slot topology. 
""" bstor, stg_key, cslot = self._parse_vscsi_mapping(vscsimap)[:3] if bstor: self._drop_slot(bstor.__class__.__name__, stg_key, cslot) @property def topology(self): """Produce the slot-to-I/O topology structure from this SlotMapStore. :return: A dict of the form: { slot_num: { IOCLASS: { io_key: extra_spec } } } ...where: - slot_num: Integer client slot ID. - IOCLASS: The IOCLASS enum indicating the type of I/O. Each IOCLASS enum is only present if the source had at least one I/O element of that type. - io_key: The unique identifier of the mapped I/O element. This differs by IOCLASS type - see below. - extra_spec: Additional information about the I/O element. This differs by IOCLASS type - see below. IOCLASS stg_key extra_spec ============================================================== CNA CNA.mac VSwitch.name VOPT VOptMedia.udid Media name VDISK VDisk.udid VDisk.capacity (float) PV PV.udid LUA LU LU.udid LUA VFC fabric name None VNIC VNIC.mac None """ ret = copy.deepcopy(self._slot_topo) ret.pop('_max_vslots', None) return ret @property def max_vslots(self): """Returns the highest slot number for the LPAR, or None if not set. This number corresponds to base_partition.max_virtual_slots, and indicates the maximum number of virtual slots (I/O buses) allowed on the LPAR. If register_max_vslots has never bene called, this value will be None. """ return self._slot_topo.get('_max_vslots', None) def _vswitch_id2name(self, adap): """(Cache and) return a map of VSwitch short ID to VSwitch name. :param adap: pypowervm.adapter.Adapter through which to query the VSwitch topology. :return: Dict of { vswitch_short_id: vswitch_name } """ if self._vswitch_map is None: self._vswitch_map = { vsw.switch_id: vsw.name for vsw in net.VSwitch.get( adap, parent=sys.System.get(adap)[0])} return self._vswitch_map def _reg_slot(self, io_class, io_key, client_slot, extra_spec=None): """Register a slot ID where an I/O key can be connected to many slots. 
:param io_class: Outer key representing one of the major classes of I/O handled by SlotMapStore. Must be one of the IOCLASS enum values. :param io_key: Unique identifier of the I/O element to be used as the secondary key. This differs based on io_class - see the topology @property. :param client_slot: The integer slot number by which the I/O element is attached to the client. :param extra_spec: Optional extra value to associate with the io_key. This should always be the same for a given io_key. The format/meaning of the value differs based on io_class = see the topology @property. """ # See the topology @property # { slot_num: { IOCLASS: { io_key: extra_spec } } } if client_slot not in self._slot_topo: self._slot_topo[client_slot] = {} if io_class not in self._slot_topo[client_slot]: self._slot_topo[client_slot][io_class] = {} # Always overwrite the extra_spec self._slot_topo[client_slot][io_class][io_key] = extra_spec def _drop_slot(self, io_class, io_key, client_slot): """Drops a client slot ID entry from the topology. :param io_class: Outer key representing one of the major classes of I/O handled by SlotMapStore. Must be one of the IOCLASS enum values. :param io_key: Unique identifier of the I/O element to be used as the secondary key. This differs based on io_class - see the topology @property. :param client_slot: The integer slot number by which the I/O element is now detached from the client. 
""" # See the topology @property # { slot_num: { IOCLASS: { io_key: extra_spec } } } if client_slot not in self._slot_topo: return if io_class not in self._slot_topo[client_slot]: return # Remove the key if it is in the topology if io_key in self._slot_topo[client_slot][io_class]: del self._slot_topo[client_slot][io_class][io_key] # Remove empty internal dicts if not self._slot_topo[client_slot][io_class]: del self._slot_topo[client_slot][io_class] if not self._slot_topo[client_slot]: del self._slot_topo[client_slot] class BuildSlotMap(object): """Provides information on which slots should be used for LPAR creates. This class takes in a SlotMapStore and provides information on which slots should be used on the client adapters. If not overridden, this base implementation returns a client slot that allows the deploy implementation to choose the 'next available' slot. """ def __init__(self, slot_store): """Initializes the slot map. :param slot_store: The existing instances SlotMapStore. """ self._slot_store = slot_store self._build_map = {} def get_vscsi_slot(self, vios_w, udid): """Gets the vSCSI client slot and extra spec for the VSCSI device. :param vios_w: VIOS wrapper. :param udid: UDID of the VSCSI device. :return: Integer client slot number on which to create the VSCSIMapping from the specified VIOS for the storage with the specified udid. :return: Extra specification appropriate to the storage type. See the SlotMapStore.topology @property. """ # Pull from the build map. Will default to None (indicating to # fuse an existing vscsi mapping or use next available slot for the # mapping). # Since the UDID should be universally unique, search all storage types for by_vuuid in six.itervalues(self._build_map): if vios_w.uuid in by_vuuid and udid in by_vuuid[vios_w.uuid]: return by_vuuid[vios_w.uuid][udid] return None, None def get_pv_vscsi_slot(self, vios_w, udid): """DEPRECATED; Gets the vSCSI client slot for the PV. Use get_vscsi_slot. 
This method will be removed shortly. :param vios_w: VIOS wrapper. :param udid: UDID of the physical volume. :return: Integer client slot number on which to create the VSCSIMapping from the specified VIOS for the PV with the specified udid. """ # Pull from the build map. Will default to None (indicating to # fuse an existing vscsi mapping or use next available slot for the # mapping). pv_vscsi_map = self._build_map.get(IOCLASS.PV, {}) return pv_vscsi_map.get(vios_w.uuid, {}).get(udid, (None,))[0] def get_vea_slot(self, mac): """Gets the client slot for the VEA. :param mac: MAC address string to look up. :return: Integer client slot number on which to create a CNA with the specified MAC address. """ # Pull from the build map. Will default to None (indicating to use # the next available high slot). return self._build_map.get(IOCLASS.CNA, {}).get( pvm_util.sanitize_mac_for_api(mac), None) def get_mgmt_vea_slot(self): """Gets the client slot and MAC for the mgmt VEA. :return: MAC Address for the NIC. :return: Integer client slot number for the NIC. """ mgmt_vea = self._build_map.get(IOCLASS.MGMT_CNA, {}) return mgmt_vea.get('mac', None), mgmt_vea.get('slot', None) def get_vnet_slot(self, mac): """Gets the client slot for the VEA or VNIC, mgmt VEA not included. :param mac: MAC address string to look up. :return: Integer client slot number on which to create a CNA or VNIC with the specified MAC address. """ # Pull from the build map. Will default to None (indicating to use # the next available high slot). mac = pvm_util.sanitize_mac_for_api(mac) return (self._build_map.get(IOCLASS.CNA, {}).get(mac, None) or self._build_map.get(IOCLASS.VNIC, {}).get(mac, None)) def get_vfc_slots(self, fabric, number_of_slots): """Gets the client slot list for a given NPIV fabric. :param fabric: Fabric name. :param number_of_slots: The number of slots for the specified fabric. :return: List of integer client slot numbers on which to map the given fabric. 
:raises: InvalidHostForRebuildSlotMismatch : if the target server requires more or less slots than the source server had. If this is a first deploy (ex. a standard BuildSlotMap) will not matter, and will return an array of None's (indicating to use the next available slots). """ number_of_map_slots = len(self._build_map.get(IOCLASS.VFC, {}).get(fabric, [])) if not number_of_map_slots: return [None] * number_of_slots if number_of_map_slots == number_of_slots: return self._build_map.get(IOCLASS.VFC, {}).get(fabric, None) raise pvm_ex.InvalidHostForRebuildSlotMismatch( rebuild_slots=number_of_slots, original_slots=number_of_map_slots) def get_max_vslots(self): """Retrieve or derive the maximum number of virtual slots for the LPAR. If the source LPAR's maximum number of virtual slots was registered in the source slot map, that value is returned. Otherwise, we attempt to derive a reasonable value based on the highest registered slot number. If none was registered, the minimum returned value will be the default from pypowervm.utils.lpar_builder.DEF_MAX_SLOT. It is the caller's responsibility to determine whether the value returned by this method is sufficiently high for the LPAR being created. """ # Return the exact value if it was registered if self._slot_store.max_vslots is not None: return self._slot_store.max_vslots # Otherwise, return the higher of # - The default from lpar_builder # - The highest registered slot, plus 10 from_hi_slot = (max(self._slot_store.topology.keys()) if self._slot_store.topology else 0) + 10 return max(lb.DEF_MAX_SLOT, from_hi_slot) class RebuildSlotMap(BuildSlotMap): """Used to determine the slot topology when rebuilding a VM. A LPAR rebuild needs to configure the client slots with the exact topology as their source. This implementation requires additional details from the target server, but then provides the LPAR's appropriate client slot layout. 
""" def __init__(self, slot_store, vios_wraps, vscsi_vol_to_vio, npiv_fabrics): """Initializes the rebuild map. :param slot_store: The existing instances SlotMapStore. :param vios_wraps: List of VIOS EntryWrappers. Must have been retrieved with the appropriate XAGs. :param vscsi_vol_to_vio: The volume to virtual I/O server mapping. Of the following format: { 'lu_udid' : [ 'vios_uuid', 'vios_uuid'], 'pv_udid' : [ 'vios_uuid', 'vios_uuid'] } :param npiv_fabrics: List of vFC fabric names. """ super(RebuildSlotMap, self).__init__(slot_store) self.vios_wraps = vios_wraps # Lets first get the VEAs and VNICs built self._vea_build_out() self._vnic_build_out() # Next up is vSCSI self._vscsi_build_out(vscsi_vol_to_vio) # And finally vFC (npiv) self._npiv_build_out(npiv_fabrics) def get_mgmt_vea_slot(self): """Gets the client slot and MAC for the mgmt VEA. There should only be one MGMT VEA per system. As such, subsequent calls to this will return the single MGMT VEA. :return: MAC Address for the NIC. :return: Integer client slot number for the NIC. """ # On a rebuild specifically, the MGMT VIF may not be there. If that # is the case, we want to make sure that it is the next available slot. mgmt_vea = self._build_map.get(IOCLASS.MGMT_CNA, {}) slot = mgmt_vea.get('slot') # If the slot is None, that means the MGMT vif wasn't there initially # but its being requested for the rebuild. if slot is None: # Make sure there were slots, then use the 'next highest' available slots_in_use = sorted(self._slot_store.topology, reverse=True) if len(slots_in_use) > 0: slot = slots_in_use[0] + 1 return mgmt_vea.get('mac', None), slot def _vscsi_build_slot_order(self): """Order slots by (descending) number of storage elements they host. :return: An ordered dictionary of the form { slot_num: count } where slot_num is the integer slot number and count is the number of supported* storage elements attached to this slot. 
The dict is ordered such that an iterator over its keys will return the slot_num with the highest count first, etc. *Only PV and LU are supported at this time. """ slots_order = {} for slot in self._slot_store.topology: io_dict = self._slot_store.topology[slot] # There are multiple types of things that can go into the vSCSI # map. Some are not supported for rebuild. if io_dict.get(IOCLASS.VOPT): raise pvm_ex.InvalidHostForRebuildInvalidIOType( io_type='Virtual Optical Media') # Create a dictionary of slots to the number of mappings per # slot. This will determine which slots we assign first. slots_order[slot] = (len(io_dict.get(IOCLASS.PV, {})) + len(io_dict.get(IOCLASS.LU, {})) + len(io_dict.get(IOCLASS.VDISK, {}))) # For VSCSI we need to figure out which slot numbers have the most # mappings and assign these ones to VIOSes first in descending order. # # We do this because if we have 4 mappings for storage elements 1 # through 4 on slot X and 5 mappings for storage elements 1 through 5 # on slot Y, then we must ensure the VIOS that has storage elements 1 # through 5 gets slot Y (even though it's a candidate for slot X). We # solve this by assigning the most used slot numbers first. slots_order = collections.OrderedDict(sorted( six.iteritems(slots_order), key=lambda t: t[1], reverse=True)) return slots_order def _vscsi_build_out(self, vol_to_vio): """Builds the '_build_map' for physical volumes and logical units.""" slots_order = self._vscsi_build_slot_order() # We're going to use the vol_to_vio dictionary for consistency and # remove elements from it. We need to deepcopy so that the original # remains the same. vol_to_vio_cp = copy.deepcopy(vol_to_vio) for slot in slots_order: slot_topo = self._slot_store.topology[slot] if not any(slot_topo.get(x) for x in (IOCLASS.PV, IOCLASS.LU, IOCLASS.VDISK)): continue # Initialize the set of candidate VIOSes to all available VIOSes. # We'll filter out and remove any VIOSes that can't host any PV or # LU for this slot. 
candidate_vioses = set(vio.uuid for vio in self.vios_wraps) slot_info = [(udid, lua, IOCLASS.PV) for udid, lua in six.iteritems(slot_topo.get(IOCLASS.PV, {}))] slot_info.extend([(udid, lua, IOCLASS.LU) for udid, lua in six.iteritems(slot_topo.get(IOCLASS.LU, {}))]) slot_info.extend([(udid, lua, IOCLASS.VDISK) for udid, lua in six.iteritems(slot_topo.get(IOCLASS.VDISK, {}))]) for udid, lua, stg_class in slot_info: # If the UDID isn't anywhere to be found on the destination # VIOSes then we have a problem. if udid not in vol_to_vio_cp: raise pvm_ex.InvalidHostForRebuildNoVIOSForUDID(udid=udid) # Inner Join. The goal is to end up with a set that only has # VIOSes which can see every backing storage elem for this # slot. candidate_vioses &= set(vol_to_vio_cp[udid]) # If the set of candidate VIOSes is empty then this host is # not a candidate for rebuild. if not candidate_vioses: raise pvm_ex.InvalidHostForRebuildNotEnoughVIOS(udid=udid) # Just take one, doesn't matter which one. # TODO(IBM): Perhaps find a way to ensure better distribution. vios_uuid_for_slot = candidate_vioses.pop() for udid, lua, stg_class in slot_info: self._put_vios_val(stg_class, vios_uuid_for_slot, udid, (slot, lua)) # There's somewhat of a problem with this. We want to remove # the VIOS UUID we're picking from this list so that other # VIOSes will pick up the other mappings for this storage, but # it may be the case that the original storage actually # belonged to more than one mapping on a single VIOS. It's not # clear if this is allowed, and if it is the backing storage # can potentially be corrupted. # # If there were multiple mappings for the same vSCSI storage # element on the same VIOS then the slot store could not # identify it. We may hit an invalid host for rebuild exception # if this happens or we may not. It depends on the differences # between source and destination VIOSes. 
vol_to_vio_cp[udid].remove(vios_uuid_for_slot) def _vea_build_out(self): """Builds the '_build_map' for the veas.""" for slot, io_dict in six.iteritems(self._slot_store.topology): for mac, vswitch in six.iteritems(io_dict.get(IOCLASS.CNA, {})): mac = pvm_util.sanitize_mac_for_api(mac) if vswitch == 'MGMTSWITCH': self._put_mgmt_vea_slot(mac, slot) else: self._put_novios_val(IOCLASS.CNA, mac, slot) def _vnic_build_out(self): """Builds the '_build_map' for the vnics.""" for slot, io_dict in six.iteritems(self._slot_store.topology): for mac in io_dict.get(IOCLASS.VNIC, {}): self._put_novios_val( IOCLASS.VNIC, pvm_util.sanitize_mac_for_api(mac), slot) def _npiv_build_out(self, fabrics): """Builds the build map for the NPIV fabrics. :param fabrics: List of NPIV fabric names. :raise InvalidHostForRebuildFabricsNotFound: If any fabrics in the slot_map topology are not in fabrics. """ seen_fabrics = set() for fabric in fabrics: fabric_slots = [] # Add the slot numbers for this fabric for slot, iomap in six.iteritems(self._slot_store.topology): if fabric not in iomap.get(IOCLASS.VFC, {}): continue fabric_slots.append(slot) seen_fabrics.add(fabric) self._put_novios_val(IOCLASS.VFC, fabric, fabric_slots) # Make sure all the topology's fabrics are accounted for. # topo_fabrics is all the fabrics in all the slots from the slot_map # topology. topo_fabrics = {fab for iomap in self._slot_store.topology.values() for fab in iomap.get(IOCLASS.VFC, {}).keys()} if topo_fabrics - seen_fabrics: raise pvm_ex.InvalidHostForRebuildFabricsNotFound( fabrics=', '.join(topo_fabrics - seen_fabrics)) def _put_mgmt_vea_slot(self, mac, slot): """Store client slot data for the managament VEA. There should only ever be one of these. Enhances the rebuild map with: { IOCLASS_MGMT_CNA: { 'mac': mac, 'slot': 'slot' } } :param mac: MAC address (string) of the management VEA. :param slot: Client slot number for the management VEA. 
""" if IOCLASS.MGMT_CNA not in self._build_map: self._build_map[IOCLASS.MGMT_CNA] = {} self._build_map[IOCLASS.MGMT_CNA] = {'mac': mac, 'slot': slot} def _put_novios_val(self, io_class, io_key, val): """Store a keyed value not associated with a VIOS. This applies to non-management CNAs, VNICs, and NPIV fabrics. Enhances the rebuild map with: { io_class: { io_key: val } } :param io_class: IOCLASS const value representing the type of I/O. Either IOCLASS.CNA, IOCLASS.VNIC, or IOCLASS.NPIV :param io_key: Key of the I/O device to be added. MAC address for IOCLASS.CNA, IOCLASS.VNIC; fabric name for IOCLASS.VFC. :param val: The slot value(s) to be added. A list of slot numbers for IOCLASS.VFC; a single slot number for IOCLASS.CNA and IOCLASS.VNIC. """ if io_class not in self._build_map: self._build_map[io_class] = {} self._build_map[io_class][io_key] = val def _put_vios_val(self, stg_class, vios_uuid, udid, val): """Store client slot info associated with a storage dev and VIOS. This applies to VSCSI devices. Enhances the rebuild map with: { stg_class: { vios_uuid: { stg_key: val } } } :param stg_class: IOCLASS const value representing the type of storage. :param vios_uuid: UUID of the VIOS which will host the storage device indicated by stg_key. :param udid: UDID of the storage device to be added. :param val: The slot data to be added. For IOCLASS.PV, this is a tuple of (slot, lua). """ if stg_class not in self._build_map: self._build_map[stg_class] = {} if vios_uuid not in self._build_map[stg_class]: self._build_map[stg_class][vios_uuid] = {} self._build_map[stg_class][vios_uuid][udid] = val pypowervm-1.1.24/pypowervm/tasks/storage.py0000664000175000017500000017130113571367171020466 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Create, remove, map, unmap, and populate virtual storage objects.""" import contextlib import math import os import tempfile import threading import time from concurrent import futures from oslo_concurrency import lockutils as lock from oslo_log import log as logging from taskflow import engines as tf_eng from taskflow.patterns import unordered_flow as tf_uf from taskflow import task as tf_tsk from pypowervm import const as c from pypowervm import exceptions as exc from pypowervm.helpers import vios_busy from pypowervm.i18n import _ from pypowervm.tasks import scsi_mapper as sm from pypowervm.tasks import vfc_mapper as fm from pypowervm import util from pypowervm.utils import retry from pypowervm.utils import transaction as tx from pypowervm.wrappers import job from pypowervm.wrappers import logical_partition as lpar from pypowervm.wrappers import managed_system as sys from pypowervm.wrappers import storage as stor from pypowervm.wrappers import vios_file as vf from pypowervm.wrappers import virtual_io_server as vios FILE_UUID = 'FileUUID' _RESCAN_VSTOR = 'RescanVirtualDisk' VOLUME_NOT_FOUND = '-1' # Setup logging LOG = logging.getLogger(__name__) _LOCK_VOL_GRP = 'vol_grp_lock' # Concurrent uploads _UPLOAD_SEM = threading.Semaphore(3) class UploadType(object): """Used in conjunction with the upload_xx methods. Indicates how the invoker will pass in the handle to the data. """ # The data stream (either a file handle or stream) to upload. Must have # the 'read' method that returns a chunk of bytes. 
IO_STREAM = 'stream' # A parameter-less function that builds an IO_STREAM. IO_STREAM_BUILDER = 'stream_builder' # DEPRECATED: Known issues combining threads and greenlets may cause hangs. # # A method function that will be invoked to stream the data into the # virtual disk. Only one parameter is passed in, and that is the path to # the file to stream the data into. FUNC = 'delegate_function' def _delete_vio_file(vio_file): """Try to delete a File artifact. :param vio_file: pypowervm.wrappers.vios_file.File object, retrieved from the server, representing the File object to delete. :return: If the deletion is successful (or the File was already gone), the method returns None. Otherwise, the vio_file parameter is returned. """ # Try to delete the file. try: vio_file.adapter.delete(vio_file.schema_type, root_id=vio_file.uuid, service='web') except exc.HttpNotFound: # Already gone - ignore pass except exc.Error: LOG.exception(_("Failed to delete vio_file with UUID %s. It must be " "manually deleted."), vio_file.uuid) return vio_file return None def crt_copy_vdisk(adapter, v_uuid, vol_grp_uuid, src, f_size, d_name, d_size=None, file_format=None): """Create a new virtual disk that contains all the data of the src given. :param adapter: The adapter to talk over the API. :param v_uuid: The UUID of the Virtual I/O Server that will host the new VDisk. :param vol_grp_uuid: The UUID of the volume group that will host the new VDisk. :param src: UDID of virtual disk to copy data from :param f_size: The size (in bytes) of the src disk. :param d_name: The name that should be given to the disk on the Virtual I/O Server that will contain the file. :param d_size: (Optional) The desired size of the new VDisk in bytes. If omitted or smaller than f_size, it will be set to match f_size. :param file_format: (Optional) File format of src VDisk. See stor.FileFormatType enumeration for valid formats. :return: The virtual disk that the file is uploaded into. """ # Create the new virtual disk. 
The size here is in GB. We can use decimal # precision on the create call. What the VIOS will then do is determine # the appropriate segment size (pp) and will provide a virtual disk that # is 'at least' that big. Depends on the segment size set up on the # volume group how much over it could go. if d_size is None or d_size < f_size: d_size = f_size gb_size = util.convert_bytes_to_gb(d_size) # The REST API requires that we round up to the highest GB. gb_size = math.ceil(gb_size) return crt_vdisk(adapter, v_uuid, vol_grp_uuid, d_name, gb_size, base_image=src, file_format=file_format) def _clean_out_bad_upload(adapter, vol_grp_uuid, v_uuid, n_vdisk, vio_file): """Cleans out a bad vDisk after a failed upload.""" # Keeps sonar happy. vol_grp = stor.VG.get(adapter, vol_grp_uuid, parent_type=vios.VIOS, parent_uuid=v_uuid) rm_vg_storage(vol_grp, vdisks=[n_vdisk]) _delete_vio_file(vio_file) def upload_new_vdisk(adapter, v_uuid, vol_grp_uuid, io_handle, d_name, f_size, d_size=None, sha_chksum=None, upload_type=UploadType.IO_STREAM, file_format=None): """Uploads a new virtual disk. :param adapter: The adapter to talk over the API. :param v_uuid: The Virtual I/O Server UUID that will host the disk. :param vol_grp_uuid: The volume group that will host the Virtual Disk's UUID. :param io_handle: The I/O handle (as defined by the upload_type) :param d_name: The name that should be given to the disk on the Virtual I/O Server that will contain the file. :param f_size: The size (in bytes) of the stream to be uploaded. :param d_size: (Optional) The desired size of the new VDisk in bytes. If omitted or smaller than f_size, it will be set to match f_size. :param sha_chksum: (Optional) The SHA256 checksum for the file. Useful for integrity checks. :param upload_type: (Optional, Default: IO_STREAM) Defines the way in which the vdisk should be uploaded. Refer to the UploadType enumeration for valid upload mechanisms. :param file_format: (Optional) Format of file coming from io_handle. 
See stor.FileFormatType enumeration for valid formats. :return: The first return value is the virtual disk that the file is uploaded into. :return: Normally the second return value will be None, indicating that the disk and image were uploaded without issue. If for some reason the File metadata for the VIOS was not cleaned up, the return value is the File EntryWrapper. This is simply a metadata marker to be later used to retry the cleanup. """ # Create the new virtual disk. The size here is in GB. We can use decimal # precision on the create call. What the VIOS will then do is determine # the appropriate segment size (pp) and will provide a virtual disk that # is 'at least' that big. Depends on the segment size set up on the # volume group how much over it could go. if d_size is None or d_size < f_size: d_size = f_size gb_size = util.convert_bytes_to_gb(d_size) # The REST API requires that we round up to the highest GB. gb_size = math.ceil(gb_size) n_vdisk = crt_vdisk(adapter, v_uuid, vol_grp_uuid, d_name, gb_size, file_format=file_format) # Next, create the file, but specify the appropriate disk udid from the # Virtual Disk vio_file = _create_file( adapter, d_name, vf.FileType.DISK_IMAGE, v_uuid, f_size=f_size, tdev_udid=n_vdisk.udid, sha_chksum=sha_chksum) try: # Run the upload maybe_file = _upload_stream(vio_file, io_handle, upload_type) except Exception: _clean_out_bad_upload(adapter, vol_grp_uuid, v_uuid, n_vdisk, vio_file) # Re-raise the original exception raise return n_vdisk, maybe_file def upload_vopt(adapter, v_uuid, d_stream, f_name, f_size=None, sha_chksum=None): """Upload a file/stream into a virtual media repository on the VIOS. :param adapter: The adapter to talk over the API. :param v_uuid: The Virtual I/O Server UUID that will host the file. :param d_stream: A file path or data stream (must have 'read' method) to upload. :param f_name: The name that should be given to the file. :param f_size: (OPTIONAL) The size in bytes of the file to upload. 
Useful for integrity checks. :param sha_chksum: (OPTIONAL) The SHA256 checksum for the file. Useful for integrity checks. :return: The vOpt loaded into the media repository. This is a reference, for use in scsi mappings. :return: Normally this method will return None, indicating that the disk and image were uploaded without issue. If for some reason the File metadata for the VIOS was not cleaned up, the return value is the File EntryWrapper. This is simply a marker to be later used to retry the cleanup. """ # First step is to create the 'file' on the system. vio_file = _create_file( adapter, f_name, vf.FileType.MEDIA_ISO, v_uuid, sha_chksum, f_size) if isinstance(d_stream, str): f_wrap = _upload_file(vio_file, d_stream) else: f_wrap = _upload_stream(vio_file, d_stream, UploadType.IO_STREAM) # Simply return a reference to this. reference = stor.VOptMedia.bld_ref(adapter, f_name) return reference, f_wrap def upload_new_lu(v_uuid, ssp, io_handle, lu_name, f_size, d_size=None, sha_chksum=None, return_ssp=False, upload_type=UploadType.IO_STREAM): """Creates a new SSP Logical Unit and uploads an image to it. Note: return spec varies based on the return_ssp parameter: # Default/legacy behavior new_lu, maybe_file = upload_new_lu(..., return_ssp=False) # With return_ssp=True ssp, new_lu, maybe_file = upload_new_lu(..., return_ssp=True) :param v_uuid: The UUID of the Virtual I/O Server through which to perform the upload. (Note that the new LU will be visible from any VIOS in the Shared Storage Pool's Cluster.) :param ssp: SSP EntryWrapper representing the Shared Storage Pool on which to create the new Logical Unit. :param io_handle: The I/O handle (as defined by the upload_type) :param lu_name: The name that should be given to the new LU. :param f_size: The size (in bytes) of the stream to be uploaded. :param d_size: (OPTIONAL) The size of the LU (in bytes). Not required if it should match the file. Must be at least as large as the file. 
:param sha_chksum: (Optional) The SHA256 checksum for the file. Useful for integrity checks. :param return_ssp: (Optional) If True, the return value of the method is a three-member tuple whose third value is the updated SSP EntryWrapper. If False (the default), the method returns a two-member tuple. :param upload_type: (Optional, Default: IO_STREAM) Defines the way in which the LU should be uploaded. Refer to the UploadType enumeration for valid upload mechanisms. :return: If the return_ssp parameter is True, the first return value is the updated SSP EntryWrapper, containing the newly-created and -uploaded LU. If return_ssp is False, this return value is absent - only the below two values are returned. :return: An LU EntryWrapper corresponding to the Logical Unit into which the file was uploaded. :return: Normally None, indicating that the LU was created and the image was uploaded without issue. If for some reason the File metadata for the VIOS was not cleaned up, the return value is the File EntryWrapper. This is simply a marker to be later used to retry the cleanup. """ # Create the new Logical Unit. The LU size needs to be in decimal GB. if d_size is None or d_size < f_size: d_size = f_size gb_size = util.convert_bytes_to_gb(d_size, dp=2) ssp, new_lu = crt_lu(ssp, lu_name, gb_size, typ=stor.LUType.IMAGE) maybe_file = upload_lu(v_uuid, new_lu, io_handle, f_size, sha_chksum=sha_chksum, upload_type=upload_type) return (ssp, new_lu, maybe_file) if return_ssp else (new_lu, maybe_file) def upload_lu(v_uuid, lu, io_handle, f_size, sha_chksum=None, upload_type=UploadType.IO_STREAM): """Uploads a data stream to an existing SSP Logical Unit. :param v_uuid: The UUID of the Virtual I/O Server through which to perform the upload. :param lu: LU Wrapper representing the Logical Unit to which to upload the data. The LU must already exist in the SSP. :param io_handle: The I/O handle (as defined by the upload_type) :param f_size: The size (in bytes) of the stream to be uploaded. 
:param sha_chksum: (Optional) The SHA256 checksum for the file. Useful for integrity checks. :param upload_type: (Optional, Default: IO_STREAM) Defines the way in which the LU should be uploaded. Refer to the UploadType enumeration for valid upload mechanisms. :return: Normally the return value will be None, indicating that the image was uploaded without issue. If for some reason the File metadata for the VIOS was not cleaned up, the return value is the LU EntryWrapper. This is simply a marker to be later used to retry the cleanup. """ # Create the file, specifying the UDID from the new Logical Unit. # The File name matches the LU name. vio_file = _create_file( lu.adapter, lu.name, vf.FileType.DISK_IMAGE, v_uuid, f_size=f_size, tdev_udid=lu.udid, sha_chksum=sha_chksum) return _upload_stream(vio_file, io_handle, upload_type) def _upload_file(vio_file, path): """Upload a file by its path :param vio_file: The File EntryWrapper representing the metadata for the file. :param path: The path as a string to the file to be uploaded. :return: Returns None if file upload is successful. Otherwise returns the File EntryWrapper if the File metadata was not cleaned up. """ f_wrap = None i = 0 while True: try: with open(path, 'rb') as d_stream: f_wrap = _upload_stream(vio_file, d_stream, UploadType.IO_STREAM) break except Exception: if i < 3: LOG.warning(_("Encountered an issue while uploading. " "Will retry.")) else: raise i += 1 return f_wrap def _upload_stream(vio_file, io_handle, upload_type): """Upload a file stream and clean up the metadata afterward. When files are uploaded to either VIOS or the PowerVM management platform, they create artifacts on the platform. These artifacts must be cleaned up because there is a 100 file limit. When the file UUID is cleaned, two things can happen: 1) if the file is targeted to the PowerVM management platform, then both the file and the metadata artifacts are cleaned up. 
2) if the file is a VIOS file, then just the PowerVM management platform artifacts are cleaned up. It's safe to cleanup VIOS file artifacts directly after uploading, as it will not affect the VIOS entity. :param vio_file: The File EntryWrapper representing the metadata for the file. :param io_handle: The I/O handle (as defined by the upload_type) :param upload_type: Defines the way in which the element should be uploaded. Refer to the UploadType enumeration for valid upload mechanisms. :return: Normally this method will return None, indicating that the disk and image were uploaded without issue. If for some reason the File metadata for the VIOS was not cleaned up, the return value is the File EntryWrapper. This is simply a marker to be later used to retry the cleanup. """ # If the io_handle is a function that opens a stream we are to read from, # open that stream. if upload_type == UploadType.IO_STREAM_BUILDER: io_handle, upload_type = io_handle(), UploadType.IO_STREAM try: # Acquire the upload semaphore _UPLOAD_SEM.acquire() start = time.time() # Upload the file directly to the REST API server. _upload_stream_api(vio_file, io_handle, upload_type) LOG.debug("Upload took %.2fs", time.time() - start) finally: # Must release the semaphore _UPLOAD_SEM.release() # Allow the exception to be raised up...if there was one. ret_vio = _delete_vio_file(vio_file) return ret_vio @contextlib.contextmanager def _rest_api_pipe(file_writer): """A piping context manager to allow "local" uploads from a remote user. Usage: with _rest_api_pipe(file_writer) as read_stream: upload(read_stream) :param file_writer: A method in the spirit of: def file_writer(file_path): with open(file_path, 'w') as out_stream: while ...: out_stream.write(...) 
""" fifo_reader, file_path, temp_dir = None, None, None try: # Make the file path temp_dir = tempfile.mkdtemp() file_path = os.path.join(temp_dir, 'REST_API_Pipe') os.mkfifo(file_path) # Spawn the writer thread with futures.ThreadPoolExecutor(1) as th_pool: writer_f = th_pool.submit(file_writer, file_path) # Create a readable stream on the FIFO pipe. fifo_reader = util.retry_io_command(open, file_path, 'r') # Let the caller consume the pipe contents yield fifo_reader # Make sure the writer is finished. This will also raise any # exception the writer caused. writer_f.result() finally: # Close and clean up the FIFO, carefully. Any step could have raised. if fifo_reader: util.retry_io_command(fifo_reader.close) if file_path: os.remove(file_path) if temp_dir: os.rmdir(temp_dir) def _upload_stream_api(vio_file, io_handle, upload_type): # If using a FUNCtion-based upload remotely, we have to make that function # (which is passed in as io_handle) think it's writing to a local file. We # spoof this with _RestApiPipe, which uses a fifo (named pipe) that it # populates from d_stream in a separate thread. if upload_type == UploadType.FUNC: with _rest_api_pipe(io_handle) as in_stream: vio_file.adapter.upload_file(vio_file.element, in_stream) else: # We don't want to use the VIOS retry mechanism here. helpers = vio_file.adapter.helpers try: helpers.remove(vios_busy.vios_busy_retry_helper) except ValueError: pass # io_handle is already an open, readable stream vio_file.adapter.upload_file(vio_file.element, io_handle, helpers=helpers) def _create_file(adapter, f_name, f_type, v_uuid, sha_chksum=None, f_size=None, tdev_udid=None): """Creates a file on the VIOS, which is needed before the POST. :param adapter: The adapter to talk over the API. :param f_name: The name for the file. :param f_type: The type of the file, from vios_file.FileType. :param v_uuid: The UUID for the Virtual I/O Server that the file will reside on. 
:param sha_chksum: (OPTIONAL) The SHA256 checksum for the file. Useful for integrity checks. :param f_size: (OPTIONAL) The size of the file to upload. Useful for integrity checks. :param tdev_udid: The device UDID that the file will back into. :returns: The File Wrapper """ return vf.File.bld(adapter, f_name, f_type, v_uuid, sha_chksum=sha_chksum, f_size=f_size, tdev_udid=tdev_udid).create() def default_tier_for_ssp(ssp): """Find the default Tier for the given Shared Storage Pool. :param ssp: The SSP EntryWrapper whose default Tier is to be retrieved. :return: Tier EntryWrapper representing ssp's default Tier. :raise NoDefaultTierFoundOnSSP: If no default Tier is found on the specified Shared Storage Pool. """ tier = stor.Tier.search(ssp.adapter, parent=ssp, is_default=True, one_result=True) if tier is None: raise exc.NoDefaultTierFoundOnSSP(ssp_name=ssp.name) return tier def crt_lu_linked_clone(ssp, cluster, src_lu, new_lu_name, lu_size_gb=0): """Create a new LU as a linked clone to a backing image LU. :deprecated: Use crt_lu instead. :param ssp: The SSP EntryWrapper representing the SharedStoragePool on which to create the new LU. :param cluster: The Cluster EntryWrapper representing the Cluster against which to invoke the LULinkedClone Job. :param src_lu: The LU ElementWrapper or LUEnt EntryWrapper representing the link source. :param new_lu_name: The name to be given to the new LU. :param lu_size_gb: The size of the new LU in GB with decimal precision. If this is not specified or is smaller than the size of the image_lu, the size of the image_lu is used. :return: The updated SSP EntryWrapper containing the newly-created LU. :return: The newly created and linked LU. """ import warnings warnings.warn(_("The crt_lu_linked_clone method is deprecated! Please " "use the crt_lu method (clone=src_lu, size=lu_size_gb)."), DeprecationWarning) # Create the LU. No locking needed on this method, as the crt_lu handles # the locking. 
    # NOTE(review): continuation of a linked-clone helper whose def is above
    # this chunk; crt_lu returns (ssp_or_tier, new_lu).
    ssp, dst_lu = crt_lu(ssp, new_lu_name, lu_size_gb, thin=True,
                         typ=stor.LUType.DISK, clone=src_lu)
    return ssp, dst_lu


def _image_lu_for_clone(lus, clone_lu):
    """Given a Disk LU linked clone, find the Image LU to which it is linked.

    :param lus: List of LUs (LU or LUEnt) to search.
    :param clone_lu: The LU EntryWrapper representing the Disk LU linked clone
                     whose backing Image LU is to be found.
    :return: The LU EntryWrapper representing the Image LU backing the
             clone_lu.  None if no such Image LU can be found.
    """
    # Check if the clone never happened
    if clone_lu.cloned_from_udid is None:
        return None
    # When comparing udid/cloned_from_udid, disregard the 2-digit 'type' prefix
    image_udid = clone_lu.cloned_from_udid[2:]
    for lu in lus:
        # Only Image LUs can back a linked clone.
        if lu.lu_type != stor.LUType.IMAGE:
            continue
        if lu.udid[2:] == image_udid:
            return lu
    return None


def _image_lu_in_use(lus, image_lu):
    """Determine whether an Image LU still has any Disk LU linked clones.

    :param lus: List of all the LUs in the SSP/Tier.  They must have UDIDs
                (i.e. must have been retrieved from the server, not created
                locally).
    :param image_lu: LU EntryWrapper representing the Image LU.
    :return: True if the SSP contains any Disk LU linked clones backed by the
             image_lu; False otherwise.
    """
    # When comparing udid/cloned_from_udid, disregard the 2-digit 'type' prefix
    image_udid = image_lu.udid[2:]
    for lu in lus:
        # Only Disk LUs can be linked clones of an Image LU.
        if lu.lu_type != stor.LUType.DISK:
            continue
        cloned_from = lu.cloned_from_udid
        if cloned_from is None:
            LOG.warning(
                _("Disk Logical Unit %(luname)s has no backing image LU.  "
                  "(UDID: %(udid)s) "), {'luname': lu.name, 'udid': lu.udid})
            continue
        if cloned_from[2:] == image_udid:
            return True
    return False


def find_vg(adapter, vg_name, vios_name=None):
    """Returns the VIOS and VG wrappers for the volume group.

    :param adapter: pypowervm.adapter.Adapter for REST communication.
    :param vg_name: Name of the volume group to find.
    :param vios_name: The name of the VIOS on which to search for the volume
                      group.  If not specified, all VIOSes are searched.
    :return vios_wrap: The VIOS wrapper representing the Virtual I/O Server
                       on which the volume group was found.
    :return vg_wrap: The VG wrapper representing the volume group.
    :raise VIOSNotFound: If vios_name was specified and no such VIOS exists.
    :raise VGNotFound: If no volume group of the specified vg_name could be
                       found.
    """
    if vios_name:
        # Search for the VIOS by name if specified.
        vios_wraps = vios.VIOS.search(adapter, name=vios_name)
        if not vios_wraps:
            raise exc.VIOSNotFound(vios_name=vios_name)
    else:
        # Get all VIOSes.
        vios_wraps = vios.VIOS.get(adapter)

    # Loop through each VIOS's VGs to find the one with the appropriate name.
    for vios_wrap in vios_wraps:
        # Search the feed for the volume group
        for vg_wrap in stor.VG.get(adapter, parent=vios_wrap):
            LOG.debug('Volume group: %s', vg_wrap.name)
            if vg_name == vg_wrap.name:
                return vios_wrap, vg_wrap

    raise exc.VGNotFound(vg_name=vg_name)


@lock.synchronized(_LOCK_VOL_GRP)
def crt_vdisk(adapter, v_uuid, vol_grp_uuid, d_name, d_size_gb,
              base_image=None, file_format=None):
    """Creates a new Virtual Disk in the specified volume group.

    :param adapter: The pypowervm.adapter.Adapter through which to request
                    the change.
    :param v_uuid: The UUID of the Virtual I/O Server that will host the disk.
    :param vol_grp_uuid: The volume group that will host the new Virtual Disk.
    :param d_name: The name that should be given to the disk on the Virtual
                   I/O Server that will contain the file.
    :param d_size_gb: The size of the disk in GB.
    :param base_image: (Optional) The UDID of a VDisk to copy data from.
    :param file_format: (Optional) File format of the new VirtualDisk.  See
                        stor.FileFormatType enumeration for valid formats.
    :return: VDisk ElementWrapper representing the new VirtualDisk from the
             server response (i.e. UDID will be populated).
    :raise exc.Error: If the server response from attempting to add the VDisk
                      does not contain the new VDisk.
    """
    # Get the existing volume group
    vol_grp_data = adapter.read(vios.VIOS.schema_type, v_uuid,
                                stor.VG.schema_type, vol_grp_uuid)
    vol_grp = stor.VG.wrap(vol_grp_data.entry)
    new_vdisk = stor.VDisk.bld(adapter, d_name, d_size_gb,
                               base_image=base_image, file_format=file_format)

    # Append it to the list.
    vol_grp.virtual_disks.append(new_vdisk)

    # Now perform an update on the adapter.
    vol_grp = vol_grp.update()

    # The new Virtual Disk should be created.  Find the one we created.
    for vdisk in vol_grp.virtual_disks:
        # Vdisk name can be either disk_name or /path/to/disk_name
        if vdisk.name.split('/')[-1] == d_name.split('/')[-1]:
            return vdisk
    # This should never occur since the update went through without error,
    # but adding just in case as we don't want to create the file meta
    # without a backing disk.
    raise exc.Error(_("Unable to locate new vDisk on file upload."))


def rescan_vstor(vio, vstor, adapter=None):
    """Update the internal metadata for a virtual storage object.

    :param vio: A VIOS wrapper or UUID string of the VIOS on which to perform
                the rescan.
    :param vstor: The VDisk wrapper or udid of the storage object to rescan.
    :param adapter: A pypowervm.adapter.Adapter for REST API communication.
                    Required if neither vio nor vstor is a wrapper, optional
                    otherwise.
    :raises AdapterNotFound: If no adapter attribute can be found.
    :raises JobRequestFailed: If the rescan failed.
    :raises JobRequestTimedOut: If the rescan Job timed out.
    """
    # Prefer an explicitly-supplied adapter; fall back to either wrapper's.
    adapter = (adapter or getattr(vio, 'adapter', None) or
               getattr(vstor, 'adapter', None))
    if not adapter:
        raise exc.AdapterNotFound()
    # Accept either a wrapper (use its uuid/udid) or a bare string.
    vio_uuid = getattr(vio, 'uuid', vio)
    stor_udid = getattr(vstor, 'udid', vstor)

    job_w = job.Job.wrap(adapter.read(
        vios.VIOS.schema_type, root_id=vio_uuid,
        suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_RESCAN_VSTOR))
    job_p = [job_w.create_job_parameter('VirtualDiskUDID', stor_udid)]
    # Exceptions raise up.  Otherwise, no news is good news.
    try:
        job_w.run_job(vio_uuid, job_parms=job_p)
    except exc.JobRequestFailed:
        results = job_w.get_job_results_as_dict()
        # Translate the Job's "volume not found" return code into a more
        # specific exception; re-raise anything else unchanged.
        if results.get("RETURN_CODE") == VOLUME_NOT_FOUND:
            raise exc.VstorNotFound(stor_udid=stor_udid, vios_uuid=vio_uuid)
        else:
            raise


@lock.synchronized(_LOCK_VOL_GRP)
@retry.retry(argmod_func=retry.refresh_wrapper, tries=60,
             delay_func=retry.STEPPED_RANDOM_DELAY)
def rm_vg_storage(vg_wrap, vdisks=None, vopts=None):
    """Remove storage elements from a volume group.

    Changes are flushed back to the REST server.  Retried (with a refreshed
    vg_wrap) on update conflict, per the retry decorator above.

    :param vg_wrap: VG wrapper representing the Volume Group to update.
    :param vdisks: Iterable of VDisk wrappers representing the Virtual Disks
                   to delete.  Ignored if None or empty.
    :param vopts: Iterable of VOptMedia wrappers representing Virtual Optical
                  devices to delete.  Ignored if None or empty.
    :return: The (possibly) updated vg_wrap.
    """
    changes = 0
    if vdisks:
        changes += len(_rm_vdisks(vg_wrap, vdisks))
    if vopts:
        changes += len(_rm_vopts(vg_wrap, vopts))
    if changes:
        # Update the volume group to remove the storage, if necessary.
        vg_wrap = vg_wrap.update()
    return vg_wrap


def _rm_dev_by_udid(dev, devlist):
    """Use UDID matching to remove a device from a list.

    Use this method in favor of devlist.remove(dev) when the dev originates
    from somewhere other than the devlist, and may have some non-matching
    properties which would cause normal equality comparison to fail.

    For example, use this method when using a VSCSI mapping's backing_storage
    to decide which LogicalUnit to remove from the list of SSP.logical_units.

    Note: This method relies on UDIDs being present in both dev and the
    corresponding item in devlist.

    :param dev: The EntryWrapper representing the device to remove.  May be
                VDisk, VOpt, PV, or LU.
    :param devlist: The list from which to remove the device.
    :return: The device removed, as it existed in the devlist.  None if the
             device was not found by UDID.
    """
    if not dev.udid:
        LOG.warning(_("Ignoring device because it lacks a UDID:\n%s"),
                    dev.toxmlstring(pretty=True))
        return None

    matches = [realdev for realdev in devlist if realdev.udid == dev.udid]
    if len(matches) == 0:
        LOG.warning(_("Device %s not found in list."), dev.name)
        return None
    if len(matches) > 1:
        # UDIDs are expected to be unique within a devlist; duplicates
        # indicate an inconsistency we must not silently "fix".
        raise exc.FoundDevMultipleTimes(devname=dev.name, count=len(matches))

    LOG.debug("Removing %s from devlist.", dev.name)
    match = matches[0]
    devlist.remove(match)
    return match


def _rm_vdisks(vg_wrap, vdisks):
    """Delete some number of virtual disks from a volume group wrapper.

    The wrapper is not updated back to the REST server.

    :param vg_wrap: VG wrapper representing the Volume Group to update.
    :param vdisks: Iterable of VDisk wrappers representing the Virtual Disks
                   to delete.
    :return: The list of VDisks removed from vg_wrap.  The consumer may use
             its length/truthiness to decide whether to run vg_wrap.update()
             or not.
    """
    existing_vds = vg_wrap.virtual_disks
    changes = []
    for removal in vdisks:
        # Can't just call direct on remove, because attribs are off.
        removed = _rm_dev_by_udid(removal, existing_vds)

        if removed is not None:
            LOG.info(_('Deleting virtual disk %(vdisk)s from volume group '
                       '%(vg)s'), {'vdisk': removed.name, 'vg': vg_wrap.name})
            changes.append(removed)

    return changes


def _rm_vopts(vg_wrap, vopts):
    """Delete some number of virtual optical media from a volume group wrapper.

    The wrapper is not updated back to the REST server.

    :param vg_wrap: VG wrapper representing the Volume Group to update.
    :param vopts: Iterable of VOptMedia wrappers representing the devices to
                  delete.
    :return: The list of VOptMedia removed from vg_wrap.  The consumer may
             use its length/truthiness to decide whether to run
             vg_wrap.update() or not.
    """
    vg_om = vg_wrap.vmedia_repos[0].optical_media
    changes = []
    for vopt in vopts:
        try:
            vg_om.remove(vopt)
            LOG.info(_('Deleting virtual optical device %(vopt)s from volume '
                       'group %(vg)s'), {'vopt': vopt.name,
                                         'vg': vg_wrap.name})
            changes.append(vopt)
        except ValueError:
            # It's okay if the vopt was already absent.
            pass
    return changes


def crt_lu(tier_or_ssp, name, size, thin=None, typ=None, clone=None):
    """Create a Logical Unit on the specified Tier.

    :param tier_or_ssp: Tier or SSP EntryWrapper denoting the Tier or Shared
                        Storage Pool on which to create the LU.  If an SSP is
                        supplied, the LU is created on the default Tier.
    :param name: Name for the new Logical Unit.
    :param size: LU size in GB with decimal precision.
    :param thin: Provision the new LU as Thin (True) or Thick (False).  If
                 unspecified, use the server default.
    :param typ: The type of LU to create, one of the LUType values.  If
                unspecified, use the server default.
    :param clone: If the new LU is to be a linked clone, this param is a
                  LU(Ent) wrapper representing the backing image LU.
    :return: If the tier_or_ssp argument is an SSP, the updated SSP wrapper
             (containing the new LU and with a new etag) is returned.
             Otherwise, the first return value is the Tier.
    :return: LU ElementWrapper representing the Logical Unit just created.
    """
    is_ssp = isinstance(tier_or_ssp, stor.SSP)
    tier = default_tier_for_ssp(tier_or_ssp) if is_ssp else tier_or_ssp

    lu = stor.LUEnt.bld(tier_or_ssp.adapter, name, size, thin=thin, typ=typ,
                        clone=clone)
    lu = lu.create(parent=tier)

    if is_ssp:
        # Refresh the SSP to pick up the new LU and etag
        tier_or_ssp = tier_or_ssp.refresh()

    return tier_or_ssp, lu


def _rm_lus(all_lus, lus_to_rm, del_unused_images=True):
    """Remove LUs (in place) from a list, tracking orphaned backing images.

    :param all_lus: List of LUs (LU or LUEnt) to be pruned; modified in place.
    :param lus_to_rm: Iterable of the LUs to remove from all_lus.
    :param del_unused_images: If True, an Image LU is also removed from
                              all_lus once its last Disk LU linked clone has
                              been removed.
    :return: The list of LUs removed from all_lus (may include backing
             Image LUs).
    """
    changes = []
    backing_images = set()

    for lu in lus_to_rm:
        # Is it a linked clone?  (We only care if del_unused_images.)
        if del_unused_images and lu.lu_type == stor.LUType.DISK:
            # Note: This can add None to the set
            backing_images.add(_image_lu_for_clone(all_lus, lu))
        msgargs = {'lu_name': lu.name, 'lu_udid': lu.udid}
        removed = _rm_dev_by_udid(lu, all_lus)
        if removed:
            LOG.debug(_("Removing LU %(lu_name)s (UDID %(lu_udid)s)"),
                      msgargs)
            changes.append(removed)
        else:
            # It's okay if the LU was already absent.
            LOG.info(_("LU %(lu_name)s was not found - it may have been "
                       "deleted out of band.  (UDID: %(lu_udid)s)"), msgargs)

    # Now remove any unused backing images.  This set will be empty if
    # del_unused_images=False
    for back_img in backing_images:
        # Ignore None, which could have appeared if a clone existed with no
        # backing image.
        if back_img is None:
            continue
        msgargs = {'lu_name': back_img.name, 'lu_udid': back_img.udid}
        # Only remove backing images that are not in use.
        if _image_lu_in_use(all_lus, back_img):
            LOG.debug("Not removing Image LU %(lu_name)s because it is still "
                      "in use.  (UDID: %(lu_udid)s)", msgargs)
        else:
            removed = _rm_dev_by_udid(back_img, all_lus)
            if removed:
                LOG.info(_("Removing Image LU %(lu_name)s because it is no "
                           "longer in use.  (UDID: %(lu_udid)s)"), msgargs)
                changes.append(removed)
            else:
                # This would be wildly unexpected
                LOG.warning(_("Backing LU %(lu_name)s was not found.  "
                              "(UDID: %(lu_udid)s)"), msgargs)
    return changes


def rm_tier_storage(lus_to_rm, tier=None, lufeed=None, del_unused_images=True):
    """Remove Logical Units from a Shared Storage Pool Tier.

    :param lus_to_rm: Iterable of LU ElementWrappers or LUEnt EntryWrappers
                      representing the LogicalUnits to delete.
    :param tier: Tier EntryWrapper representing the SSP Tier on which the
                 lus_to_rm (and their backing images) reside.  Either tier or
                 lufeed is required.  If both are specified, tier is ignored.
    :param lufeed: Pre-fetched list of LUEnt (i.e. result of a GET of
                   Tier/{uuid}/LogicalUnit) where we expect to find the
                   lus_to_rm (and their backing images).  Either tier or
                   lufeed is required.  If both are specified, tier is
                   ignored.
    :param del_unused_images: If True, and a removed Disk LU was the last one
                              linked to its backing Image LU, the backing
                              Image LU is also removed.
    :raise ValueError: - If neither tier nor lufeed was supplied.
                       - If lufeed was supplied but doesn't contain LUEnt
                         EntryWrappers (e.g. the caller provided
                         SSP.logical_units).
    """
    if all(param is None for param in (tier, lufeed)):
        raise ValueError(_("Developer error: Either tier or lufeed is "
                           "required."))
    if lufeed is None:
        lufeed = stor.LUEnt.get(tier.adapter, parent=tier)
    elif any(not isinstance(lu, stor.LUEnt) for lu in lufeed):
        # LUEnt (unlike LU) supports direct DELETE, which we rely on below.
        raise ValueError(_("Developer error: The lufeed parameter must "
                           "comprise LUEnt EntryWrappers."))

    # Figure out which LUs to delete and delete them; _rm_lus returns a list
    # of LUEnt, so they can be removed directly.
    for dlu in _rm_lus(lufeed, lus_to_rm,
                       del_unused_images=del_unused_images):
        msg_args = dict(lu_name=dlu.name, lu_udid=dlu.udid)
        LOG.info(_("Deleting LU %(lu_name)s (UDID: %(lu_udid)s)"), msg_args)
        try:
            dlu.delete()
        except exc.HttpError as he:
            # Best-effort: log and continue with the remaining LUs.
            LOG.warning(he)
            LOG.warning(_("Ignoring HttpError for LU %(lu_name)s may have "
                          "been deleted out of band.  (UDID: %(lu_udid)s)"),
                        msg_args)


@tx.entry_transaction
def rm_ssp_storage(ssp_wrap, lus, del_unused_images=True):
    """Remove some number of LogicalUnits from a SharedStoragePool.

    The changes are flushed back to the REST server.

    :param ssp_wrap: SSP EntryWrapper representing the SharedStoragePool to
                     modify.
    :param lus: Iterable of LU ElementWrappers or LUEnt EntryWrappers
                representing the LogicalUnits to delete.
    :param del_unused_images: If True, and a removed Disk LU was the last one
                              linked to its backing Image LU, the backing
                              Image LU is also removed.
    :return: The (possibly) modified SSP wrapper.
    """
    if _rm_lus(ssp_wrap.logical_units, lus,
               del_unused_images=del_unused_images):
        # Flush changes
        ssp_wrap = ssp_wrap.update()
    return ssp_wrap


def _remove_orphan_maps(vwrap, type_str, lpar_id=None):
    """Remove orphan storage mappings (no client adapter) from a list.

    This works for both VSCSI and VFC mappings.

    :param vwrap: VIOS wrapper containing the mappings to inspect.  If
                  type_str is 'VFC', the VIOS wrapper must have been
                  retrieved with the VIO_FMAP extended attribute group; if
                  type_str is 'VSCSI', the VIO_SMAP extended attribute group
                  must have been used.
    :param type_str: The type of mapping being removed.  Must be either 'VFC'
                     or 'VSCSI'.
    :param lpar_id: (Optional) Only orphan mappings associated with the
                    specified LPAR ID will be removed.  If None (the
                    default), all LPARs' mappings will be considered.
    :return: The list of mappings removed.  May be empty.
    """
    # This will raise KeyError if type_str isn't one of 'VFC' or 'VSCSI'
    maps = dict(VSCSI=vwrap.scsi_mappings, VFC=vwrap.vfc_mappings)[type_str]
    msgargs = dict(vios_name=vwrap.name, stg_type=type_str)
    # Make a list of orphans first (since we can't remove while iterating).
    # If requested, limit candidates to those matching the specified LPAR ID.
    # Also don't remove "any" type server adapters which are server adapters
    # without a client adapter that can map to any client.
    removals = [mp for mp in maps if mp.client_adapter is None and (
        lpar_id is None or mp.server_adapter.lpar_id == lpar_id) and (
        mp.server_adapter.lpar_slot_num != stor.ANY_SLOT)]
    for rm_map in removals:
        maps.remove(rm_map)
    if removals:
        LOG.warning(_("Removing %(num_maps)d orphan %(stg_type)s mappings "
                      "from VIOS %(vios_name)s."),
                    dict(msgargs, num_maps=len(removals)))
    else:
        LOG.debug("No orphan %(stg_type)s mappings found on VIOS "
                  "%(vios_name)s.", msgargs)
    return removals


def _remove_portless_vfc_maps(vwrap, lpar_id=None):
    """Remove non-logged-in VFC mappings (no Port) from a list.

    :param vwrap: VIOS wrapper containing the mappings to inspect.  Must have
                  been retrieved with the VIO_FMAP extended attribute group.
    :param lpar_id: (Optional) Only port-less mappings associated with the
                    specified LPAR ID will be removed.  If None (the
                    default), all LPARs' mappings will be considered.
    :return: The list of mappings removed.  May be empty.
    """
    # Make a list of removals first (since we can't remove while iterating).
    # If requested, limit candidates to those matching the specified LPAR ID.
    removals = [mp for mp in vwrap.vfc_mappings if mp.backing_port is None
                and (lpar_id is None or
                     mp.server_adapter.lpar_id == lpar_id)]
    for rm_map in removals:
        vwrap.vfc_mappings.remove(rm_map)
    if removals:
        LOG.warning(_("Removing %(num_maps)d port-less VFC mappings from "
                      "VIOS %(vios_name)s."),
                    dict(num_maps=len(removals), vios_name=vwrap.name))
    else:
        LOG.debug("No port-less VFC mappings found on VIOS %(vios_name)s.",
                  dict(vios_name=vwrap.name))
    return removals


def _remove_lpar_maps(vwrap, lpar_ids, type_str):
    """Remove VFC or VSCSI mappings for the specified LPAR IDs.

    :param vwrap: VIOS EntryWrapper containing the mappings to scrub.
    :param lpar_ids: Iterable of short IDs (not UUIDs) of the LPARs whose
                     mappings are to be removed.
    :param type_str: The type of mapping being removed.  Must be either 'VFC'
                     or 'VSCSI'.
    :return: The list of mappings removed.
""" # This will raise KeyError if a bogus type_str is passed in rm_maps = dict(VSCSI=sm.remove_maps, VFC=fm.remove_maps)[type_str] msgargs = dict(stg_type=type_str, vios_name=vwrap.name) removals = [] for lpar_id in lpar_ids: msgargs['lpar_id'] = lpar_id _removals = rm_maps(vwrap, lpar_id) if _removals: LOG.warning(_("Removing %(num_maps)d %(stg_type)s mappings " "associated with LPAR ID %(lpar_id)d from VIOS " "%(vios_name)s."), dict(msgargs, num_maps=len(_removals))) removals.extend(_removals) else: LOG.debug("No %(stg_type)s mappings found for LPAR ID " "%(lpar_id)d on VIOS %(vios_name)s.", msgargs) return removals class _RemoveStorage(tf_tsk.Task): def __init__(self, tag): """Initialize the storage removal Task. :param tag: Added to the Task name to make it unique within a Flow. """ super(_RemoveStorage, self).__init__('rm_storage_%s' % tag) def execute(self, wrapper_task_rets): """Remove the storage elements associated with the deleted mappings. We remove storage elements for each VIOS, but only those we can be sure belong ONLY to that VIOS. That is, we do not remove SSP Logical Units because they may be mapped from some other VIOS in the cluster - one we don't even know about. """ # Accumulate removal tasks rmtasks = [] for vuuid, rets in wrapper_task_rets.items(): vwrap = rets['wrapper'] # VFC mappings don't have storage we can get to, so ignore those. # We may get removals from more than one subtask. All will have # the 'vscsi_removals_' prefix. There may be some overlap, but # the removal methods will ignore duplicates. vscsi_rms = [] for vrk in (k for k in rets if k.startswith('vscsi_removals_')): vscsi_rms.extend(rets[vrk]) # We can short out of this VIOS if no vscsi mappings were removed # from it. if not vscsi_rms: continue # Index remaining VSCSI mappings to isolate still-in-use storage. smindex = sm.index_mappings(vwrap.scsi_mappings) # Figure out which storage elements need to be removed. # o Some VSCSI mappings may not have backing storage. 
# o Ignore any storage elements that are still in use (still have # mappings associated with them). stg_els_to_remove = [ rmap.backing_storage for rmap in vscsi_rms if rmap.backing_storage is not None and rmap.backing_storage.udid not in smindex['by-storage-udid']] # If there's nothing left, we're done with this VIOS if not stg_els_to_remove: continue # Extract lists of each type of storage vopts_to_rm = [] vdisks_to_rm = [] for stg in stg_els_to_remove: if isinstance(stg, (stor.LU, stor.PV)): LOG.warning( _("Not removing storage %(stg_name)s of type " "%(stg_type)s because it cannot be determined " "whether it is still in use. Manual verification " "and cleanup may be necessary."), {'stg_name': stg.name, 'stg_type': stg.schema_type}) elif isinstance(stg, stor.VOptMedia): vopts_to_rm.append(stg) elif isinstance(stg, stor.VDisk): vdisks_to_rm.append(stg) else: LOG.warning( _("Storage scrub ignoring storage element " "%(stg_name)s because it is of unexpected type " "%(stg_type)s."), {'stg_name': stg.name, 'stg_type': stg.schema_type}) # Any storage to be deleted? if not any((vopts_to_rm, vdisks_to_rm)): continue # If we get here, we have storage that needs to be deleted from one # or more volume groups. We don't have a way of knowing which ones # without REST calls, so get all VGs for this VIOS and delete from # all of them. POST will only be done on VGs which actually need # updating. 
vgftsk = tx.FeedTask('scrub_vg_vios_%s' % vuuid, stor.VG.getter( vwrap.adapter, parent=vwrap)) if vdisks_to_rm: vgftsk.add_functor_subtask( _rm_vdisks, vdisks_to_rm, logspec=(LOG.warning, _( "Scrubbing the following %(vdcount)d Virtual Disks " "from VIOS %(vios)s: %(vdlist)s"), { 'vdcount': len(vdisks_to_rm), 'vios': vwrap.name, 'vdlist': ["%s (%s)" % (vd.name, vd.udid) for vd in vdisks_to_rm]})) if vopts_to_rm: vgftsk.add_functor_subtask( _rm_vopts, vopts_to_rm, logspec=(LOG.warning, _( "Scrubbing the following %(vocount)d Virtual Opticals " "from VIOS %(vios)s: %(volist)s"), { 'vocount': len(vopts_to_rm), 'vios': vwrap.name, 'volist': ["%s (%s)" % (vo.name, vo.udid) for vo in vopts_to_rm]})) rmtasks.append(vgftsk) # We only created removal Tasks if we found something to remove. if rmtasks: # Execute any storage removals in parallel, max 8 threads. tf_eng.run( tf_uf.Flow('remove_storage').add(*rmtasks), engine='parallel', executor=tx.ContextThreadPoolExecutor(max(8, len(rmtasks)))) def add_lpar_storage_scrub_tasks(lpar_ids, ftsk, lpars_exist=False, remove_storage=True): """Delete storage mappings and elements associated with an LPAR ID. This should typically be used to clean leftovers from an LPAR that has been deleted, since stale storage artifacts can cause conflicts with a new LPAR recycling that ID. This operates by inspecting mappings first, since we have no other way to associate a mapping-less storage element with an LPAR ID. Storage elements are deleted if their only mappings are to the LPAR ID being scrubbed (and remove_storage=True). This method only adds subtasks/post-execs to the passed-in FeedTask. The caller is responsible for executing that FeedTask in an appropriate Flow or other context. :param lpar_ids: List of integer short IDs (not UUIDs) of the LPAR whose storage artifacts are to be scrubbed. :param ftsk: FeedTask to which the scrubbing actions should be added, for execution by the caller. 
The FeedTask must be built for all the VIOSes from which mappings and storage should be scrubbed. The feed/getter must use the VIO_SMAP and VIO_FMAP xags. :param lpars_exist: (Optional) If set to False (the default), storage artifacts associated with an extant LPAR will be ignored (NOT scrubbed). Otherwise, we will scrub whether the LPAR exists or not. Thus, set to True only if intentionally removing mappings associated with extant LPARs. :param remove_storage: If True (the default), storage elements associated with stale mappings are removed, assuming it can be verified that they were only in use by this LPAR. If False, no storage removal is attempted. """ tag = '_'.join((str(lpar_id) for lpar_id in lpar_ids)) def remove_chain(vwrap, stg_type): """_remove_lpar_maps with an additional check for existing LPARs.""" lpar_id_set = set(lpar_ids) if not lpars_exist: # Restrict scrubbing to LPARs that don't exist on the system. ex_lpar_ids = {lwrap.id for lwrap in lpar.LPAR.get( vwrap.adapter, parent_type=sys.System, parent_uuid=vwrap.assoc_sys_uuid)} ex_lpar_ids.update(vioswrap.id for vioswrap in vios.VIOS.get( vwrap.adapter, parent_type=sys.System, parent_uuid=vwrap.assoc_sys_uuid)) # The list of IDs of the LPARs whose mappings (and storage) are to # be preserved (not scrubbed) is the intersection of # {the IDs we we were asked to scrub} # and # {the IDs of all the LPARs on the system} lpar_ids_to_preserve = lpar_id_set & ex_lpar_ids if lpar_ids_to_preserve: LOG.warning(_("Skipping scrub of %(stg_type)s mappings from " "VIOS %(vios_name)s for the following LPAR IDs " "because those LPARs exist: %(lpar_ids)s"), dict(stg_type=stg_type, vios_name=vwrap.name, lpar_ids=list(lpar_ids_to_preserve))) lpar_id_set -= lpar_ids_to_preserve return _remove_lpar_maps(vwrap, lpar_id_set, stg_type) ftsk.add_functor_subtask(remove_chain, 'VSCSI', provides='vscsi_removals_' + tag) ftsk.add_functor_subtask(remove_chain, 'VFC') if remove_storage: ftsk.add_post_execute(_RemoveStorage(tag)) 
def add_orphan_storage_scrub_tasks(ftsk, lpar_id=None):
    """Delete orphan mappings (no client adapter) and their storage elements.

    :param ftsk: FeedTask to which the scrubbing actions should be added, for
                 execution by the caller.  The FeedTask must be built for all
                 the VIOSes from which mappings and storage should be
                 scrubbed.  The feed/getter must use the VIO_SMAP and
                 VIO_FMAP xags.
    :param lpar_id: (Optional) Only orphan mappings associated with the
                    specified LPAR ID will be removed.  If None (the
                    default), all LPARs' mappings will be considered.
    """
    ftsk.add_functor_subtask(_remove_orphan_maps, 'VSCSI', lpar_id=lpar_id,
                             provides='vscsi_removals_orphans')
    ftsk.add_functor_subtask(_remove_orphan_maps, 'VFC', lpar_id=lpar_id)
    # _RemoveStorage picks up the 'vscsi_removals_*' results after all
    # subtasks have run.
    ftsk.add_post_execute(_RemoveStorage('orphans'))


def find_stale_lpars(vios_w):
    """Find orphan LPAR IDs in a Virtual I/O Server's VSCSI/VFC mappings.

    This method collates all client LPAR IDs from the VSCSI/VFC mappings of
    the specified VIOS wrapper and compares to the list of LPAR IDs on that
    VIOS's host, returning the list of any IDs which exist in the former but
    not the latter.

    :param vios_w: VIOS EntryWrapper.  To be effective, this must have been
                   retrieved with the VIO_SMAP and VIO_FMAP extended
                   attribute groups.
    :return: List of LPAR IDs (integer short IDs, not UUIDs) which don't
             exist on the system.  The list is guaranteed to contain no
             duplicates.
    """
    # IDs of all partitions (LPARs and VIOSes) that exist on the host.
    ex_lpar_ids = {lwrap.id for lwrap in lpar.LPAR.get(
        vios_w.adapter, parent_type=sys.System,
        parent_uuid=vios_w.assoc_sys_uuid)}
    vios_ids = {vioswrap.id for vioswrap in vios.VIOS.get(
        vios_w.adapter, parent_type=sys.System,
        parent_uuid=vios_w.assoc_sys_uuid)}
    ex_lpar_ids.update(vios_ids)
    # IDs referenced by this VIOS's mappings.
    map_lpar_ids = {smp.server_adapter.lpar_id for smp in
                    (list(vios_w.scsi_mappings) + list(vios_w.vfc_mappings))}
    return list(map_lpar_ids - ex_lpar_ids)


class ComprehensiveScrub(tx.FeedTask):
    """Scrub all the stale/orphan mappings/storage we can find.

    A FeedTask which does the following:

    For all VIOSes (on the host):
        For each stale LPAR
            Scrub mappings & storage
        Scrub all orphan mappings (those without client adapters)
    """
    def __init__(self, adapter, host_uuid=None):
        """Create the FeedTask to scrub stale/orphan mappings/storage.

        :param adapter: A pypowervm.adapter.Adapter for REST API
                        communication.
        :param host_uuid: (Optional) If specified, limit to VIOSes on this
                          one host.  Otherwise, scrub across all VIOSes known
                          to the adapter.
        """
        getter_kwargs = {'xag': [c.XAG.VIO_FMAP, c.XAG.VIO_SMAP]}
        if host_uuid is not None:
            getter_kwargs = dict(getter_kwargs, parent_class=sys.System,
                                 parent_uuid=host_uuid)
        super(ComprehensiveScrub, self).__init__(
            'comprehensive_scrub', vios.VIOS.getter(adapter, **getter_kwargs))

        self.add_functor_subtask(find_stale_lpars, provides='stale_lpar_ids',
                                 flag_update=False)

        # Wrap _remove_lpar_maps to get the stale LPAR IDs from the above
        # find_stale_lpars Subtask.
        def remove_chain(vwrap, stg_type, provided):
            return _remove_lpar_maps(
                vwrap, provided['stale_lpar_ids'], stg_type)

        self.add_functor_subtask(remove_chain, 'VSCSI',
                                 provides='vscsi_removals_bylparid')
        self.add_functor_subtask(remove_chain, 'VFC')
        self.add_functor_subtask(_remove_orphan_maps, 'VSCSI',
                                 provides='vscsi_removals_orphans')
        self.add_functor_subtask(_remove_orphan_maps, 'VFC')
        self.add_post_execute(_RemoveStorage('comprehensive'))


class ScrubOrphanStorageForLpar(tx.FeedTask):
    """Scrub orphan mappings and their storage for one specific LPAR."""
    def __init__(self, adapter, lpar_id, host_uuid=None):
        """Create the FeedTask to scrub orphan mappings/storage by LPAR ID.

        :param adapter: A pypowervm.adapter.Adapter for REST API
                        communication.
        :param lpar_id: The integer short ID (not UUID) of the LPAR to be
                        examined and scrubbed of orphan mappings and their
                        storage.
        :param host_uuid: (Optional) If specified, limit to VIOSes on this
                          one host.  Otherwise, scrub across all VIOSes known
                          to the adapter.
        """
        getter_kwargs = {'xag': [c.XAG.VIO_FMAP, c.XAG.VIO_SMAP]}
        if host_uuid is not None:
            getter_kwargs = dict(getter_kwargs, parent_class=sys.System,
                                 parent_uuid=host_uuid)
        super(ScrubOrphanStorageForLpar, self).__init__(
            'scrub_orphans_for_lpar_%d' % lpar_id, vios.VIOS.getter(
                adapter, **getter_kwargs))
        self.add_functor_subtask(_remove_orphan_maps, 'VSCSI',
                                 lpar_id=lpar_id,
                                 provides='vscsi_removals_orphans_lpar_id_%d'
                                 % lpar_id)
        self.add_functor_subtask(_remove_orphan_maps, 'VFC', lpar_id=lpar_id)
        self.add_post_execute(_RemoveStorage('orphans_for_lpar_%d' % lpar_id))


class ScrubPortlessVFCMaps(tx.FeedTask):
    """Scrub virtual fibre channel mappings which have no backing port."""
    def __init__(self, adapter, lpar_id=None, host_uuid=None):
        """Create the FeedTask to scrub VFC mappings with no backing port.

        :param adapter: A pypowervm.adapter.Adapter for REST API
                        communication.
        :param lpar_id: (Optional) The integer short ID (not UUID) of the
                        LPAR to be examined and scrubbed of portless VFC
                        mappings.  If unspecified, all LPARs' mappings will
                        be examined.
        :param host_uuid: (Optional) If specified, limit to VIOSes on this
                          one host.  Otherwise, scrub across all VIOSes known
                          to the adapter.
        """
        getter_kwargs = {'xag': [c.XAG.VIO_FMAP]}
        if host_uuid is not None:
            getter_kwargs = dict(getter_kwargs, parent_class=sys.System,
                                 parent_uuid=host_uuid)
        name = 'scrub_portless_vfc_maps_for_' + ('all_lpars'
                                                 if lpar_id is None
                                                 else 'lpar_%d' % lpar_id)
        super(ScrubPortlessVFCMaps, self).__init__(
            name, vios.VIOS.getter(adapter, **getter_kwargs))
        self.add_functor_subtask(_remove_portless_vfc_maps, lpar_id=lpar_id)
pypowervm-1.1.24/pypowervm/tasks/cna.py0000664000175000017500000004736013571367171017572 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tasks around ClientNetworkAdapter.""" from oslo_concurrency import lockutils from pypowervm import exceptions as exc from pypowervm.i18n import _ from pypowervm.tasks import partition from pypowervm.wrappers import logical_partition as lpar from pypowervm.wrappers import managed_system as pvm_ms from pypowervm.wrappers import network as pvm_net VLAN_LOCK = "reserve_vlan" def crt_cna(adapter, host_uuid, lpar_uuid, pvid, vswitch=pvm_net.VSW_DEFAULT_VSWITCH, crt_vswitch=False, slot_num=None, mac_addr=None, addl_vlans=None): """Puts a new ClientNetworkAdapter on a given LPAR. This will update the LPAR and put a new CNA on it. If the LPAR is active can only perform if there is an active RMC connection. If the LPAR is powered off, then it will update it offline. :param adapter: The pypowervm adapter to perform the update through. :param host_uuid: Not used. :param lpar_uuid: The lpar UUID to update. :param pvid: The primary VLAN ID. :param vswitch: The name of the virtual switch that this CNA will be attached to. :param crt_vswitch: A boolean to indicate that if the vSwitch can not be found, the system should attempt to create one (with the default parameters - ex: Veb mode). :param slot_num: Optional slot number to use for the CNA. If not specified, will utilize the next available slot on the LPAR. :param mac_addr: The optional mac address. If not specified, one will be auto generated. :param addl_vlans: Optional list of (up to 18) additional VLANs. Can be a list of Ints or Strings (that parse down to ints). :return: The CNA Wrapper that was created. 
""" # Join the additional VLANs addl_tagged_vlans = None if addl_vlans is not None: addl_tagged_vlans = " ".join(addl_vlans) # Sanitize the pvid pvid = str(pvid) # Find the appropriate virtual switch. vswitch_w = _find_or_create_vswitch(adapter, vswitch, crt_vswitch) # Find the virtual network. Ensures that the system is ready for this. if adapter.traits.vnet_aware: _find_or_create_vnet(adapter, pvid, vswitch_w) # Build and create the CNA net_adpt = pvm_net.CNA.bld( adapter, pvid, vswitch_w.related_href, slot_num=slot_num, mac_addr=mac_addr, addl_tagged_vlans=addl_tagged_vlans) return net_adpt.create(parent_type=lpar.LPAR, parent_uuid=lpar_uuid) def _find_or_create_vnet(adapter, vlan, vswitch): # Read the existing virtual networks. Try to locate... vnets = pvm_net.VNet.get(adapter, parent_type=pvm_ms.System.schema_type, parent_uuid=adapter.sys_uuid) for vnet in vnets: if vlan == str(vnet.vlan) and vnet.vswitch_id == vswitch.switch_id: return vnet # Must not have found it. Lets try to create it. name = '%(vswitch)s-%(vlan)s' % {'vswitch': vswitch.name, 'vlan': str(vlan)} # VLAN 1 is not allowed to be tagged. All others are. VLAN 1 would be # used for 'Flat' networks most likely. tagged = (vlan != '1') vnet = pvm_net.VNet.bld(adapter, name, vlan, vswitch.related_href, tagged) return vnet.create(parent_type=pvm_ms.System, parent_uuid=adapter.sys_uuid) def _find_or_create_vswitch(adapter, vs_name, crt_vswitch): """Finds (or creates) the appropriate virtual switch. :param adapter: The pypowervm adapter to perform the update through. :param vs_name: The name of the virtual switch that this CNA will be attached to. :param crt_vswitch: A boolean to indicate that if the vSwitch can not be found, the system should attempt to create one (with the default parameters - ex: Veb mode). 
    """
    vswitch_w = pvm_net.VSwitch.search(adapter, parent_type=pvm_ms.System,
                                       parent_uuid=adapter.sys_uuid,
                                       one_result=True, name=vs_name)
    if vswitch_w is None:
        if crt_vswitch:
            # Build and create a vSwitch with server-default parameters.
            vswitch_w = pvm_net.VSwitch.bld(adapter, vs_name)
            vswitch_w = vswitch_w.create(parent_type=pvm_ms.System,
                                         parent_uuid=adapter.sys_uuid)
        else:
            raise exc.Error(_('Unable to find the Virtual Switch %s on the '
                              'system.') % vs_name)
    return vswitch_w


def _find_free_vlan(adapter, vswitch_w):
    """Finds a free VLAN on the vswitch specified.

    :param adapter: The adapter to read the VNet feed from.
    :param vswitch_w: The VSwitch wrapper to find a free VLAN on.
    :return: An integer VLAN ID not currently in use on the vSwitch.
    :raise exc.Error: If every VLAN ID on the vSwitch is in use.
    """
    # A Virtual Network (VNet) will exist for every PowerVM vSwitch / VLAN
    # combination in the system.  Getting the feed is a quick way to determine
    # which VLANs are in use.
    vnets = pvm_net.VNet.get(adapter, parent_type=pvm_ms.System.schema_type,
                             parent_uuid=adapter.sys_uuid)
    # Use that feed to get the VLANs in use, but only get the ones in use for
    # the vSwitch passed in.
    used_vids = [x.vlan for x in vnets
                 if x.associated_switch_uri == vswitch_w.related_href]

    # Walk through the VLAN range, and as soon as one is found that is not in
    # use, return it to the user.
    # NOTE(review): list membership makes this O(n*m); presumably acceptable
    # for the <=4094 VLAN space - confirm before reusing at larger scale.
    for x in range(1, 4094):
        if x not in used_vids:
            return x

    raise exc.Error(_('Unable to find a valid VLAN for Virtual Switch %s.') %
                    vswitch_w.name)


@lockutils.synchronized(VLAN_LOCK)
def assign_free_vlan(adapter, host_uuid, vswitch_w, cna, ensure_enabled=False):
    """Assigns a free vlan to a given cna.  Also ensure the CNA is enabled.

    :param adapter: The adapter to read the vnet information from.
    :param host_uuid: Not used.
    :param vswitch_w: The vswitch wrapper to find the free vlan on.
    :param cna: The CNA wrapper to be updated with a new vlan.
    :param ensure_enabled: (Optional, Default: False) If true, enable the CNA
                           before updating.
    :return: The updated CNA.
""" vlan = _find_free_vlan(adapter, vswitch_w) cna.pvid = vlan if ensure_enabled: cna.enabled = True cna = cna.update() return cna @lockutils.synchronized(VLAN_LOCK) def crt_trunk_with_free_vlan( adapter, host_uuid, src_io_host_uuids, vs_name, crt_vswitch=True, dev_name=None, ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None): """Creates a trunk adapter(s) with a free VLAN on the system. :param adapter: The pypowervm adapter to perform the update through. :param host_uuid: Not used. :param src_io_host_uuids: The list of UUIDs of the LPARs that will host the Trunk Adapters. At least one UUID is required. Multiple will be supported, and the Trunk Priority will increment per adapter (in the order that the I/O hosts are specified). :param pvid: The port VLAN ID. :param vs_name: The name of the PowerVM Hypervisor Virtual Switch to create the p2p connection on. This is required because it is not recommended to create it on the default (ETHERNET0) virtual switch. :param crt_vswitch: (Optional, Default: True) A boolean to indicate that if the vSwitch can not be found, the system should attempt to create one (with the default parameters - ex: Veb mode). :param dev_name: (Optional, Default: None) The device name. Only valid if the src_io_host_uuids is a single entity and the uuid matches the mgmt lpar UUID. Otherwise leave as None. If set, the name of the trunk adapter created on the mgmt lpar will be set to this value. :param ovs_bridge: (Optional, Default: None) If hosting through mgmt partition, this attribute specifies which Open vSwitch to connect to. :param ovs_ext_ids: (Optional, Default: None) Comma-delimited list of key=value pairs that get set as external-id metadata attributes on the OVS port. Only valid if ovs_bridge is set. :param configured_mtu: (Optional, Default: None) Sets the MTU on the adapter. May only be valid if adapter is being created against mgmt partition. :return: The CNA Wrapper that was created. :return: The TrunkAdapters that were created. 
             Match the order that the src_io_host_uuids were passed in.
    """
    # Make sure we have the appropriate vSwitch
    vswitch_w = _find_or_create_vswitch(adapter, vs_name, crt_vswitch)

    # Find the free VLAN
    vlan = _find_free_vlan(adapter, vswitch_w)

    # Need to get the VIOS uuids to determine if the src_io_host_uuid is a
    # VIOS
    iohost_wraps = partition.get_partitions(
        adapter, lpars=False, vioses=True, mgmt=True)
    io_uuid_to_wrap = {w.uuid: w for w in iohost_wraps
                       if w.uuid in src_io_host_uuids}

    # Now create the corresponding Trunk
    trunk_adpts = []
    trunk_pri = 1
    for io_uuid in src_io_host_uuids:
        trunk_adpt = pvm_net.CNA.bld(
            adapter, vlan, vswitch_w.related_href, trunk_pri=trunk_pri,
            dev_name=dev_name, ovs_bridge=ovs_bridge,
            ovs_ext_ids=ovs_ext_ids, configured_mtu=configured_mtu)
        trunk_adpts.append(trunk_adpt.create(parent=io_uuid_to_wrap[io_uuid]))
        trunk_pri += 1
    return trunk_adpts


def crt_p2p_cna(adapter, host_uuid, lpar_uuid, src_io_host_uuids, vs_name,
                crt_vswitch=True, mac_addr=None, slot_num=None, dev_name=None,
                ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None):
    """Creates a 'point-to-point' Client Network Adapter.

    A point to point connection is one that has a VLAN that is shared only
    between the lpar and the appropriate trunk adapter(s).  The system will
    determine what a free VLAN is on the virtual switch and use that for the
    point to point connection.

    The method will return the Client Network Adapter and the corresponding
    Trunk Adapter that it has created.  There may be multiple Trunk Adapters
    created if multiple src_io_host_uuids are passed in.  The Trunk Adapters
    can be created against the Virtual I/O Servers or the NovaLink partition.

    Nothing prevents the system from allowing another Client Network Adapter
    from being created and attaching to the connection.  The point-to-point
    connection is only guaranteed at the point in time at which it was
    created.

    NOTE: See the note in src_io_host_uuids.  Currently this API will only
    support the NovaLink partition.  Others will be added.  This parameter
    is there for future facing compatibility.

    :param adapter: The pypowervm adapter to perform the update through.
    :param host_uuid: Not used.
    :param lpar_uuid: The lpar UUID to update.
    :param src_io_host_uuids: The list of UUIDs of the LPARs that will host
                              the Trunk Adapters.  At least one UUID is
                              required.  Multiple will be supported, and the
                              Trunk Priority will increment per adapter (in
                              the order that the I/O hosts are specified).
    :param vs_name: The name of the PowerVM Hypervisor Virtual Switch to
                    create the p2p connection on.  This is required because
                    it is not recommended to create it on the default
                    (ETHERNET0) virtual switch.
    :param crt_vswitch: (Optional, Default: True) A boolean to indicate that
                        if the vSwitch can not be found, the system should
                        attempt to create one (with the default parameters -
                        ex: Veb mode).
    :param mac_addr: (Optional, Default: None) The mac address.  If not
                     specified, one will be auto generated.
    :param slot_num: (Optional, Default: None) The slot number to use for the
                     CNA.  If not specified, will utilize the next available
                     slot on the LPAR.
    :param dev_name: (Optional, Default: None) The device name.  Only valid
                     if the src_io_host_uuids is a single entity and the uuid
                     matches the mgmt lpar UUID.  Otherwise leave as None.
                     If set, the trunk adapter created on the mgmt lpar will
                     be set to this value.
    :param ovs_bridge: (Optional, Default: None) If hosting through mgmt
                       partition, this attribute specifies which Open vSwitch
                       to connect to.
    :param ovs_ext_ids: (Optional, Default: None) Comma-delimited list of
                        key=value pairs that get set as external-id metadata
                        attributes on the OVS port.  Only valid if ovs_bridge
                        is set.
    :param configured_mtu: (Optional, Default: None) Sets the MTU on the
                           adapter.  May only be valid if adapter is being
                           created against mgmt partition.
    :return: The CNA Wrapper that was created.
    :return: The TrunkAdapters that were created.  Match the order that the
             src_io_host_uuids were passed in.
    """
    trunk_adpts = crt_trunk_with_free_vlan(
        adapter, None, src_io_host_uuids, vs_name, crt_vswitch=crt_vswitch,
        dev_name=dev_name, ovs_bridge=ovs_bridge, ovs_ext_ids=ovs_ext_ids,
        configured_mtu=configured_mtu)

    # Darn lack of re-entrant locks
    with lockutils.lock(VLAN_LOCK):
        vswitch_w = _find_or_create_vswitch(adapter, vs_name, crt_vswitch)
        client_adpt = pvm_net.CNA.bld(
            adapter, trunk_adpts[0].pvid, vswitch_w.related_href,
            slot_num=slot_num, mac_addr=mac_addr)
        client_adpt = client_adpt.create(parent_type=lpar.LPAR,
                                         parent_uuid=lpar_uuid)

    return client_adpt, trunk_adpts


def find_trunks(adapter, cna_w):
    """Returns the Trunk Adapters associated with the CNA.

    :param adapter: The pypowervm adapter to perform the search with.
    :param cna_w: The Client Network Adapter to find the Trunk Adapters for.
    :return: A list of Trunk Adapters (sorted by Trunk Priority) that host
             the Client Network Adapter.
    """
    # VIOS and Management Partitions can host Trunk Adapters.
    host_wraps = partition.get_partitions(
        adapter, lpars=False, vioses=True, mgmt=True)

    # Find the corresponding trunk adapters.
    trunk_list = []
    for host_wrap in host_wraps:
        trunk = _find_trunk_on_lpar(adapter, host_wrap, cna_w)
        if trunk:
            trunk_list.append(trunk)

    # Sort by the trunk priority
    trunk_list.sort(key=lambda x: x.trunk_pri)
    return trunk_list


def _find_trunk_on_lpar(adapter, parent_wrap, client_vea):
    """Returns the trunk adapter (if any) on a partition for a client VEA.

    A trunk matches the client adapter when it is flagged as a trunk and has
    the same PVID and virtual switch as the client adapter.

    :param adapter: The pypowervm adapter to perform the search with.
    :param parent_wrap: The partition wrapper to search for the trunk on.
    :param client_vea: The client CNA wrapper to match against.
    :return: The matching trunk CNA, or None if this partition does not host
             one.
    """
    cna_wraps = pvm_net.CNA.get(adapter, parent=parent_wrap)
    for cna in cna_wraps:
        if (cna.is_trunk and cna.pvid == client_vea.pvid and
                cna.vswitch_id == client_vea.vswitch_id):
            return cna
    return None


def _find_all_trunks_on_lpar(adapter, parent_wrap, vswitch_id=None):
    """Returns all trunk adapters on a given vswitch.

    :param adapter: The pypowervm adapter to perform the search with.
    :param parent_wrap: The partition wrapper to search for trunk adapters
                        on.
    :param vswitch_id: The id of the vswitch to search for orphaned trunks
                       on.
    :return: A list of trunk adapters that are associated with the given
             vswitch_id.
""" cna_wraps = pvm_net.CNA.get(adapter, parent=parent_wrap) trunk_list = [] for cna in cna_wraps: if (cna.is_trunk and (vswitch_id is None or cna.vswitch_id == vswitch_id)): trunk_list.append(cna) return trunk_list def _find_cna_wraps(adapter, vswitch_id=None): """Returns all CNAs. :param adapter: The pypowervm adapter to perform the search with. :param vswitch_id: This param is optional. If specified, the method will only return CNAs associated with the given vswitch. :return: A list of CNAs that are optionally associated with the given vswitch_id. """ # All lpars should be searched, including VIOSes lpar_wraps = partition.get_partitions(adapter) cna_wraps = [] filtered_cna_wraps = [] for lpar_wrap in lpar_wraps: cna_wraps.extend(pvm_net.CNA.get(adapter, parent=lpar_wrap)) # If a vswitch_id is passed in then filter to only cnas on that vswitch if (vswitch_id): for cna in cna_wraps: if(cna.vswitch_id == vswitch_id): filtered_cna_wraps.append(cna) cna_wraps = filtered_cna_wraps return cna_wraps def find_cnas_on_trunk(trunk_w, cna_wraps=None): """Returns the CNAs associated with the Trunk Adapter. :param trunk_w: The Trunk Adapter to find the Client Network Adapters for. :param cna_wraps: Optional param for passing in the list of CNA wraps to search. If the list is none, queries will be done to build the list. :return: A list of Client Network Adapters that are hosted by the Trunk Adapter. """ adapter = trunk_w.adapter # Find all the CNAs on the system if cna_wraps is None: cna_wraps = _find_cna_wraps(adapter) # Search the CNA wraps for matching CNAs cna_list = [] for cna in cna_wraps: if ((not cna.uuid == trunk_w.uuid) and cna.pvid == trunk_w.pvid and cna.vswitch_id == trunk_w.vswitch_id): cna_list.append(cna) return cna_list def find_orphaned_trunks(adapter, vswitch_name): """Returns all orphaned trunk adapters on a given vswitch. An orphaned trunk is a trunk adapter that does not have any associated CNAs. 
:param adapter: The pypowervm adapter to perform the search with. :param vswitch_name: The name of the vswitch to search for orphaned trunks on. :return: A list of trunk adapters that do not have any associated CNAs """ vswitch = pvm_net.VSwitch.search( adapter, parent_type=pvm_ms.System, one_result=True, name=vswitch_name) # May occur if the system does not host the vswitch passed in. if vswitch is None: return [] vswitch_id = vswitch.switch_id # VIOS and Management Partitions can host Trunk Adapters. host_wraps = partition.get_partitions( adapter, lpars=False, vioses=True, mgmt=True) # Get all the CNA wraps on the vswitch cna_wraps = _find_cna_wraps(adapter, vswitch_id=vswitch_id) # Find all trunk adapters on the vswitch. trunk_list = [] for host_wrap in host_wraps: trunks = _find_all_trunks_on_lpar(adapter, parent_wrap=host_wrap, vswitch_id=vswitch_id) trunk_list.extend(trunks) # Check if the trunk adapters are orphans orphaned_trunk_list = [] for trunk in trunk_list: if not find_cnas_on_trunk(trunk, cna_wraps=cna_wraps): orphaned_trunk_list.append(trunk) return orphaned_trunk_list pypowervm-1.1.24/pypowervm/tasks/network_bridger.py0000664000175000017500000011660013571367171022212 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Manage NetworkBridge, TrunkAdapter, LoadGroup, SEA, etc.""" import abc import copy import six from oslo_concurrency import lockutils as lock from pypowervm import const as c from pypowervm import exceptions as pvm_exc from pypowervm import util as pvm_util from pypowervm.utils import retry as pvm_retry from pypowervm.wrappers import managed_system as pvm_ms from pypowervm.wrappers import network as pvm_net from pypowervm.wrappers import virtual_io_server as pvm_vios _MAX_VLANS_PER_VEA = 20 _ENSURE_VLAN_LOCK = 'ensure_vlans_nb' def ensure_vlans_on_nb(adapter, host_uuid, nb_uuid, vlan_ids): """Will make sure that the VLANs are assigned to the Network Bridge. This method will reorder the arbitrary VLAN IDs as needed (those which are the PVID of the VEAs, but not the primary VEA). VLANs are always added to VEAs that are 'non-primary' (not the first VEA). However, if the VLAN is on the primary VEA then it is left on the system. The only 'untagged' VLAN that is allowed is the primary VEA's PVID. If the VLAN specified is on another Network Bridge's VEA (which happens to be on the same virtual switch): - An error will be thrown if it is on the primary VEA. - It will be removed off the Network Bridge if it is on the non-primary VEA. This method will not remove VLAN IDs from the network bridge that aren't part of the vlan_ids list. Instead, each VLAN is simply added to the Network Bridge's VLAN list. :param adapter: The pypowervm Adapter. :param host_uuid: The Server's UUID :param nb_uuid: The Network Bridge UUID. :param vlan_ids: The list of VLANs to ensure are on the Network Bridge. """ _get_bridger(adapter, host_uuid).ensure_vlans_on_nb(nb_uuid, vlan_ids) def ensure_vlan_on_nb(adapter, host_uuid, nb_uuid, vlan_id): """Will make sure that the VLAN is assigned to the Network Bridge. This method will reorder the arbitrary VLAN IDs as needed (those which are the PVID of the VEAs, but not the primary VEA). 
VLANs are always added to VEAs that are 'non-primary' (not the first VEA). However, if the VLAN is on the primary VEA then it is left on the system. The only 'untagged' VLAN that is allowed is the primary VEA's PVID. If the VLAN specified is on another Network Bridge's VEA (which happens to be on the same virtual switch): - An error will be thrown if it is on the primary VEA. - It will be removed off the Network Bridge if it is on the non-primary VEA. :param adapter: The pypowervm Adapter. :param host_uuid: The Server's UUID :param nb_uuid: The Network Bridge UUID. :param vlan_id: The VLAN identifier to ensure is on the system. """ ensure_vlans_on_nb(adapter, host_uuid, nb_uuid, [vlan_id]) def remove_vlan_from_nb(adapter, host_uuid, nb_uuid, vlan_id, fail_if_pvid=False, existing_nbs=None): """Will remove the VLAN from a given Network Bridge. :param adapter: The pypowervm Adapter. :param host_uuid: The host system UUID. :param nb_uuid: The Network Bridge UUID. :param vlan_id: The VLAN identifier. :param fail_if_pvid: If set to true, will raise an exception if this is the PVID on a Network Bridge. :param existing_nbs: Optional. If set, should be the existing network bridge wrappers. If not provided, will gather from the system directly. """ _get_bridger(adapter, host_uuid).remove_vlan_from_nb(nb_uuid, vlan_id, fail_if_pvid, existing_nbs) def _get_bridger(adapter, host_uuid): """Returns the appropriate bridger for the action.""" if adapter.traits.vnet_aware: return NetworkBridgerVNET(adapter, host_uuid) else: return NetworkBridgerTA(adapter, host_uuid) @six.add_metaclass(abc.ABCMeta) class NetworkBridger(object): """Defines the high level flows for the VLAN provisioning. This class has the generic flows, subclasses extend this for the derivations of vnet_aware and direct VLAN application. """ def __init__(self, adapter, host_uuid): """Creates the bridger. :param adapter: The pypowervm Adapter. 
:param host_uuid: The host systems's UUID """ self.adapter = adapter self.host_uuid = host_uuid self._orphan_map = None @lock.synchronized(_ENSURE_VLAN_LOCK) def ensure_vlans_on_nb(self, nb_uuid, vlan_ids): """Will make sure that the VLANs are assigned to the Network Bridge. This method will reorder the arbitrary VLAN IDs as needed (those which are the PVID of the TrunkAdapter, but not the primary TrunkAdapter). VLANs are always added to TrunkAdapters that are 'non-primary' (not the first TrunkAdapter). However, if the VLAN is on the primary TrunkAdapter then it is left on the system. The only 'untagged' VLAN that is allowed is the primary TrunkAdapter's PVID. If the VLAN specified is on another Network Bridge's TrunkAdapter (which happens to be on the same virtual switch): - An error will be thrown if it is on the primary TrunkAdapter. - It will be removed off the Network Bridge if it is on the non-primary TrunkAdapter. This method will not remove VLAN IDs from the network bridge that aren't part of the vlan_ids list. Instead, each VLAN is simply added to the Network Bridge's VLAN list. :param nb_uuid: The Network Bridge UUID. :param vlan_ids: The list of VLANs to ensure are on the Network Bridge. """ self._ensure_vlans_on_nb_synch(nb_uuid, vlan_ids) @pvm_retry.retry(tries=60, delay_func=pvm_retry.STEPPED_RANDOM_DELAY) def _ensure_vlans_on_nb_synch(self, nb_uuid, vlan_ids): # Ensure the VLANs are ints, not strings. vlan_ids = [int(x) for x in vlan_ids] # Get the updated feed of NetworkBridges nb_wraps = pvm_net.NetBridge.get( self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid) # Find the appropriate Network Bridge req_nb = pvm_util.find_wrapper(nb_wraps, nb_uuid) # Call down to the ensure_vlan_on_nb method only for the additions. new_vlans = [] peer_nbs = self._find_peer_nbs(nb_wraps, req_nb) all_nbs_on_vs = [req_nb] all_nbs_on_vs.extend(peer_nbs) # Need to evaluate the status of each VLAN. for vlan_id in vlan_ids: # No action required. 
The VLAN is already part of the bridge. if req_nb.supports_vlan(vlan_id): continue # If its supported by a peer... for peer_nb in peer_nbs: if peer_nb.supports_vlan(vlan_id): # Remove the VLAN. self._remove_vlan_from_nb_synch(peer_nb.uuid, vlan_id, fail_if_pvid=True, existing_nbs=nb_wraps) break # If it is an arbitrary VLAN ID on our network. This should be # very rare. But if it does happen, we should re-order the VLANs # and then retry this whole method. if self._is_arbitrary_vid(vlan_id, all_nbs_on_vs): # Find a new arbitrary VLAN ID, and re-assign the original # value to this new one. other_vlans = ( vlan_ids + self._get_orphan_vlans(req_nb.vswitch_id)) new_a_vid = self._find_new_arbitrary_vid( all_nbs_on_vs, others=other_vlans) self._reassign_arbitrary_vid(vlan_id, new_a_vid, req_nb) return self._ensure_vlans_on_nb_synch(nb_uuid, vlan_ids) # At this point, we've done all the easy checks. Next up is to # detect if it is an orphan. self._validate_orphan_on_ensure(vlan_id, req_nb.vswitch_id) # Lastly, if we're here...it must be a completely new VLAN. new_vlans.append(vlan_id) # If there are no new VLANs, no need to continue. if len(new_vlans) == 0: return # At this point, all of the new VLANs that need to be added are in the # new_vlans list. Now we need to put them on load groups. self._add_vlans_to_nb(req_nb, all_nbs_on_vs, new_vlans) # At this point, the network bridge should just need to be updated. # The Load Groups on the Network Bridge should be correct. req_nb.update() @lock.synchronized(_ENSURE_VLAN_LOCK) def remove_vlan_from_nb(self, nb_uuid, vlan_id, fail_if_pvid=False, existing_nbs=None): """Will remove the VLAN from a given Network Bridge. :param nb_uuid: The Network Bridge UUID. :param vlan_id: The VLAN identifier. :param fail_if_pvid: If set to true, will raise an exception if this is the PVID on a Network Bridge. :param existing_nbs: Optional. If set, should be the existing network bridge wrappers. 
If not provided, will gather from the system directly. """ self._remove_vlan_from_nb_synch(nb_uuid, vlan_id, fail_if_pvid, existing_nbs) @pvm_retry.retry(tries=60, delay_func=pvm_retry.STEPPED_RANDOM_DELAY) def _remove_vlan_from_nb_synch(self, nb_uuid, vlan_id, fail_if_pvid=False, existing_nbs=None): # Ensure we're working with an integer vlan_id = int(vlan_id) if existing_nbs is not None: nb_wraps = existing_nbs else: # Get the updated feed of NetworkBridges nb_wraps = pvm_net.NetBridge.get( self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid) # Find our Network Bridge req_nb = pvm_util.find_wrapper(nb_wraps, nb_uuid) # Determine if we're trying to remove an arbitrary PVID off of the # network bridge. If so, we need to get a new, available arbitrary # PVID and swap that in. if vlan_id in req_nb.arbitrary_pvids: # Need to find the peers on this vSwitch. Arbitrary PVIDs can # only be used once per vSwitch all_nbs_on_vs = self._find_peer_nbs(nb_wraps, req_nb, include_self=True) # Find a new arbitrary VLAN ID and swap it to a new, available # value. Need to get the orphans so that we do not assign to an # existing orphan VLAN. other_vlans = [vlan_id] + self._get_orphan_vlans(req_nb.vswitch_id) new_a_vid = self._find_new_arbitrary_vid(all_nbs_on_vs, others=other_vlans) self._reassign_arbitrary_vid(vlan_id, new_a_vid, req_nb) return # If the VLAN is not on the bridge, no action if not req_nb.supports_vlan(vlan_id): return # Fail if we're the PVID. if fail_if_pvid and req_nb.load_grps[0].pvid == vlan_id: raise pvm_exc.PvidOfNetworkBridgeError(vlan_id=vlan_id) # If this is on the first load group/trunk adapter, we leave it. if (req_nb.load_grps[0].pvid == vlan_id or vlan_id in req_nb.load_grps[0].tagged_vlans or len(req_nb.load_grps) == 1): return # Rip the VLAN out of the wrapper element. self._remove_vlan_from_nb(req_nb, vlan_id) # Now update the network bridge. 
req_nb.update() def _is_arbitrary_vid(self, vlan, all_nbs): """Returns if the VLAN is an arbitrary PVID on any passed in network. :param vlan: The VLAN to check. :param all_nbs: All of the network bridges on a given vSwitch. :return: The network bridge that this is an arbitrary VLAN on. """ for nb in all_nbs: if vlan in nb.arbitrary_pvids: return nb return None def _find_new_arbitrary_vid(self, all_nbs, others=()): """Returns a new VLAN ID that can be used as an arbitrary VID. :param all_nbs: All of the impacted network bridges. Should all be on the same vSwitch. :param others: List of other VLANs that should not be used as an arbitrary. :return: A new VLAN ID that is not in use by any network bridge on this vSwitch. """ all_vlans = [] for i_nb in all_nbs: all_vlans.extend(i_nb.list_vlans(pvid=True, arbitrary=True)) all_vlans.extend(others) # Start at 4094, and walk down to find one that isn't already used. # Stop right before VLAN 1 as that is special in the system. for i in range(4094, 1, -1): if i not in all_vlans: return i return None @staticmethod def _find_peer_nbs(nb_wraps, nb, include_self=False): """Finds all of the peer (same vSwitch) Network Bridges. :param nb_wraps: List of pypowervm NetBridge wrappers. :param nb: The NetBridge to find. :param include_self: (Optional, Default False) If set to true, will include the nb in the response list. :return: List of Network Bridges on the same vSwitch as the seed. Does not include the nb element. """ # Find the vswitch to search for. vs_search_id = nb.seas[0].primary_adpt.vswitch_id ret = [] for nb_elem in nb_wraps: # Don't include self. if nb.uuid == nb_elem.uuid and not include_self: continue # See if the vswitches match other_vs_id = nb_elem.seas[0].primary_adpt.vswitch_id if other_vs_id == vs_search_id: ret.append(nb_elem) return ret def _validate_orphan_on_ensure(self, vlan, vswitch_id): """Will throw an error if there is collision with VLAN and vSwitch. 
An orphan VLAN is defined as a VLAN (on a specific vSwitch) that is part of a VIOS, but not attached to a Network Bridge (ex. Shared Ethernet Adapter). :param vlan: The VLAN to query for. :param vswitch_id: The virtual switch identifier. This is the short number (0-15). :raises: OrphanVLANFoundOnProvision """ orphan_map = self._get_orphan_map() # If no oprhans on the vSwitch, then we're fine. if not orphan_map.get(vswitch_id): return # Walk through each element. for vios_name, devices in orphan_map[vswitch_id].items(): for dev_name, vlans in devices.items(): if vlan in vlans: raise pvm_exc.OrphanVLANFoundOnProvision( dev_name=dev_name, vlan_id=vlan, vios=vios_name) def _get_orphan_vlans(self, vswitch_id): """Returns the list of orphan VLANs for a given vSwitch. See _validate_orphan_on_ensure for a definition of an orphan VLAN. :param vswitch_id: The virtual switch identifier. This is the short number (0-15). :return: List of orphan VLANs for the given vSwitch. """ orphan_map = self._get_orphan_map() # If no orphans on the vSwitch, then return an empty list if orphan_map.get(vswitch_id) is None: return [] orphan_vlans = set() for devices in orphan_map[vswitch_id].values(): for dev_key in devices: orphan_vlans.update(devices[dev_key]) return list(orphan_vlans) def _get_orphan_map(self): """Returns the orphan map. See _build_orphan_map for format.""" if self._orphan_map is None: self._orphan_map = self._build_orphan_map() return self._orphan_map def _build_orphan_map(self): """Builds the map of orphan VLANs per vSwitch. Will set the orphan_map variable. The result will be of the following format: { vswitch_id: {'vios_name': { 'dev_name': [vlan_id1, vlan_id2]} } } Note: vswitch_id and vlan_id are int type. This call should be used sparingly. The map is only built if provisioning a new VLAN or removing one. The calls that this makes are expensive, but necessary for correctness. This is why they are lazy loaded, as many calls may not even need this map. 
Also note that only trunk adapters are considered as orphans. While there certainly could be non-trunked adapters present as well, they will not conflict. So while strange, it's functional. :return: The orphan map. """ # Wipe out the existing map. orphan_map = {} # Loop through all the VIOSes. vios_wraps = pvm_vios.VIOS.get(self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid, xag=[c.XAG.VIO_NET]) for vios_w in vios_wraps: # List all of the trunk adapters that are not part of the SEAs orphan_trunks = [] for trunk in vios_w.trunk_adapters: # If the trunk has the same device ID as any of the SEAs # children, then it is not an orphan. for sea in vios_w.seas: if sea.contains_device(trunk.dev_name): break else: orphan_trunks.append(trunk) # At this point, we know all the orphans for this VIOS. Add them # to the map. for orphan_trunk in orphan_trunks: vlans = [orphan_trunk.pvid] + orphan_trunk.tagged_vlans self._put_orphan_in_map( orphan_map, vios_w, orphan_trunk.vswitch_id, orphan_trunk.dev_name, vlans) return orphan_map def _put_orphan_in_map(self, orphan_map, vios_w, vswitch_id, dev_name, vlan_ids): # Make sure the orphan map is initialized and ready. if vswitch_id not in orphan_map: orphan_map[vswitch_id] = {} if vios_w.name not in orphan_map[vswitch_id]: orphan_map[vswitch_id][vios_w.name] = {} # We can't just replace the device name. The name may be 'Unknown', # so we just keep appending. vio_part = orphan_map[vswitch_id][vios_w.name] if dev_name not in vio_part: vio_part[dev_name] = [] vio_part[dev_name].extend(vlan_ids) def _reassign_arbitrary_vid(self, old_vid, new_vid, impacted_nb): """Moves the arbitrary VLAN ID from one Load Group to another. Should perform the actual update to the API. :param old_vid: The original arbitrary VLAN ID. :param new_vid: The new arbitrary VLAN ID. :param impacted_nb: The network bridge that is impacted. 
""" raise NotImplementedError() def _add_vlans_to_nb(self, req_nb, all_nbs_on_vs, new_vlans): """Adds the VLANs to the Network Bridge Wrapper. :param req_nb: The NetworkBridge to add the VLANs to. After this method is complete, this req_nb will have the appropriate information to perform an update to the API. :param all_nbs_on_vs: List of all the network bridges on the virtual switch. :param new_vlans: List of the new VLANs to put on the network bridge. """ raise NotImplementedError() def _remove_vlan_from_nb(self, req_nb, vlan_id): """Removes the VLAN from the Network Bridge wrapper. :param req_nb: The Network Bridge. Upon return, the wrapper should not support the VLAN. :param vlan_id: The VLAN ID to remove. """ raise NotImplementedError() class NetworkBridgerVNET(NetworkBridger): """The virtual network aware NetworkBridger.""" def _add_vlans_to_nb(self, req_nb, all_nbs_on_vs, new_vlans): """Adds the VLANs to the Network Bridge Wrapper. :param req_nb: The NetworkBridge to add the VLANs to. After this method is complete, this req_nb will have the appropriate information to perform an update to the API. :param all_nbs_on_vs: List of all the network bridges on the virtual switch. :param new_vlans: List of the new VLANs to put on the network bridge. """ # At this point, all of the new VLANs that need to be added are in the # new_vlans list. Now we need to put them on load groups. vswitch_w = pvm_net.VSwitch.search( self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid, one_result=True, switch_id=req_nb.vswitch_id) vnets = pvm_net.VNet.get(self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid) for vlan_id in new_vlans: ld_grp = self._find_available_ld_grp(req_nb) vid_vnet = self._find_or_create_vnet(vnets, vlan_id, vswitch_w, tagged=True) if ld_grp is None: # No load group means they're all full. Need to create a new # Load Group. 
# # First, create a new 'non-tagging' virtual network other_vlans = (new_vlans + self._get_orphan_vlans(req_nb.vswitch_id)) arb_vid = self._find_new_arbitrary_vid(all_nbs_on_vs, others=other_vlans) arb_vnet = self._find_or_create_vnet(vnets, arb_vid, vswitch_w, tagged=False) # Now create the new load group... vnet_uris = [arb_vnet.related_href, vid_vnet.related_href] ld_grp = pvm_net.LoadGroup.bld(self.adapter, arb_vid, vnet_uris) # Append to network bridge... req_nb.load_grps.append(ld_grp) else: # There was a Load Group. Just need to append this vnet to it. ld_grp.vnet_uri_list.append(vid_vnet.related_href) def _remove_vlan_from_nb(self, req_nb, vlan_id): """Removes the VLAN from the Network Bridge wrapper. :param req_nb: The Network Bridge. Upon return, the wrapper should not support the VLAN. :param vlan_id: The VLAN ID to remove. """ # Find the matching load group. Since the 'supports_vlan' passed # before, this will always find a value. matching_lg = None for lg in req_nb.load_grps[1:]: if vlan_id in lg.tagged_vlans: matching_lg = lg break # A load balanced bridge requires at least two load groups. We can't # remove a load group from the network bridge if it is load balanced, # but only has two load groups... Make sure if it is load balanced # we wouldn't be deleting a required load group. can_remove_for_lb = (len(req_nb.load_grps) > 2 or not req_nb.load_balance) if can_remove_for_lb and len(matching_lg.tagged_vlans) == 1: # Remove the load group req_nb.load_grps.remove(matching_lg) else: # Else just remove that virtual network. In the case of load # balancing, you may end up with the second load group being # just a place holder. But this is required by the system. vnet_uri = self._find_vnet_uri_from_lg(matching_lg, vlan_id) matching_lg.vnet_uri_list.remove(vnet_uri) def _reassign_arbitrary_vid(self, old_vid, new_vid, impacted_nb): """Moves the arbitrary VLAN ID from one Load Group to another. :param old_vid: The original arbitrary VLAN ID. 
:param new_vid: The new arbitrary VLAN ID. :param impacted_nb: The network bridge that is impacted. """ # Find the Load Group that has this arbitrary VID impacted_lg = None for ld_grp in impacted_nb.load_grps: if ld_grp.pvid == old_vid: impacted_lg = ld_grp break # For the _find_or_create_vnet, we need to query all the virtual # networks vswitch_w = pvm_net.VSwitch.search( self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid, one_result=True, switch_id=impacted_nb.vswitch_id) vnets = pvm_net.VNet.get(self.adapter, parent_type=pvm_ms.System, parent_uuid=self.host_uuid) # Read the old virtual network old_uri = self._find_vnet_uri_from_lg(impacted_lg, old_vid) # Need to create the new Virtual Network new_vnet = self._find_or_create_vnet(vnets, new_vid, vswitch_w, tagged=False) # Now we need to clone the load group uris = copy.copy(impacted_lg.vnet_uri_list) if old_uri is not None: uris.remove(old_uri) uris.insert(0, new_vnet.related_href) new_lg_w = pvm_net.LoadGroup.bld(self.adapter, new_vid, uris) impacted_nb.load_grps.remove(impacted_lg) # Need two updates. One to remove the load group. impacted_nb = impacted_nb.update() # A second to add the new load group in impacted_nb.load_grps.append(new_lg_w) impacted_nb = impacted_nb.update() # Now that the old vid is detached from the load group, need to delete # the Virtual Network (because it was 'tagged' = False). if old_uri is not None: self.adapter.delete_by_href(old_uri) def _find_or_create_vnet(self, vnets, vlan, vswitch, tagged=True): """Will find (or create) the VNet. If the VirtualNetwork already exists but has a different tag attribute, this method will delete the old virtual network, and then recreate with the specified tagged value. :param vnets: The virtual network wrappers on the system. :param vlan: The VLAN to find. :param vswitch: The vSwitch wrapper. :param tagged: True if tagged traffic will flow through this network. :return: The VNet wrapper for this element. 
        """
        # Look through the list of vnets passed in
        for vnet in vnets:
            if vnet.vlan == vlan and vnet.vswitch_id == vswitch.switch_id:
                if tagged == vnet.tagged:
                    return vnet
                else:
                    # We found a matching vNet, but the tag was wrong.  Need
                    # to delete it.
                    self.adapter.delete_by_href(vnet.href)
                    break

        # Could not find one.  Time to create it.
        name = 'VLAN%(vid)s-%(vswitch)s' % {'vid': str(vlan),
                                            'vswitch': vswitch.name}
        vnet_elem = pvm_net.VNet.bld(
            self.adapter, name, vlan, vswitch.related_href, tagged)
        return vnet_elem.create(parent_type=pvm_ms.System,
                                parent_uuid=self.host_uuid)

    def _find_available_ld_grp(self, nb):
        """Will return the Load Group that can support a new VLAN.

        This will be the load group with the lowest number of virtual
        networks on it.

        :param nb: The NetBridge to search through.
        :return: The 'best' LoadGroup within the NetBridge that can support
                 a new VLAN.  If all are full, will return None.  Best is
                 determined by 'one with fewest Virtual Networks'.
        """
        # Never provision to the first load group.  We do this to keep
        # consistency with how projects have done in past.
        if len(nb.load_grps) == 1:
            return None

        # Find the load group with the fewest VLANs.
        avail_count = 0
        cur_lg = None
        ld_grps = nb.load_grps[1:]
        for ld_grp in ld_grps:
            # If this Load Group is full, skip to the next.
            if len(ld_grp.vnet_uri_list) >= _MAX_VLANS_PER_VEA:
                continue
            avail_count += 1

            # If the load group hasn't been set - OR - this load group has
            # less than the previously set, update which we'll return
            if (cur_lg is None or
                    len(ld_grp.vnet_uri_list) < len(cur_lg.vnet_uri_list)):
                cur_lg = ld_grp

        # If load balancing is turned on, we have some further inspection
        # to do.
        #
        # When load balancing is enabled, the goal is to have the VLANs
        # spread evenly across the 'additional Load Groups'.  So you create
        # pairs of Load Groups and add VLANs back and forth between them.
        if nb.load_balance and cur_lg is not None:
            # If there is only one Load Group available, but we have an
            # 'odd' amount of load groups.  That signals that we need to
            # create a new Load Group because we're unbalanced.  Returning
            # None will flag to create a new Load Group to put the virtual
            # network (VLAN) on.
            #
            # Being unbalanced will naturally occur once a pair is full of
            # VLANs.  In that case, the cur_lg would have been None and the
            # code would have created a single new Load Group for that new
            # VLAN.  The code does not want to create the pair at that time
            # because an empty Load Group supporting nothing is a waste.  So
            # this code only gets used when the second VLAN is being added,
            # thus balancing the Load Groups again.
            if avail_count == 1 and len(ld_grps) % 2 == 1:
                return None

        return cur_lg

    def _find_vnet_uri_from_lg(self, lg, vlan):
        """Finds the Virtual Network for a VLAN within a LoadGroup.

        :param lg: The LoadGroup wrapper.
        :param vlan: The VLAN within the Load Group to look for.
        :return: The Virtual Network URI for the vlan.  If not found within
                 the Load Group, None will be returned.
        """
        for vnet_uri in lg.vnet_uri_list:
            # One REST GET per URI - cost scales with the load group size.
            vnet_net = pvm_net.VNet.get_by_href(self.adapter, vnet_uri)
            if vnet_net.vlan == vlan:
                return vnet_net.related_href
        return None


class NetworkBridgerTA(NetworkBridger):
    """The Trunk Adapter aware NetworkBridger."""

    def _reassign_arbitrary_vid(self, old_vid, new_vid, impacted_nb):
        """Moves the arbitrary VLAN ID from one Load Group to another.

        Should perform the actual update to the API.

        :param old_vid: The original arbitrary VLAN ID.
        :param new_vid: The new arbitrary VLAN ID.
        :param impacted_nb: The network bridge that is impacted.
        """
        # Find the Trunk Adapters that has this arbitrary VID
        # NOTE(review): if no additional adapter carries old_vid, this stays
        # (None, None) and the loop below would set pvid on None - callers
        # appear to guarantee a match; confirm before relying on that.
        impacted_tas = (None, None)
        for ta in impacted_nb.seas[0].addl_adpts:
            if ta.pvid == old_vid:
                impacted_tas = self._trunk_list(impacted_nb, ta)
                break

        # For each Trunk Adapter, change the VID to the new value.
        for ta in impacted_tas:
            ta.pvid = new_vid

        # Call the update
        impacted_nb = impacted_nb.update()

    def _add_vlans_to_nb(self, req_nb, all_nbs_on_vs, new_vlans):
        """Adds the VLANs to the Network Bridge Wrapper.

        :param req_nb: The NetworkBridge to add the VLANs to.  After this
                       method is complete, this req_nb will have the
                       appropriate information to perform an update to the
                       API.
        :param all_nbs_on_vs: List of all the network bridges on the virtual
                              switch.
        :param new_vlans: List of the new VLANs to put on the network
                          bridge.
        """
        # At this point, all of the new VLANs that need to be added are in
        # the new_vlans list.  Now we need to put them on trunk adapters.
        vswitch_w = pvm_net.VSwitch.search(
            self.adapter, parent_type=pvm_ms.System,
            parent_uuid=self.host_uuid, one_result=True,
            switch_id=req_nb.vswitch_id)
        for vlan_id in new_vlans:
            trunks = self._find_available_trunks(req_nb)

            if trunks is None:
                # No trunk adapter list means they're all full.  Need to
                # create a new Trunk Adapter (or pair) for the new VLAN.
                other_vlans = (new_vlans +
                               self._get_orphan_vlans(req_nb.vswitch_id))
                arb_vid = self._find_new_arbitrary_vid(all_nbs_on_vs,
                                                       others=other_vlans)

                # One new trunk per SEA so a failover pair stays mirrored.
                for sea in req_nb.seas:
                    trunk = pvm_net.TrunkAdapter.bld(
                        self.adapter, arb_vid, [vlan_id], vswitch_w,
                        trunk_pri=sea.primary_adpt.trunk_pri)
                    sea.addl_adpts.append(trunk)
            else:
                # Available trunks were found.  Add the VLAN to each
                for trunk in trunks:
                    trunk.tagged_vlans.append(vlan_id)

    def _remove_vlan_from_nb(self, req_nb, vlan_id):
        """Removes the VLAN from the Network Bridge wrapper.

        :param req_nb: The Network Bridge.  Upon return, the wrapper should
                       not support the VLAN.
        :param vlan_id: The VLAN ID to remove.
        """
        # Find the matching trunk adapter.
        matching_tas = None
        for trunk in req_nb.seas[0].addl_adpts:
            if vlan_id in trunk.tagged_vlans:
                matching_tas = self._trunk_list(req_nb, trunk)
                break

        # A load balanced SEA requires at least a primary adapter and at
        # least one additional adapter.  We can't remove a trunk from the
        # SEA if it is load balanced, but only has a single additional
        can_remove_for_lb = (len(req_nb.seas[0].addl_adpts) > 1
                             if req_nb.load_balance else True)

        for matching_ta in matching_tas:
            if len(matching_ta.tagged_vlans) == 1 and can_remove_for_lb:
                # Last VLAN, so it can be removed from the SEA.
                for sea in req_nb.seas:
                    if matching_ta in sea.addl_adpts:
                        sea.addl_adpts.remove(matching_ta)
                        break
            else:
                # Otherwise, we just remove it from the list.
                matching_ta.tagged_vlans.remove(vlan_id)

    def _find_peer_trunk(self, nb, ta):
        """Finds the peer adapter when the network bridge is failover ready.

        When a Network Bridge is set up for failover, there are two SEAs.
        Each is essentially a mirror of each other, but are on different
        Virtual I/O Servers.  This means identical Trunk Adapters - but a
        different physical adapters.

        This method finds the 'peer' adapter, that happens to be on a
        different I/O Server.

        :param nb: The network bridge wrapper.
        :param ta: The Trunk Adapter from the first SEA in the network
                   bridge.
        :return: The peer adapter per the above criteria.  If the network
                 bridge is not set up for failover, then None is returned.
        """
        if len(nb.seas) <= 1:
            return None

        # Peer matching is done purely by PVID against the second SEA.
        sea = nb.seas[1]
        if sea.primary_adpt.pvid == ta.pvid:
            return sea.primary_adpt

        for addl_adpt in sea.addl_adpts:
            if addl_adpt.pvid == ta.pvid:
                return addl_adpt

        return None

    def _trunk_list(self, nb, ta):
        """For a given trunk adapter, builds the list of trunks to modify.

        :param nb: The network bridge wrapper.
        :param ta: The Trunk Adapter from the first SEA in the network
                   bridge.
        :return: List of trunk adapters.  Includes the peer.  If no peer,
                 then only one element is returned in the list.
        """
        peer = self._find_peer_trunk(nb, ta)
        if peer:
            return [ta, peer]
        else:
            return [ta]

    def _find_available_trunks(self, nb):
        """Will return a list of Trunk Adapters that can support a new VLAN.

        Finds the set of trunk adapters with the lowest number of VLANs on
        it.

        :param nb: The NetBridge to search through.
        :return: A set of trunk adapters that can support the new VLAN.  A
                 set is returned as there may be multiple Virtual I/O
                 Servers that support it.  Each I/O Server may have a trunk
                 to update.  If No available Trunk Adapters are found, then
                 None is returned.
        """
        # Find a trunk with the lowest amount of VLANs on it.
        cur_min = None
        avail_count = 0
        for trunk in nb.seas[0].addl_adpts:
            # If this trunk has maxed out its VLANs, skip to next.
            if len(trunk.tagged_vlans) >= _MAX_VLANS_PER_VEA:
                continue

            # This could definitely support it...
            avail_count += 1

            # But, is it the best?
            if (cur_min is None or
                    len(trunk.tagged_vlans) < len(cur_min.tagged_vlans)):
                cur_min = trunk

        # If load balancing is turned on, we have some further inspection
        # to do.
        #
        # When load balancing is enabled, the goal is to have the VLANs
        # spread evenly across the 'additional trunk adapters'.  So you
        # create pair and add VLANs back and forth between them.
        if nb.load_balance and cur_min is not None:
            # If there is only one set of trunk adapters available, but we
            # have an 'odd' amount of trunk adapters, that signals that we
            # need to create a new Trunk Adapter because we're unbalanced.
            # Returning None will flag to create a new Trunk Adapter to put
            # the VLAN on.
            #
            # Being unbalanced will naturally occur once a pair is full of
            # VLANs.  In that case, the cur_min would have been None and the
            # code would have created a single set of Trunk Adapters for
            # that new VLAN.  The code does not want to create the pair at
            # that time because an empty Trunk Adapter supporting nothing is
            # a waste.  So this code only gets used when the second VLAN is
            # being added, thus balancing the Trunk Adapters again.
if avail_count == 1 and len(nb.seas[0].addl_adpts) % 2 == 1: return None # Return the trunk list if we have a trunk adapter, otherwise just # return None return self._trunk_list(nb, cur_min) if cur_min is not None else None pypowervm-1.1.24/pypowervm/tasks/vfc_mapper.py0000664000175000017500000010423313571367171021144 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Specialized tasks for NPIV World-Wide Port Names (WWPNs).""" from oslo_log import log as logging from pypowervm import const as c from pypowervm import exceptions as e from pypowervm.i18n import _ from pypowervm import util as u from pypowervm.utils import uuid from pypowervm.wrappers import base_partition as bp from pypowervm.wrappers import job as pvm_job from pypowervm.wrappers import managed_system as pvm_ms from pypowervm.wrappers import virtual_io_server as pvm_vios import six LOG = logging.getLogger(__name__) _ANY_WWPN = '-1' _FUSED_ANY_WWPN = '-1 -1' _GET_NEXT_WWPNS = 'GetNextWWPNs' def build_wwpn_pair(adapter, host_uuid, pair_count=1): """Builds a WWPN pair that can be used for a VirtualFCAdapter. Note: The API will only generate up to 8 pairs at a time. Any more will cause the API to raise an error. :param adapter: The adapter to talk over the API. :param host_uuid: The host system for the generation. :param pair_count: (Optional, Default: 1) The number of WWPN pairs to generate. 
                       Can not be more than 8 or else the API will fail.
    :return: Non-mutable WWPN Pairs (list)
    """
    # Build up the job & invoke
    resp = adapter.read(
        pvm_ms.System.schema_type, root_id=host_uuid,
        suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_GET_NEXT_WWPNS)
    job_w = pvm_job.Job.wrap(resp)
    job_p = [job_w.create_job_parameter('numberPairsRequested',
                                        str(pair_count))]
    job_w.run_job(host_uuid, job_parms=job_p)

    # Get the job result, and parse the output.
    job_result = job_w.get_job_results_as_dict()
    return job_result['wwpnList'].split(',')


def find_vios_for_wwpn(vios_wraps, p_port_wwpn):
    """Will find the VIOS that has a PhysFCPort for the p_port_wwpn.

    :param vios_wraps: A list or set of VIOS wrappers.
    :param p_port_wwpn: The physical port's WWPN.
    :return: The VIOS wrapper that contains a physical port with the WWPN.
             If there is not one, then None will be returned.
    :return: The port (which is a PhysFCPort wrapper) on the VIOS wrapper
             that represents the physical port.
    """
    # Sanitize our input
    s_p_port_wwpn = u.sanitize_wwpn_for_api(p_port_wwpn)
    for vios_w in vios_wraps:
        for port in vios_w.pfc_ports:
            # No need to sanitize the API WWPN, it comes from the API.
            if u.sanitize_wwpn_for_api(port.wwpn) == s_p_port_wwpn:
                return vios_w, port
    return None, None


def find_vios_for_vfc_wwpns(vios_wraps, vfc_wwpns):
    """Will find the VIOS that is hosting the vfc_wwpns.

    :param vios_wraps: A list or set of VIOS wrappers.
    :param vfc_wwpns: The list or set of virtual fibre channel WWPNs.
    :return: The VIOS wrapper that supports the vfc adapters.  If there is
             not one, then None will be returned.
    :return: The VFCMapping on the VIOS that supports the client adapters.
    """
    # Sanitize our input
    vfc_wwpns = {u.sanitize_wwpn_for_api(x) for x in vfc_wwpns}
    for vios_w in vios_wraps:
        for vfc_map in vios_w.vfc_mappings:
            # If the map has no client adapter...then move on
            if not vfc_map.client_adapter:
                continue

            # Maps without backing ports are effectively stale.  We
            # shouldn't consider them.
            if vfc_map.backing_port is None:
                continue

            # If the WWPNs match, return it
            if vfc_wwpns == set(vfc_map.client_adapter.wwpns):
                return vios_w, vfc_map
    return None, None


def intersect_wwpns(wwpn_set1, wwpn_set2):
    """Will return the intersection of WWPNs between the two sets.

    :param wwpn_set1: A list of WWPNs.
    :param wwpn_set2: A list of WWPNs.
    :return: The intersection of the WWPNs.  Will maintain the WWPN format
             of wwpn_set1, but the comparison done will be agnostic of
             formats (ex. colons and/or upper/lower case).
    """
    wwpn_set2 = [u.sanitize_wwpn_for_api(x) for x in wwpn_set2]
    return [y for y in wwpn_set1 if u.sanitize_wwpn_for_api(y) in wwpn_set2]


def derive_base_npiv_map(vios_wraps, p_port_wwpns, v_port_count):
    """Builds a blank NPIV port mapping, without any known vFC WWPNs.

    This method is functionally similar to the derive_npiv_map.  However,
    the derive_npiv_map method assumes knowledge of the Virtual Fibre
    Channel mappings beforehand.  This method will generate a similar map,
    but when sent to the add_map method, that method will allow the API to
    generate the globally unique WWPNs rather than pre-seeding them.

    :param vios_wraps: A list of VIOS wrappers.  Can be built using the
                       extended attribute group (xag) of VIO_FMAP.
    :param p_port_wwpns: A list of the WWPNs (strings) that can be used to
                         map the ports to.  These WWPNs should reside on
                         Physical FC Ports on the VIOS wrappers that were
                         passed in.
    :param v_port_count: The number of virtual ports to create.
    :return: A list of sets.  The format will be similar to that of the
             derive_npiv_map method.  However, instead of a
             fused_vfc_port_wwpn a marker will be used to indicate that the
             API should generate the WWPN.
    """
    # Double the count of the markers.  Should result in -1 -1 as the WWPN.
    v_port_markers = [_ANY_WWPN] * v_port_count * 2
    return derive_npiv_map(vios_wraps, p_port_wwpns, v_port_markers)


def derive_npiv_map(vios_wraps, p_port_wwpns, v_port_wwpns, preserve=True):
    """This method will derive a NPIV map.
    A NPIV map is the linkage between an NPIV virtual FC Port and the
    backing physical port.  Two v_port_wwpns get tied to an individual
    p_port_wwpn.

    A list of the 'mappings' will be returned.  One per pair of
    v_port_wwpns.

    The mappings will first attempt to spread across the VIOSes.  Within
    each VIOS, the port with the most available free NPIV ports will be
    selected.

    There are scenarios where ports on a single VIOS could be reused.
    - 4 v_port_wwpns, all p_port_wwpns reside on single VIOS
    - 8 v_port_wwpns, only two VIOSes
    - Etc...

    In these scenarios, the ports will be spread such that they're running
    across all the physical ports (that were passed in) on a given VIOS.

    In even rarer scenarios, the same physical port may be re-used if the
    v_port_wwpn pairs exceed the total number of p_port_wwpns.

    :param vios_wraps: A list of VIOS wrappers.  Can be built using the
                       extended attribute group (xag) of VIO_FMAP.
    :param p_port_wwpns: A list of the WWPNs (strings) that can be used to
                         map the ports to.  These WWPNs should reside on
                         Physical FC Ports on the VIOS wrappers that were
                         passed in.
    :param v_port_wwpns: A list of the virtual fibre channel port WWPNs.
                         Must be an even number of ports.
    :param preserve: (Optional, Default=True) If True, existing mappings
                     with matching virtual fibre channel ports are
                     preserved.  Else new mappings are generated.
    :return: A list of tuples representing both new and preserved mappings.
             The format will be:
      [ (p_port_wwpn1, fused_vfc_port_wwpn1),
        (p_port_wwpn2, fused_vfc_port_wwpn2),
        etc... ]

             A 'fused_vfc_port_wwpn' is simply taking two v_port_wwpns,
             sanitizing them and then putting them into a single string
             separated by a space.
    """
    # Fuse all the v_port_wwpns together.
    fused_v_port_wwpns = _fuse_vfc_ports(v_port_wwpns)

    # Up front sanitization of all the p_port_wwpns
    p_port_wwpns = list(map(u.sanitize_wwpn_for_api, p_port_wwpns))

    existing_maps = []
    new_fused_wwpns = []

    # Detect if any mappings already exist on the system.
    for fused_v_wwpn in fused_v_port_wwpns:
        # If the mapping already exists, then add it to the existing maps.
        vfc_map = has_client_wwpns(vios_wraps, fused_v_wwpn.split(" "))[1]

        # Preserve an existing mapping if preserve=True.  Otherwise, the
        # backing_port may not be set and this is not an error condition if
        # the vfc mapping is getting rebuilt.
        if vfc_map is not None and preserve:
            # Maps without backing ports are effectively stale.  We
            # shouldn't need to preserve them.
            if vfc_map.backing_port is not None:
                mapping = (vfc_map.backing_port.wwpn, fused_v_wwpn)
                existing_maps.append(mapping)
        else:
            new_fused_wwpns.append(fused_v_wwpn)
            LOG.debug("Add new map for client wwpns %s. Existing map=%s, "
                      "preserve=%s", fused_v_wwpn, vfc_map, preserve)

    return _derive_npiv_map(
        vios_wraps, new_fused_wwpns, p_port_wwpns, existing_maps)


def _derive_npiv_map(vios_wraps, new_fused_wwpns, p_port_wwpns,
                     existing_maps):
    """Builds the new physical-to-virtual port mappings.

    Round-robins across the VIOSes, picking the least-used physical port on
    each, until every new fused WWPN pair has a backing physical port.

    :param vios_wraps: The VIOS wrappers to spread the mappings across.
    :param new_fused_wwpns: Fused vFC WWPN strings still needing a mapping.
    :param p_port_wwpns: Sanitized physical port WWPN candidates.
    :param existing_maps: Mappings already in place; counted when balancing
                          and returned along with the new ones.
    :return: existing_maps plus the newly built (p_wwpn, fused_wwpn) tuples.
    :raises e.UnableToFindFCPortMap: If no VIOS can service the request.
    """
    # Determine how many mappings are needed.
    needed_maps = len(new_fused_wwpns)
    newly_built_maps = []

    next_vio_pos = 0
    fuse_map_pos = 0
    loops_since_last_add = 0

    # This loop will continue through each VIOS (first set of load
    # balancing should be done by VIOS) and if there are ports on that
    # VIOS, will add them to the mapping.
    #
    # There is a rate limiter here though.  If none of the VIOSes are
    # servicing the request, then this has potential to be infinite loop.
    # The rate limiter detects such a scenario and will prevent it from
    # occurring.  In these cases the UnableToFindFCPortMap exception is
    # raised.
    #
    # As such, limit it such that if no VIOS services the request, we break
    # out of the loop and throw error.
    while len(newly_built_maps) < needed_maps:
        # Walk through each VIOS.
        vio = vios_wraps[next_vio_pos]
        loops_since_last_add += 1

        # If we've been looping more than the VIOS count, we need to exit.
        # Something has gone amuck.
        if loops_since_last_add > len(vios_wraps):
            raise e.UnableToFindFCPortMap()

        # This increments the VIOS position for the next loop
        next_vio_pos = (next_vio_pos + 1) % len(vios_wraps)

        # Find the FC Ports that are on this system.
        potential_ports = _find_ports_on_vio(vio, p_port_wwpns)
        if len(potential_ports) == 0:
            # No ports on this VIOS.  Continue to next.
            continue

        # Next, from the potential ports, find the PhysFCPort that we
        # should use for the mapping.
        new_map_port = _find_map_port(potential_ports,
                                      newly_built_maps + existing_maps)
        if new_map_port is None:
            # If there was no mapping port, then we should continue on to
            # the next VIOS.
            continue

        # Add the mapping!
        mapping = (new_map_port.wwpn, new_fused_wwpns[fuse_map_pos])
        fuse_map_pos += 1
        newly_built_maps.append(mapping)
        loops_since_last_add = 0

    # Mesh together the existing mapping lists plus the newly built ports.
    return newly_built_maps + existing_maps


def _find_map_port(potential_ports, mappings):
    """Will determine which port to use for a new mapping.

    :param potential_ports: List of PhysFCPort wrappers that are candidate
                            ports.
    :param mappings: The existing mappings, as generated by
                     derive_npiv_map.
    :return: The PhysFCPort that should be used for this mapping.
    """
    # The first thing that we need is to understand how many physical ports
    # have been used by mappings already.  This is important for the
    # scenarios where we're looping across the same set of physical ports
    # on this VIOS.  We should avoid reusing those same physical ports as
    # our previous mappings as much as possible, to allow for as much
    # physical multi pathing as possible.
    #
    # This dictionary will have a key of every port's WWPN, within that it
    # will have a dictionary which contains 'port_mapping_use' and then the
    # 'port' which is the port passed in.
    port_dict = dict((p.wwpn, {'port_mapping_use': 0, 'port': p})
                     for p in potential_ports)

    for mapping in mappings:
        p_wwpn = mapping[0]

        # If this physical WWPN is not in our port_dict, then we know that
        # this port is not on the VIOS.  Therefore, we can't take it into
        # consideration.
        if p_wwpn not in port_dict.keys():
            continue

        # Increment our counter to indicate that a previous mapping already
        # has used this physical port.
        port_dict[p_wwpn]['port_mapping_use'] += 1

    # Now find the set of ports with the lowest count of usage by mappings.
    # The first time through this method, this will be all the physical
    # ports.  This is only interesting once the previous mappings come into
    # play.  This is where we reduce the 'candidate physical ports' down to
    # those that are least used by our existing mappings.
    #
    # There is a reasonable upper limit of 128 mappings (which should be
    # beyond what admins will want to map vFC's to a single pFC).  We
    # simply put that in to avoid infinite loops in the extremely, almost
    # unimaginable event that we've reached an upper boundary.  :-)
    #
    # Subsequent logic should be OK with this, as we simply will return
    # None for the next available port.
    starting_count = 0
    list_of_cand_ports = []
    while len(list_of_cand_ports) == 0 and starting_count < 128:
        for port_info in port_dict.values():
            if port_info['port_mapping_use'] == starting_count:
                list_of_cand_ports.append(port_info['port'])

        # Increment the count, in case we have to loop again.
        starting_count += 1

    # At this point, the list_of_cand_ports is essentially a list of ports
    # least used by THIS mapping.  Now, we need to narrow that down to the
    # port that has the most npiv_available_ports.  The one with the most
    # available ports is the least used.  Therefore the best candidate
    # (that we can choose with limited info).
    high_avail_port = None
    for port in list_of_cand_ports:
        # If this is the first port, or this new port has more available
        # NPIV slots, then use that.
        if (high_avail_port is None or
                high_avail_port.npiv_available_ports <
                port.npiv_available_ports):
            high_avail_port = port

    return high_avail_port


def _find_ports_on_vio(vio_w, p_port_wwpns):
    """Will return a list of Physical FC Ports on the vio_w.

    :param vio_w: The VIOS wrapper.
    :param p_port_wwpns: The list of all physical ports.  May exceed the
                         ports on the VIOS.
    :return: List of the physical FC Port wrappers that are on the VIOS for
             the WWPNs that exist on this system.
    """
    return [port for port in vio_w.pfc_ports
            if u.sanitize_wwpn_for_api(port.wwpn) in p_port_wwpns]


def _fuse_vfc_ports(wwpn_list):
    """Returns a list of fused VFC WWPNs.  See derive_npiv_map."""
    # Sanitize, then join consecutive pairs with a single space.
    l = list(map(u.sanitize_wwpn_for_api, wwpn_list))
    return list(map(' '.join, zip(l[::2], l[1::2])))


def find_pfc_wwpn_by_name(vios_w, pfc_name):
    """Returns the physical port wwpn within a VIOS based off the FC port
    name.

    :param vios_w: VIOS wrapper.
    :param pfc_name: The physical fibre channel port name.
    :return: The WWPN of the matching port, or None if not found.
    """
    for port in vios_w.pfc_ports:
        if port.name == pfc_name:
            return port.wwpn
    return None


def find_maps(mapping_list, client_lpar_id, client_adpt=None, port_map=None):
    """Filter a list of VFC mappings by LPAR ID.

    This is based on scsi_mapper.find_maps, but does not yet provide all
    the same functionality.

    :param mapping_list: The mappings to filter.  Iterable of VFCMapping.
    :param client_lpar_id: Integer short ID or string UUID of the LPAR on
                           the client side of the mapping.  Note that the
                           UUID form relies on the presence of the
                           client_lpar_href field.  Some mappings lack this
                           field, and would therefore be ignored.
    :param client_adpt: (Optional, Default=None) If set, will only include
                        the mapping if the client adapter's WWPNs match as
                        well.
    :param port_map: (Optional, Default=None) If set, will look for a
                     matching mapping based off the client WWPNs as
                     specified by the port mapping.  The format of this is
                     defined by the derive_npiv_map method.
    :return: A list comprising the subset of the input mapping_list whose
             client LPAR IDs match client_lpar_id.
    """
    is_uuid, client_id = uuid.id_or_uuid(client_lpar_id)
    matching_maps = []

    if port_map:
        v_wwpns = [u.sanitize_wwpn_for_api(x)
                   for x in port_map[1].split()]

    for vfc_map in mapping_list:
        # If to a different VM, continue on.
        href = vfc_map.client_lpar_href
        if is_uuid and (not href or client_id != u.get_req_path_uuid(
                href, preserve_case=True)):
            continue
        elif not is_uuid and vfc_map.server_adapter.lpar_id != client_id:
            # Use the server adapter ^^ in case this is an orphan.
            continue

        # If there is a client adapter, and it is not a 'ANY WWPN', then
        # check to see if the mappings match.
        if client_adpt and client_adpt.wwpns != {_ANY_WWPN}:
            # If they passed in a client adapter, but the map doesn't have
            # one, then we have to ignore
            if not vfc_map.client_adapter:
                continue

            # Check to make sure the WWPNs between the two match.  This
            # should be an order independence check (as this query
            # shouldn't care...  but the API itself does care about order).
            if set(client_adpt.wwpns) != set(vfc_map.client_adapter.wwpns):
                continue

        # If the user had a port map, do the virtual WWPNs from that port
        # map match the client adapter wwpn map.
        if port_map:
            if vfc_map.client_adapter is None:
                continue

            # If it is a new mapping with generated WWPNs, then the client
            # adapter can't have WWPNs.
            if v_wwpns == [_ANY_WWPN, _ANY_WWPN]:
                if vfc_map.client_adapter.wwpns != []:
                    continue
            elif set(vfc_map.client_adapter.wwpns) != set(v_wwpns):
                continue

        # Found a match!
        matching_maps.append(vfc_map)

    return matching_maps


def remove_maps(v_wrap, client_lpar_id, client_adpt=None, port_map=None):
    """Remove one or more VFC mappings from a VIOS wrapper.

    The changes are not flushed back to the REST server.

    :param v_wrap: VIOS EntryWrapper representing the Virtual I/O Server
                   whose VFC mappings are to be updated.
    :param client_lpar_id: The integer short ID or string UUID of the
                           client VM
    :param client_adpt: (Optional, Default=None) If set, will only add the
                        mapping if the client adapter's WWPNs match as
                        well.
    :param port_map: (Optional, Default=None) If set, will look for a
                     matching mapping based off the client WWPNs as
                     specified by the port mapping.  The format of this is
                     defined by the derive_npiv_map method.
    :return: The mappings removed from the VIOS wrapper.
    """
    resp_list = []
    for matching_map in find_maps(v_wrap.vfc_mappings, client_lpar_id,
                                  client_adpt=client_adpt,
                                  port_map=port_map):
        v_wrap.vfc_mappings.remove(matching_map)
        resp_list.append(matching_map)
    return resp_list


def find_vios_for_port_map(vios_wraps, port_map):
    """Finds the appropriate VIOS wrapper for a given port map.

    Note that the algorithm first checks based off of the client WWPNs.  If
    the client WWPNs can not be found (perhaps the map is still -1 -1 from
    the derive_base_npiv_map) then the physical port WWPN will be checked.

    :param vios_wraps: A list of Virtual I/O Server wrapper objects.
    :param port_map: The port mapping (as defined by the derive_npiv_map
                     method).
    :return: The Virtual I/O Server wrapper that supports the port map.
    """
    # Check first based off the client WWPNs.  Note that this may be -1 -1
    # in which case it will return nothing
    vios_w = find_vios_for_vfc_wwpns(vios_wraps, port_map[1].split())[0]
    if vios_w:
        return vios_w

    # If we had nothing, check based off the physical port WWPN.  The
    # reason this is not the first check is because the mapping may be mid
    # live migration, thus pointing to a source.  But if that was the case
    # then the first check would have returned the right WWPNs.  The only
    # time this should be hit is in the middle of a create operation.
    return find_vios_for_wwpn(vios_wraps, port_map[0])[0]


def add_map(vios_w, host_uuid, lpar_uuid, port_map, error_if_invalid=True,
            lpar_slot_num=None):
    """Adds a vFC mapping to a given VIOS wrapper.
    These changes are not flushed back to the REST server.  The wrapper
    itself is simply modified.

    :param vios_w: VIOS EntryWrapper representing the Virtual I/O Server
                   whose VFC mappings are to be updated.
    :param host_uuid: The pypowervm UUID of the host.
    :param lpar_uuid: The pypowervm UUID of the client LPAR to attach to.
    :param port_map: The port mapping (as defined by the derive_npiv_map
                     method).
    :param error_if_invalid: (Optional, Default: True) If the port mapping
                             physical port can not be found, raise an
                             error.
    :param lpar_slot_num: (Optional, Default: None) The client adapter
                          VirtualSlotNumber to be set.  If None the next
                          available slot would be used.
    :return: The VFCMapping that was added or updated with a missing
             backing port.  If the mapping already existed then None is
             returned.
    """
    # This is meant to find the physical port.  Can run against a single
    # element.  We assume invoker has passed correct VIOS.
    new_vios_w, p_port = find_vios_for_wwpn([vios_w], port_map[0])
    if new_vios_w is None:
        if error_if_invalid:
            # Log the payload in the response.
            LOG.warning(_("Unable to find appropriate VIOS. The payload "
                          "provided was likely insufficient. The payload "
                          "data is:\n %s)"),
                        vios_w.toxmlstring(pretty=True))
            raise e.UnableToDerivePhysicalPortForNPIV(wwpn=port_map[0],
                                                      vio_uri=vios_w.href)
        else:
            return None

    # A fused '-1 -1' marker means the API will generate the client WWPNs.
    v_wwpns = None
    if port_map[1] != _FUSED_ANY_WWPN:
        v_wwpns = [u.sanitize_wwpn_for_api(x) for x in port_map[1].split()]

    if v_wwpns is not None:
        for vfc_map in vios_w.vfc_mappings:
            if (vfc_map.client_adapter is None or
                    vfc_map.client_adapter.wwpns is None):
                continue

            if set(vfc_map.client_adapter.wwpns) != set(v_wwpns):
                continue

            # If we reach this point, we know that we have a matching map.
            # Check that the physical port is set in the mapping.
            if vfc_map.backing_port:
                LOG.debug("Matching existing vfc map found with backing port:"
                          " %s", vfc_map.backing_port.wwpn)
                # The attach of this volume, for this vFC mapping is
                # complete.  Nothing else needs to be done, exit the method.
                return None
            else:
                LOG.info(_("The matched VFC port map has no backing port set."
                           " Adding %(port)s to mapping for client wwpns: "
                           "%(wwpns)s"),
                         {'port': p_port.name, 'wwpns': v_wwpns})
                # Build the backing_port and add it to the vfc_map.
                vfc_map.backing_port = bp.PhysFCPort.bld_ref(
                    vios_w.adapter, p_port.name, ref_tag='Port')
                return vfc_map

    # However, if we hit here, then we need to create a new mapping and
    # attach it to the VIOS mapping
    vfc_map = pvm_vios.VFCMapping.bld(vios_w.adapter, host_uuid, lpar_uuid,
                                      p_port.name, client_wwpns=v_wwpns,
                                      lpar_slot_num=lpar_slot_num)
    vios_w.vfc_mappings.append(vfc_map)
    return vfc_map


def has_client_wwpns(vios_wraps, client_wwpn_pair):
    """Returns the vios wrapper and vfc map if the client WWPNs already
    exist.

    :param vios_wraps: The VIOS wrappers.  Should be queried with the
                       VIO_FMAP extended attribute.
    :param client_wwpn_pair: The pair (list or set) of the client WWPNs.
    :return vios_w: The VIOS wrapper containing the wwpn pair.  None if
                    none of the wrappers contain the pair.
    :return vfc_map: The mapping containing the client pair.  May be None.
    """
    client_wwpn_pair = set([u.sanitize_wwpn_for_api(x)
                            for x in client_wwpn_pair])

    for vios_wrap in vios_wraps:
        for vfc_map in vios_wrap.vfc_mappings:
            if vfc_map.client_adapter is None:
                continue

            pair = set([u.sanitize_wwpn_for_api(x)
                        for x in vfc_map.client_adapter.wwpns])
            if pair == client_wwpn_pair:
                return vios_wrap, vfc_map

    return None, None


def build_migration_mappings_for_fabric(vios_wraps, p_port_wwpns,
                                        client_slots):
    """Builds the vFC migration mappings for a given fabric.

    This method will build the migration mappings for a given fabric.
    The response is a list of strings that can be used in the migration.py

    Note: If you have multiple fabrics, then each fabric will need to
    independently call this method with the appropriate p_port_wwpns.

    Note: This must be run on the destination server before the migration.
It is typically input back to the source server for the migration call. :param vios_wraps: The VIOS wrappers for the target system. Must have the VIO_FMAP xag specified. :param p_port_wwpns: The physical port WWPNs that can be used for this specific fabric. May span multiple VIOSes, but each must be part of the vios_wraps. :param client_slots: A list of integers which represent the *source* system's LPAR virtual Fibre Channel slots that are participating in this fabric. :return: List of mappings that can be passed into the migration.py for the live migration. The format is defined within the migration.py, migrate_lpar method. """ basic_mappings = derive_base_npiv_map(vios_wraps, p_port_wwpns, len(client_slots)) resp = [] for basic_map, client_slot in zip(basic_mappings, client_slots): # Find the appropriate VIOS hosting this physical port. vios_w, port = find_vios_for_wwpn(vios_wraps, basic_map[0]) # The format is: # virtual-slot-number/vios-lpar-name/vios-lpar-ID # [/[vios-virtual-slot-number][/[vios-fc-port-name]]] # # We do not specify the vios-virtual-slot-number. resp.append(str(client_slot) + "/" + vios_w.name + "/" + str(vios_w.id) + "//" + port.name) return resp def _split_ports_per_fabric(slot_grouping, fabric_data): """Splits the slots per fabric which are to be placed on the same VIOS. :param slot_grouping: The slots which are to be placed in the same vios Ex: [3, 6] Here the slots 3 and 6 are to be placed on the same vios. :param fabric_data: Dictionary where the key is the fabric name. The value is another dictionary with the slots and the p_port_wwpns. Ex: { 'A': {'slots': [3, 4, 5], p_port_wwpns: [1, 2, 3] }, 'B': {'slots': [6, 7, 8], p_port_wwpns: [4, 5] } } The slot indicates which slots from the client slots align with which fabric. :return resp: The slots which can be placed on the same VIOS alone are returned. 
                  {'A': {'slots': [3], p_port_wwpns: [1, 2, 3] },
                   'B': {'slots': [6], p_port_wwpns: [4, 5] } }
    """
    resp = {}
    for fabric in fabric_data:
        # Keep only the fabric's slots that belong to this peer grouping.
        slots = [x for x in fabric_data[fabric]['slots']
                 if x in slot_grouping]
        if not slots:
            continue
        resp[fabric] = {'slots': slots,
                        'p_port_wwpns': fabric_data[fabric]['p_port_wwpns']}
    return resp


def _does_vios_support_split_map(vios_w, split_map):
    """Split_map provided by _split_ports_per_fabric.

    :param vios_w: The VIOS wrapper to validate if the split map can match
    :param split_map: This contains the physical ports and the slots
                      to be matched on the VIOS
                      Ex: {'A': {'p_port_wwpns': ['phy_port1'], 'slots': [3]}
    Check if the number of slots required can be satisfied by the given VIOS.
    """
    for fabric_map in six.itervalues(split_map):
        # One physical port is needed per client slot on this fabric.
        needed_pports = len(fabric_map['slots'])
        fabric_pports_on_vios = _find_ports_on_vio(
            vios_w, fabric_map['p_port_wwpns'])
        if len(fabric_pports_on_vios) < needed_pports:
            return False
    return True


def build_migration_mappings(vios_wraps, fabric_data, slot_peers):
    """Builds the vFC migration mappings.

    Looks holistically at the system. Should generally be used instead of
    build_migration_mappings_for_fabric.

    :param vios_wraps: The VIOS wrappers for the target system. Must have
                       the VIO_FMAP xag specified.
    :param fabric_data: Dictionary where the key is the fabric name. The
                        value is another dictionary with the slots and the
                        p_port_wwpns.
                        Ex: { 'A': {'slots': [3, 4, 5],
                                    p_port_wwpns: [1, 2, 3] },
                              'B': {'slots': [6, 7, 8],
                                    p_port_wwpns: [4, 5] } }
                        The slot indicates which slots from the client slots
                        align with which fabric.
    :param slot_peers: An array of arrays. Indicates all of the slots that
                       need to be grouped together on a given VIOS.
                       Ex: [ [3, 6, 7], [4, 8], [5] ]
                       Indicates that (based on the data from fabric_data)
                       one VIOS must host client slot 3 (from fabric A) and
                       slots 6 and 7 (from fabric B). Then another VIOS must
                       host client slot 4 (from fabric A) and client slot 8
                       (from fabric B). And the third VIOS must only host
                       client slot 5 (from fabric A).
    :return: List of mappings that can be passed into the migration.py
             for the live migration. The format is defined within the
             migration.py, migrate_lpar method.
    """
    # First sort the slot peers. The one with the most peers needs to go first
    # then work down from there.
    slot_peers = sorted(slot_peers, key=len, reverse=True)

    vios_to_split_map = {}

    # We create a map of all the VIOSes and their corresponding slots
    for peer_grouping in slot_peers:
        split_map = _split_ports_per_fabric(peer_grouping, fabric_data)
        LOG.debug("split_map %s" % split_map)
        for vios_w in vios_wraps:
            LOG.debug("Checking vios name %(name)s vios fc ports %(port)s"
                      % dict(name=vios_w.name,
                             port=[port.wwpn for port in vios_w.pfc_ports]))
            # Each VIOS may only be claimed by one peer grouping.
            if vios_w in vios_to_split_map.keys():
                continue
            if not _does_vios_support_split_map(vios_w, split_map):
                continue

            vios_to_split_map[vios_w] = split_map
            break
        else:
            # NOTE: this 'else' belongs to the 'for' loop - it only runs when
            # the loop exhausted the VIOSes without hitting 'break'.
            # When no vios match is found for peer group error
            raise e.UnableToFindFCPortMap()

    LOG.debug("vios_to_split_map %s" % vios_to_split_map)
    resp = []
    # Each VIOS has a split map. the split map contains the fabric (as
    # the key), the physical port wwpns, and the slots.
    for vios_w, split_map in six.iteritems(vios_to_split_map):
        for fabric_map in six.itervalues(split_map):
            p_port_wwpns = fabric_map['p_port_wwpns']
            slots = fabric_map['slots']

            basic_mappings = derive_base_npiv_map([vios_w], p_port_wwpns,
                                                  len(slots))
            for basic_map, client_slot in zip(basic_mappings, slots):
                # Find the appropriate VIOS hosting this physical port.
                # NOTE(review): this rebinds the outer loop variable vios_w;
                # since the search list is [vios_w] itself, the same wrapper
                # is returned and behavior is unaffected.
                vios_w, port = find_vios_for_wwpn([vios_w], basic_map[0])

                # The format is:
                #  virtual-slot-number/vios-lpar-name/vios-lpar-ID
                #  [/[vios-virtual-slot-number][/[vios-fc-port-name]]]
                #
                # We do not specify the vios-virtual-slot-number.
                items = (str(client_slot), vios_w.name, str(vios_w.id), '',
                         port.name)
                resp.append("/".join(items))
    return resp
pypowervm-1.1.24/pypowervm/tasks/scsi_mapper.py0000664000175000017500000011037413571367171021332 0ustar  neoneo00000000000000# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Manage mappings of virtual storage devices from VIOS to LPAR."""

from oslo_concurrency import lockutils as lock
from oslo_log import log as logging

from pypowervm import const as c
from pypowervm import exceptions as exc
from pypowervm.i18n import _
from pypowervm import util
from pypowervm.utils import retry as pvm_retry
from pypowervm.utils import uuid
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios

LOG = logging.getLogger(__name__)


def _argmod(this_try, max_tries, *args, **kwargs):
    """Retry argmod to change 'vios' arg from VIOS wrapper to a string UUID.

    This is so that etag mismatches trigger a fresh GET.
    """
    LOG.warning(_('Retrying modification of SCSI Mapping.'))
    argl = list(args)
    # Second argument is vios.
    if isinstance(argl[1], pvm_vios.VIOS):
        argl[1] = argl[1].uuid
    return argl, kwargs


@lock.synchronized('vscsi_mapping')
@pvm_retry.retry(tries=60, argmod_func=_argmod,
                 delay_func=pvm_retry.STEPPED_RANDOM_DELAY)
def add_vscsi_mapping(host_uuid, vios, lpar_uuid, storage_elem, fuse_limit=32,
                      lpar_slot_num=None, lua=None):
    """Will add a vSCSI mapping to a Virtual I/O Server.
    This method is used to connect a storage element (either a vDisk, vOpt,
    PV or LU) that resides on a Virtual I/O Server to a Virtual Machine.

    This is achieved using a 'vSCSI Mapping'. The invoker does not need to
    interact with the mapping.

    A given mapping is essentially a 'vSCSI bus', which can host multiple
    storage elements. This method has a fuse limit which throttles the number
    of devices on a given vSCSI bus. The throttle should be lower if the
    storage elements are high I/O, and higher otherwise.

    :param host_uuid: Not used.
    :param vios: The virtual I/O server to which the mapping should be
                 added. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param lpar_uuid: The UUID of the LPAR that will have the connected
                      storage.
    :param storage_elem: The storage element (either a vDisk, vOpt, LU or PV)
                         that is to be connected.
    :param fuse_limit: (Optional, Default: 32) The max number of devices to
                       allow on one scsi bus before creating a second SCSI
                       bus.
    :param lpar_slot_num: (Optional, Default: None) The slot number for the
                          client LPAR to use in the mapping. If None, the
                          next available slot number is assigned by the
                          server.
    :param lua: (Optional. Default: None) Logical Unit Address to set on the
                TargetDevice. If None, the LUA will be assigned by the
                server. Should be specified for all of the VSCSIMappings for
                a particular bus, or none of them.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    """
    adapter = storage_elem.adapter
    # If the 'vios' param is a string UUID, retrieve the VIOS wrapper.
    if not isinstance(vios, pvm_vios.VIOS):
        vios_w = pvm_vios.VIOS.wrap(
            adapter.read(pvm_vios.VIOS.schema_type, root_id=vios,
                         xag=[c.XAG.VIO_SMAP]))
    else:
        vios_w = vios

    # If the storage element is already there, do nothing.
    if find_maps(vios_w.scsi_mappings, client_lpar_id=lpar_uuid,
                 stg_elem=storage_elem):
        LOG.info(_("Found existing mapping of %(stg_type)s storage element "
                   "%(stg_name)s from Virtual I/O Server %(vios_name)s to "
                   "client LPAR %(lpar_uuid)s."),
                 {'stg_type': storage_elem.schema_type,
                  'stg_name': storage_elem.name,
                  'vios_name': vios_w.name,
                  'lpar_uuid': lpar_uuid})
        return vios_w

    # Build the mapping. The host_uuid is not used by build_vscsi_mapping
    # (see its docstring), so None is passed.
    scsi_map = build_vscsi_mapping(None, vios_w, lpar_uuid, storage_elem,
                                   fuse_limit=fuse_limit,
                                   lpar_slot_num=lpar_slot_num, lua=lua)

    # Add the mapping. It may have been updated to have a different client
    # and server adapter. It may be the original (which creates a new client
    # and server pair).
    vios_w.scsi_mappings.append(scsi_map)

    LOG.info(_("Creating mapping of %(stg_type)s storage element %(stg_name)s "
               "from Virtual I/O Server %(vios_name)s to client LPAR "
               "%(lpar_uuid)s."),
             {'stg_type': storage_elem.schema_type,
              'stg_name': storage_elem.name,
              'vios_name': vios_w.name,
              'lpar_uuid': lpar_uuid})

    return vios_w.update()


def build_vscsi_mapping(host_uuid, vios_w, lpar_uuid, storage_elem,
                        fuse_limit=32, lpar_slot_num=None, lua=None,
                        target_name=None):
    """Will build a vSCSI mapping that can be added to a VIOS.

    This method is used to create a mapping element (for either a vDisk,
    vOpt, PV or LU) that connects a Virtual I/O Server to a LPAR.

    A given mapping is essentially a 'vSCSI bus', which can host multiple
    storage elements. This method has a fuse limit which throttles the number
    of devices on a given vSCSI bus. The throttle should be lower if the
    storage elements are high I/O, and higher otherwise.

    :param host_uuid: Not used.
    :param vios_w: The virtual I/O server wrapper that the mapping is
                   intended to be attached to. The method will not call the
                   update against the API. It will only update the in memory
                   wrapper.
    :param lpar_uuid: The UUID of the LPAR that will have the connected
                      storage.
    :param storage_elem: The storage element (either a vDisk, vOpt, LU or PV)
                         that is to be connected.
    :param fuse_limit: (Optional, Default: 32) The max number of devices to
                       allow on one scsi bus before creating a second SCSI
                       bus.
    :param lpar_slot_num: (Optional, Default: None) The slot number for the
                          client LPAR to use in the mapping. If None, the
                          next available slot number is assigned by the
                          server.
    :param lua: (Optional. Default: None) Logical Unit Address to set on the
                TargetDevice. If None, the LUA will be assigned by the
                server. Should be specified for all of the VSCSIMappings for
                a particular bus, or none of them.
    :param target_name: (Optional, Default: None) The name of the Target
                        mapping. If None, the target_name will be assigned by
                        the server.
    :return: The SCSI mapping that can be added to the vios_w. This does not
             do any updates to the wrapper itself.
    """
    adapter = storage_elem.adapter
    # Get the client lpar href
    lpar_href = pvm_vios.VSCSIMapping.crt_related_href(adapter, None,
                                                       lpar_uuid)

    # Separate out the mappings into the applicable ones for this client.
    separated_mappings = _separate_mappings(vios_w, lpar_href)

    # Used if we need to clone an existing mapping
    clonable_map = None

    # What we need to figure out is, within the existing mappings, can we
    # reuse the existing client and server adapter (which we can only do if
    # below the fuse limit), or if we need to create a new adapter pair.
    for mapping_list in separated_mappings.values():
        if len(mapping_list) < fuse_limit:
            # Swap in the first maps client/server adapters into the existing
            # map. We call the semi-private methods as this is not something
            # that an 'update' would do...this is part of the 'create' flow.
            clonable_map = mapping_list[0]
            break

    # If we have a clonable map, we can replicate that. Otherwise we need
    # to build from scratch.
    if clonable_map is not None:
        scsi_map = pvm_vios.VSCSIMapping.bld_from_existing(
            clonable_map, storage_elem, lpar_slot_num=lpar_slot_num, lua=lua,
            target_name=target_name)
    else:
        scsi_map = pvm_vios.VSCSIMapping.bld(
            adapter, None, lpar_uuid, storage_elem,
            lpar_slot_num=lpar_slot_num, lua=lua, target_name=target_name)
    return scsi_map


def _separate_mappings(vios_w, client_href):
    """Separates out the systems existing mappings into silos.

    :param vios_w: The pypowervm wrapper for the VIOS.
    :param client_href: The REST URI of the client to separate the mappings
                        for. May be a ROOT or CHILD URI.
    :return: A dictionary where the key is the server adapter (which is
             bound to the client). The value is the list mappings that use
             the server adapter.
    """
    # The key is server_adapter.udid, the value is the list of applicable
    # mappings to the server adapter.
    resp = {}
    client_lpar_uuid = util.get_req_path_uuid(client_href)
    existing_mappings = vios_w.scsi_mappings
    for existing_map in existing_mappings:
        ex_lpar_uuid = util.get_req_path_uuid(
            existing_map.client_lpar_href or '')
        if (ex_lpar_uuid == client_lpar_uuid and
                # ignore orphaned mappings
                existing_map.client_adapter is not None):
            # Valid map to consider
            key = existing_map.server_adapter.udid
            if resp.get(key) is None:
                resp[key] = []
            resp[key].append(existing_map)
    return resp


def add_map(vios_w, scsi_mapping):
    """Will add the mapping to the VIOS wrapper, if not already included.

    This method has the logic in place to detect if the storage from the
    mapping is already part of a SCSI mapping. If so, it will not re-add the
    mapping to the VIOS wrapper.

    The new mapping is added to the wrapper, but it is up to the invoker to
    call the update method on the wrapper.

    :param vios_w: The Virtual I/O Server wrapping to add the mapping to.
    :param scsi_mapping: The scsi mapping to include in the VIOS.
    :return: The scsi_mapping that was added. None if the mapping was
             already on the vios_w.
    """
    # Check to see if the mapping is already in the system.
    lpar_uuid = util.get_req_path_uuid(scsi_mapping.client_lpar_href,
                                       preserve_case=True)
    existing_mappings = find_maps(
        vios_w.scsi_mappings, client_lpar_id=lpar_uuid,
        stg_elem=scsi_mapping.backing_storage)
    if len(existing_mappings) > 0:
        return None

    vios_w.scsi_mappings.append(scsi_mapping)
    return scsi_mapping


def remove_maps(vwrap, client_lpar_id, match_func=None, include_orphans=True):
    """Remove one or more SCSI mappings from a VIOS wrapper.

    The changes are not flushed back to the REST server.

    :param vwrap: VIOS EntryWrapper representing the Virtual I/O Server whose
                  SCSI mappings are to be updated.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param match_func: (Optional) Matching function suitable for passing to
                       find_maps. See that method's match_func parameter.
                       Defaults to None (match only on client_lpar_id).
    :param include_orphans: (Optional) An "orphan" contains a server adapter
                            but no client adapter. If this parameter is
                            True, mappings with no client adapter will be
                            considered for removal. If False, mappings with
                            no client adapter will be left alone, regardless
                            of any other criteria. Default: True (remove
                            orphans).
    :return: The list of removed mappings.
    """
    resp_list = []
    # find_maps builds a new list, so it is safe to .remove() from
    # vwrap.scsi_mappings while iterating over its result.
    for matching_map in find_maps(
            vwrap.scsi_mappings, client_lpar_id=client_lpar_id,
            match_func=match_func, include_orphans=include_orphans):
        vwrap.scsi_mappings.remove(matching_map)
        resp_list.append(matching_map)
    return resp_list


def detach_storage(vwrap, client_lpar_id, match_func=None):
    """Detach the storage from all matching SCSI mappings.

    We do this by removing the Storage and TargetDevice child elements. This
    method only updates the vwrap. It does not POST back to the REST server.
    It does not lock.

    :param vwrap: VIOS EntryWrapper representing the Virtual I/O Server whose
                  SCSI mappings are to be updated.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param match_func: (Optional) Matching function suitable for passing to
                       find_maps. See that method's match_func parameter.
                       Defaults to None (match only on client_lpar_id).
    :return: The list of SCSI mappings which were modified, in their original
             (storage-attached) form.
    """
    # Rather than modifying the matching mappings themselves, we remove them
    # and recreate them without storage.
    resp_list = []
    for match in find_maps(
            vwrap.scsi_mappings, client_lpar_id=client_lpar_id,
            match_func=match_func, include_orphans=True):
        vwrap.scsi_mappings.remove(match)
        resp_list.append(match)
        # bld_from_existing with storage_elem=None yields the storage-less
        # copy of the mapping.
        vwrap.scsi_mappings.append(
            pvm_vios.VSCSIMapping.bld_from_existing(match, None))
    return resp_list


@lock.synchronized('vscsi_mapping')
@pvm_retry.retry(tries=60, argmod_func=_argmod,
                 delay_func=pvm_retry.STEPPED_RANDOM_DELAY)
def _modify_storage_elem(adapter, vios, client_lpar_id, match_func,
                         new_media):
    """Replaces the storage element of a vSCSI mapping.

    Will change the vSCSI Mapping backing storage element if the match_func
    indicates that the mapping is a match. The match_func is only invoked if
    the client_lpar_id matches. If more than one match exists, the VIOS will
    not update, and exception will be raised.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server where the mapping is being changed.
                 This may be the VIOS's UUID string OR an existing VIOS
                 EntryWrapper. If the latter, it must have been retrieved
                 using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param match_func: Matching function suitable for passing to find_maps.
                       See that method's match_func parameter.
    :param new_media: The replacement VOptMedia backing storage element.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: The SCSI mapping that was remapped.
    """
    # If the 'vios' param is a string UUID, retrieve the VIOS wrapper.
    if not isinstance(vios, pvm_vios.VIOS):
        vios = pvm_vios.VIOS.get(adapter, root_id=vios,
                                 xag=[c.XAG.VIO_SMAP])

    map_modified = find_maps(
        vios.scsi_mappings, client_lpar_id=client_lpar_id,
        match_func=match_func, include_orphans=True)
    new_media_maps = find_maps(
        vios.scsi_mappings, client_lpar_id=client_lpar_id,
        match_func=gen_match_func(pvm_stor.VOptMedia,
                                  names=[new_media.name]))
    # Ensure only one map match is returned for current stg element
    if len(map_modified) != 1:
        raise exc.SingleMappingNotFoundRemapError(
            num_mappings=len(map_modified))
    # Ensure no mappings already exist for new stg element
    if len(new_media_maps) > 0:
        raise exc.StorageMapExistsRemapError(
            stg_name=new_media.name, lpar_uuid=client_lpar_id)

    map_modified[0].backing_storage = new_media

    vios = vios.update()

    return vios, map_modified[0]


@lock.synchronized('vscsi_mapping')
@pvm_retry.retry(tries=60, argmod_func=_argmod,
                 delay_func=pvm_retry.STEPPED_RANDOM_DELAY)
def _remove_storage_elem(adapter, vios, client_lpar_id, match_func):
    """Removes the storage element from a SCSI bus and clears out bus.

    Will remove the vSCSI Mappings from the VIOS if the match_func indicates
    that the mapping is a match. The match_func is only invoked if the
    client_lpar_id matches.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server from which the mapping should be
                 removed. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param match_func: Matching function suitable for passing to find_maps.
                       See that method's match_func parameter.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: The list of the storage elements that were removed from the
             maps.
    """
    # If the 'vios' param is a string UUID, retrieve the VIOS wrapper.
    if not isinstance(vios, pvm_vios.VIOS):
        vios = pvm_vios.VIOS.wrap(
            adapter.read(pvm_vios.VIOS.schema_type, root_id=vios,
                         xag=[c.XAG.VIO_SMAP]))

    resp_list = remove_maps(vios, client_lpar_id, match_func=match_func)

    # Update the VIOS, but only if we actually removed mappings
    if resp_list:
        vios = vios.update()

    # return the (possibly updated) VIOS and the list of removed backing
    # storage elements.
    return vios, [rmap.backing_storage
                  for rmap in resp_list
                  if rmap.backing_storage is not None]


def gen_match_func(wcls, name_prop='name', names=None, prefixes=None,
                   udids=None):
    """Generate a matching function for find_maps' match_func param.

    :param wcls: The Wrapper class of the object being matched.
    :param name_prop: The property of the Wrapper class on which to match.
    :param names: (Optional) A list of names to match. If names and prefixes
                  are both None or empty, all inputs of the specified wcls
                  will be matched.
    :param prefixes: (Optional) A list of prefixes that can be specified
                     to serve as identifiers for potential matches. Ignored
                     if names is specified. If names and prefixes are both
                     None or empty, all inputs of the specified wcls will be
                     matched.
    :param udids: (Optional) A list of UDIDs that can be specified to serve
                  as identifiers for potential matches. Ignored if names or
                  prefixes are specified. If all three are None or empty,
                  all inputs of the specified wcls will be matched.
    :return: A callable matching function suitable for passing to the
             match_func parameter of the find_maps method.
    """
    def match_func(existing_elem):
        # Criteria precedence: names, then prefixes, then udids - the first
        # one specified is the only one applied.
        if not isinstance(existing_elem, wcls):
            return False
        if names:
            return getattr(existing_elem, name_prop) in names
        if prefixes:
            for prefix in prefixes:
                if getattr(existing_elem, name_prop).startswith(prefix):
                    return True
            # prefixes specified, but none matched
            return False
        if udids:
            return existing_elem.udid in udids
        # No names, prefixes, or UDIDs specified - hit everything
        return True
    return match_func


def find_maps(mapping_list, client_lpar_id=None, match_func=None,
              stg_elem=None, include_orphans=False):
    """Filter a list of scsi mappings by LPAR ID/UUID and a matching function.

    :param mapping_list: The mappings to filter. Iterable of VSCSIMapping.
    :param client_lpar_id: Integer short ID or string UUID of the LPAR on the
                           client side of the mapping. Note that the UUID
                           form relies on the presence of the
                           client_lpar_href field. Some mappings lack this
                           field, and would therefore be ignored. If
                           client_lpar_id is not passed it will return
                           matching mappings for all the lpar_ids.
    :param match_func: Callable with the following specification:

        def match_func(storage_elem)
            param storage_elem: A backing storage element wrapper (VOpt,
                                VDisk, PV, or LU) to be analyzed. May be
                                None (some mappings have no backing storage).
            return: True if the storage_elem's mapping should be included;
                    False otherwise.

                       If neither match_func nor stg_elem is specified, the
                       default is to match everything - that is, find_maps
                       will return all mappings for the specified
                       client_lpar_id. It is illegal to specify both
                       match_func and stg_elem.
    :param stg_elem: Match mappings associated with a specific storage
                     element. Effectively, this generates a default
                     match_func which matches on the type and name of the
                     storage element. If neither match_func nor stg_elem is
                     specified, the default is to match everything - that is,
                     find_maps will return all mappings for the specified
                     client_lpar_id. It is illegal to specify both match_func
                     and stg_elem.
    :param include_orphans: An "orphan" contains a server adapter but no
                            client adapter. If this parameter is True,
                            mappings with no client adapter will still be
                            considered for inclusion. If False, mappings
                            with no client adapter will be skipped entirely,
                            regardless of any other criteria.
    :return: A list comprising the subset of the input mapping_list whose
             client LPAR IDs match client_lpar_id and whose backing storage
             elements satisfy match_func.
    :raise ValueError: If both match_func and stg_elem are specified.
    """
    if match_func and stg_elem:
        raise ValueError(_("Must not specify both match_func and stg_elem."))
    if not match_func:
        # Default no filter
        match_func = lambda x: True
    if stg_elem:
        # Match storage element on type and name
        match_func = lambda stg_el: (
            stg_el is not None and
            stg_el.schema_type == stg_elem.schema_type and
            stg_el.name == stg_elem.name)

    is_uuid = False
    client_id = None
    if client_lpar_id:
        is_uuid, client_id = uuid.id_or_uuid(client_lpar_id)

    matching_maps = []
    for existing_scsi_map in mapping_list:
        # No client, continue on unless including orphans.
        if not include_orphans and existing_scsi_map.client_adapter is None:
            continue

        # If to a different VM, continue on.
        href = existing_scsi_map.client_lpar_href
        if is_uuid and (not href or client_id != util.get_req_path_uuid(
                href, preserve_case=True)):
            continue
        elif (client_lpar_id and not is_uuid and
                # Use the server adapter in case this is an orphan.
                existing_scsi_map.server_adapter.lpar_id != client_id):
            continue

        if match_func(existing_scsi_map.backing_storage):
            # Found a match!
            matching_maps.append(existing_scsi_map)

    return matching_maps


def index_mappings(maps):
    """Create an index dict of SCSI mappings to facilitate reverse lookups.

    :param maps: Iterable of VSCSIMapping to index.
    :return: A dict of the form:
        { 'by-lpar-id': { str(lpar_id): [VSCSIMapping, ...], ... },
          'by-lpar-uuid': { lpar_uuid: [VSCSIMapping, ...], ... },
          'by-storage-udid': { storage_udid: [VSCSIMapping, ...], ...
          }
        }
    ...where:
    - lpar_id is the short integer ID (not UUID) of the LPAR, stringified.
    - lpar_uuid is the UUID of the LPAR.
    - storage_udid is the Unique Device Identifier (UDID) of the backing
      Storage element associated with the mapping.
    While the outermost dict is guaranteed to have all keys, the inner dicts
    may be empty. However, if an inner dict has a member, its list of
    mappings is guaranteed to be nonempty.
    """
    ret = {'by-lpar-id': {}, 'by-lpar-uuid': {}, 'by-storage-udid': {}}

    def add(key, ident, smap):
        """Add a mapping to an index.

        :param key: The top-level key name ('by-lpar-uuid', etc.)
        :param ident: The lower-level key name (e.g. the lpar_uuid)
        :param smap: The mapping to add to the index.
        """
        # NOTE(review): str(None) is 'None' (truthy), so an ident of None
        # would be indexed under the literal key 'None'. The server adapter
        # is documented as always present, so this should not arise in
        # practice - confirm against callers.
        ident = str(ident)
        if not ident:
            return
        if ident not in ret[key]:
            ret[key][ident] = []
        ret[key][ident].append(smap)

    for smap in maps:
        clhref = smap.client_lpar_href
        if clhref:
            add('by-lpar-uuid',
                util.get_req_path_uuid(clhref, preserve_case=True), smap)
        clid = None
        # Mapping may not have a client adapter, but will always have a server
        # adapter - so get the LPAR ID from the server adapter.
        if smap.server_adapter:
            clid = smap.server_adapter.lpar_id
        add('by-lpar-id', clid, smap)

        stg = smap.backing_storage
        if stg:
            add('by-storage-udid', stg.udid, smap)

    return ret


def modify_vopt_mapping(adapter, vios, client_lpar_id, new_media,
                        media_name=None, udid=None):
    """Will remap VOpt media mapping with another backing storage element.

    This method will change the VOptMedia storage element associated with a
    specific SCSI mapping. This is found by searching media name or udid for
    existing storage, and will raise exception if zero or more than one is
    returned.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server on which the mapping should be
                 modified. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param new_media: VOptMedia wrapper representing the new storage element
                      to be associated with the specified SCSI mapping.
    :param media_name: (Optional) The name of the current virtual optical
                       media to replace on the SCSI bus.
    :param udid: (Optional) The UDID of the current virtual optical media to
                 replace on the SCSI bus. Ignored if media_name is
                 specified. If neither is specified, search will return all
                 mappings on LPAR and fail if there's more than one.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: The remapped SCSI mapping with new backing storage.
    :raises: SingleMappingNotFoundRemapError: If the number of VOptMedia
             matches found for the given media name and/or udid is not one.
    """
    names = [media_name] if media_name else None
    udids = [udid] if udid else None
    return _modify_storage_elem(
        adapter, vios, client_lpar_id, gen_match_func(
            pvm_stor.VOptMedia, name_prop='media_name', names=names,
            udids=udids), new_media)


def remove_vopt_mapping(adapter, vios, client_lpar_id, media_name=None,
                        udid=None):
    """Will remove the mapping for VOpt media.

    This method will remove the mapping between the virtual optical media
    and the client partition. It does not delete the virtual optical media.
    Will leave other elements on the vSCSI bus intact.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server from which the mapping should be
                 removed. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param media_name: (Optional) The name of the virtual optical media to
                       remove from the SCSI bus.
                       If both media_name and udid are None, will remove all
                       virtual optical media mappings associated with the
                       specified client_lpar_id
    :param udid: (Optional) The UDID of the virtual optical media to remove
                 from the SCSI bus. Ignored if media_name is specified. If
                 both media_name and udid are None, will remove all virtual
                 optical media mappings associated with the client_lpar_id.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: A list of the backing VOpt media that was removed.
    """
    # Criteria precedence (name over udid) is handled by gen_match_func.
    names = [media_name] if media_name else None
    udids = [udid] if udid else None
    return _remove_storage_elem(
        adapter, vios, client_lpar_id, gen_match_func(
            pvm_stor.VOptMedia, name_prop='media_name', names=names,
            udids=udids))


def remove_vdisk_mapping(adapter, vios, client_lpar_id, disk_names=None,
                         disk_prefixes=None, udids=None):
    """Will remove the mapping for VDisk media.

    This method will remove the mapping between the virtual disk and the
    client partition. It does not delete the virtual disk. Will leave other
    elements on the vSCSI bus intact.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server from which the mapping should be
                 removed. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param disk_names: (Optional) A list of names of the virtual disk to
                       remove from the SCSI bus. If disk_names,
                       disk_prefixes, and udids are all None/empty, will
                       remove all virtual disk mappings associated with the
                       specified client_lpar_id.
    :param disk_prefixes: (Optional) A list of prefixes that can be specified
                          to serve as identifiers for potential disks.
                          Ignored if disk_names is specified. If disk_names,
                          disk_prefixes, and udids are all None/empty, will
                          remove all virtual disk mappings associated with
                          the specified client_lpar_id.
    :param udids: (Optional) A list of UDIDs of the virtual disks to remove
                  from the SCSI bus. Ignored if disk_names or disk_prefixes
                  are specified. If all three are None/empty, will remove
                  all virtual disk mappings associated with the specified
                  client_lpar_id.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: A list of the backing VDisk objects that were removed.
    """
    return _remove_storage_elem(
        adapter, vios, client_lpar_id, gen_match_func(
            pvm_stor.VDisk, names=disk_names, prefixes=disk_prefixes,
            udids=udids))


def remove_lu_mapping(adapter, vios, client_lpar_id, disk_names=None,
                      disk_prefixes=None, udids=None):
    """Remove mappings for one or more SSP LUs associated with an LPAR.

    This method will remove the mapping between the Logical Unit and the
    client partition. It does not delete the LU. Will leave other elements
    on the vSCSI bus intact.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server from which the mapping should be
                 removed. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param disk_names: (Optional) A list of names of the LUs to remove from
                       the SCSI bus. If disk_names, disk_prefixes, and udids
                       are all None/empty, will remove all logical unit
                       mappings associated with the specified client_lpar_id.
    :param disk_prefixes: (Optional) A list of prefixes that can be specified
                          to serve as identifiers for potential disks.
                          Ignored if disk_names is specified. If disk_names,
                          disk_prefixes, and udids are all None/empty, will
                          remove all logical unit mappings associated with
                          the specified client_lpar_id.
    :param udids: (Optional) A list of UDIDs of the logical units to remove
                  from the SCSI bus. Ignored if disk_names or disk_prefixes
                  are specified. If all three are None/empty, will remove
                  all logical unit mappings associated with the specified
                  client_lpar_id.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: A list of LU EntryWrappers representing the mappings that were
             removed.
    """
    return _remove_storage_elem(
        adapter, vios, client_lpar_id, gen_match_func(
            pvm_stor.LU, names=disk_names, prefixes=disk_prefixes,
            udids=udids))


def remove_pv_mapping(adapter, vios, client_lpar_id, backing_dev, udid=None):
    """Will remove the PV mapping.

    This method will remove the pv mapping. It does not delete the device.
    Will leave other elements on the vSCSI bus intact.

    :param adapter: The pypowervm adapter for API communication.
    :param vios: The virtual I/O server from which the mapping should be
                 removed. This may be the VIOS's UUID string OR an existing
                 VIOS EntryWrapper. If the latter, it must have been
                 retrieved using the VIO_SMAP extended attribute group.
    :param client_lpar_id: The integer short ID or string UUID of the client
                           VM
    :param backing_dev: The physical volume name to be removed. If both
                        backing_dev and udid are None, will remove all
                        physical volume mappings associated with the
                        specified client_lpar_id.
    :param udid: (Optional) UDID of the physical volume to remove from the
                 SCSI bus. Ignored if backing_dev is not None. If
                 backing_dev and udid are both None, will remove all
                 physical volume mappings associated with the specified
                 client_lpar_id.
    :return: The VIOS wrapper representing the updated Virtual I/O Server.
             This is current with respect to etag and SCSI mappings.
    :return: A list of the backing physical device objects that were removed.
""" names = [backing_dev] if backing_dev else None udids = [udid] if udid else None return _remove_storage_elem( adapter, vios, client_lpar_id, gen_match_func( pvm_stor.PV, names=names, udids=udids)) pypowervm-1.1.24/pypowervm/tasks/__init__.py0000664000175000017500000000000013571367171020544 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tasks/cluster_ssp.py0000664000175000017500000002467013571367171021376 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tasks around Cluster/SharedStoragePool.""" from oslo_log import log as logging from random import randint import time import uuid import pypowervm.const as c from pypowervm.i18n import _ import pypowervm.tasks.storage as tsk_stg import pypowervm.util as u import pypowervm.wrappers.cluster as clust from pypowervm.wrappers import job import pypowervm.wrappers.storage as stor LOG = logging.getLogger(__name__) IMGTYP = stor.LUType.IMAGE MKRSZ = 0.001 SLEEP_U_MIN = 30 SLEEP_U_MAX = 60 def crt_cluster_ssp(clust_name, ssp_name, repos_pv, first_node, data_pv_list): """Creates a Cluster/SharedStoragePool via the ClusterCreate Job. The Job takes two parameters: clusterXml and sspXml. :param clust_name: String name for the Cluster. :param ssp_name: String name for the SharedStoragePool. :param repos_pv: storage.PV representing the repository hdisk. The name and udid properties must be specified. 
def crt_cluster_ssp(clust_name, ssp_name, repos_pv, first_node,
                    data_pv_list):
    """Create a Cluster/SharedStoragePool via the ClusterCreate Job.

    The Job takes two parameters: clusterXml and sspXml.

    :param clust_name: String name for the Cluster.
    :param ssp_name: String name for the SharedStoragePool.
    :param repos_pv: storage.PV representing the repository hdisk.  The
                     name and udid properties must be specified.
    :param first_node: cluster.Node representing the initial VIOS in the
                       cluster.  (Cluster creation must be done with a
                       single node; other nodes may be added later.)  The
                       Node wrapper must contain either mtms, lpar_id, AND
                       hostname; or vios_uri.  The indicated node must be
                       able to see each disk.
    :param data_pv_list: Iterable of storage.PV instances to use as the
                         data volume(s) for the SharedStoragePool.
    """
    adapter = repos_pv.adapter
    # Fetch the ClusterCreate Job template.
    job_resp = adapter.read(clust.Cluster.schema_type,
                            suffix_type=c.SUFFIX_TYPE_DO,
                            suffix_parm='Create')
    jwrap = job.Job.wrap(job_resp.entry)
    cluster = clust.Cluster.bld(adapter, clust_name, repos_pv, first_node)
    ssp = stor.SSP.bld(adapter, ssp_name, data_pv_list)
    # The Job parameters are CDATA wrapping the XML of the wrappers above.
    jwrap.run_job(None, job_parms=[
        jwrap.create_job_parameter('clusterXml', cluster.toxmlstring(),
                                   cdata=True),
        jwrap.create_job_parameter('sspXml', ssp.toxmlstring(),
                                   cdata=True)])
    return jwrap


def _find_lus(tier, luname):
    """Find image LUs whose name contains the specified luname.

    :param tier: Tier EntryWrapper representing the Tier to search.
    :param luname: The LU name substring to search for.
    :return: All LUs in the tier a) of type image; and b) whose names
             contain luname.
    """
    feed = stor.LUEnt.search(tier.adapter, parent=tier, lu_type=IMGTYP)
    return [lu for lu in feed if luname in lu.name]


def _upload_in_progress(lus, luname, first):
    """Detect whether another host has an upload in progress.

    :param lus: List of LUs to be considered (i.e. whose names contain the
                name of the LU we intend to upload).
    :param luname: The name of the LU we intend to upload.
    :param first: Boolean indicating whether this is the first time we
                  detected an upload in progress.  Should be True the first
                  time and until the first time this method returns True;
                  thereafter, False.
    :return: True if another host has an upload in progress; False
             otherwise.
    """
    # A marker LU has a 'partXXXXXXXX' prefix prepended to our LU name.
    markers = [lu for lu in lus
               if lu.name != luname and lu.name.endswith(luname)]
    if not markers:
        return False
    marker_names = str([lu.name for lu in markers])
    # Info the first time; debug thereafter to avoid flooding the log.
    if first:
        LOG.info(_('Waiting for in-progress upload(s) to complete. '
                   'Marker LU(s): %s'), marker_names)
    else:
        LOG.debug('Waiting for in-progress upload(s) to complete. '
                  'Marker LU(s): %s', marker_names)
    return True


def _upload_conflict(tier, luname, mkr_luname):
    """Detect an upload conflict with another host (our thread should bail).

    :param tier: Tier EntryWrapper representing the Tier to search.
    :param luname: The name of the LU we intend to upload.
    :param mkr_luname: The name of the marker LU we use to signify our
                       upload is in progress.
    :return: True if we find a winning conflict and should abandon our
             upload; False otherwise.
    """
    # Refetch the feed; other threads may have created their marker LU
    # since our last feed GET.
    lus = _find_lus(tier, luname)
    # If someone else already started the real upload, clean up and wait.
    if any(lu.name == luname for lu in lus):
        LOG.info(_('Abdicating in favor of in-progress upload.'))
        return True
    # The feed should be all markers at this point.  If there's more than
    # one (ours), the first by alpha sort wins.
    if len(lus) > 1:
        winner = min(lu.name for lu in lus)
        if winner != mkr_luname:
            # We lose.  The caller deletes our marker; the winner proceeds.
            LOG.info(_('Abdicating upload in favor of marker %s.'), winner)
            return True
    return False


def get_or_upload_image_lu(tier, luname, vios_uuid, io_handle, b_size,
                           upload_type=tsk_stg.UploadType.IO_STREAM_BUILDER):
    """Ensure our SSP has an LU containing the specified image.

    If an LU of type IMAGE with the specified luname already exists in our
    SSP, return it.  Otherwise, create it, prime it with the image contents
    provided via io_handle, and return it.

    This method assumes consumers employ a naming convention such that an
    LU with a given name represents the same data (size and content) no
    matter where/when it's created/uploaded - e.g. by including the image's
    MD5 checksum in the name.  It coordinates the upload of a particular
    image LU across multiple hosts which share the same SSP but otherwise
    cannot communicate with each other.

    :param tier: Tier EntryWrapper of the Shared Storage Pool Tier on which
                 the image LU is to be hosted.
    :param luname: The name of the image LU.  Note that the name may be
                   shortened to satisfy length restrictions.
    :param vios_uuid: The UUID of the Virtual I/O Server through which the
                      upload should be performed, if necessary.
    :param io_handle: The I/O handle (as defined by the upload_type).  Only
                      used if the image LU needs to be uploaded.
    :param b_size: Integer size, in bytes, of the image provided by
                   io_handle.
    :param upload_type: (Optional, Default: IO_STREAM_BUILDER) Defines the
                        way in which the LU should be uploaded.  Refer to
                        the UploadType enumeration for valid upload
                        mechanisms.
    :return: LUEnt EntryWrapper representing the image LU.
    """
    # Marker (upload-in-progress) LU names are prefixed with 'partxxxxxxxx'.
    marker_prefix = 'part%s' % uuid.uuid4().hex[:8]
    # Ensure the marker LU name won't exceed the API length limit.
    luname = u.sanitize_file_name_for_api(
        luname, max_len=c.MaxLen.FILENAME_DEFAULT - len(marker_prefix))
    mkr_luname = marker_prefix + luname
    first_wait = True
    while True:
        # (Re)fetch the list of image LUs whose name *contains* luname.
        existing = _find_lus(tier, luname)
        # Already uploaded?  Then exactly one LU exists, with an exact
        # name match.
        if len(existing) == 1 and existing[0].name == luname:
            LOG.info(_('Using already-uploaded image LU %s.'), luname)
            return existing[0]
        # Is another host's upload in flight?
        if _upload_in_progress(existing, luname, first_wait):
            first_wait = False
            _sleep_for_upload()
            continue
        # No upload in progress (as of when we grabbed the feed).
        LOG.info(_('Creating marker LU %s'), mkr_luname)
        tier, marker_lu = tsk_stg.crt_lu(tier, mkr_luname, MKRSZ, typ=IMGTYP)
        # The marker LU must be removed if anything fails beyond this
        # point, or once the image LU upload succeeds.
        try:
            # Several processes (possibly on other hosts) may have created
            # markers simultaneously; _upload_conflict elects the uploader.
            if _upload_conflict(tier, luname, mkr_luname):
                _sleep_for_upload()
                continue
            # We won - perform the actual upload.
            LOG.info(_('Uploading to image LU %(lu)s (marker %(mkr)s).'),
                     {'lu': luname, 'mkr': mkr_luname})
            # Create the new Logical Unit; the LU size is in decimal GB.
            tier, image_lu = tsk_stg.crt_lu(
                tier, luname, u.convert_bytes_to_gb(b_size, dp=2),
                typ=IMGTYP)
            try:
                tsk_stg.upload_lu(vios_uuid, image_lu, io_handle, b_size,
                                  upload_type=upload_type)
            except Exception as exc:
                LOG.exception(exc)
                # Remove the LU so it doesn't block others attempting to
                # use the same one.
                LOG.exception(_('Removing failed LU %s.'), luname)
                image_lu.delete()
                raise
            return image_lu
        finally:
            # Signal completion, or clean up, by removing the marker LU.
            marker_lu.delete()


def _sleep_for_upload():
    """Sleep a random interval to stagger conflicting SSP uploads."""
    time.sleep(randint(SLEEP_U_MIN, SLEEP_U_MAX))
# You may obtain
# a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
#    implied. See the License for the specific language governing
#    permissions and limitations under the License.

from oslo_config import cfg
from oslo_log import log as logging

import pypowervm.const as c
from pypowervm.wrappers import job
import pypowervm.wrappers.logical_partition as wlpar

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

_SUFFIX_PARM_MIGRATE = 'Migrate'
_SUFFIX_PARM_MIGRATE_VALIDATE = 'MigrateValidate'
_SUFFIX_PARM_MIGRATE_ABORT = 'MigrateAbort'
_SUFFIX_PARM_MIGRATE_RECOVER = 'MigrateRecover'

TGT_MGD_SYS = 'TargetManagedSystemName'
TGT_RMT_HMC = 'TargetRemoteHMCIPAddress'
TGT_RMT_HMC_USR = 'TargetRemoteHMCUserID'
VFC_MAPPINGS = 'VirtualFCMappings'
VSCSI_MAPPINGS = 'VirtualSCSIMappings'
VLAN_MAPPINGS = 'VlanMappings'
DEST_MSP = 'DestMSPIPaddr'
SRC_MSP = 'SourceMSPIPaddr'
SPP_ID = 'SharedProcPoolID'
OVS_OVERRIDE = 'OVSOverride'
VLAN_BRIDGE_OVERRIDE = 'VLANBridgeOverride'
AFFINITY = 'Affinity'
_OVERRIDE_OK = '2'


def migrate_lpar(
        lpar, tgt_mgd_sys, validate_only=False, tgt_mgmt_svr=None,
        tgt_mgmt_usr=None, virtual_fc_mappings=None,
        virtual_scsi_mappings=None, dest_msp_name=None, source_msp_name=None,
        spp_id=None, timeout=CONF.pypowervm_job_request_timeout * 4,
        sdn_override=False, vlan_check_override=False, vlan_mappings=None,
        check_affinity_score=False):
    """Migrate (or validate migration of) a logical partition.

    :param lpar: The LPAR wrapper of the logical partition to migrate.
    :param tgt_mgd_sys: The name of the managed system to migrate to.
    :param validate_only: If True, only validate the migration; do not
                          actually perform it.
    :param tgt_mgmt_svr: The IP of the PowerVM management platform managing
                         the target host.
    :param tgt_mgmt_usr: The user id to use on the target PowerVM
                         management platform.
    :param virtual_fc_mappings: List of virtual fibre channel adapter
        mappings to move, each of the form:
            virtual-slot-number/vios-lpar-name/vios-lpar-ID
                [/[vios-virtual-slot-number][/[vios-fc-port-name]]]
        The first two '/' characters must be present.  The third is
        required only if vios-virtual-slot-number or vios-fc-port-name is
        specified; the fourth only if vios-fc-port-name is specified.
        Exactly one of vios-lpar-name or vios-lpar-ID is required (not
        both).  E.g. 4//1/14/fcs0 maps virtual FC client adapter slot 4 to
        server adapter slot 14 in VIOS partition 1 on the destination,
        using physical FC port fcs0.
    :param virtual_scsi_mappings: List of virtual SCSI adapter mappings to
        move, each of the form:
            virtual-slot-number/vios-lpar-name/vios-lpar-ID
                [/vios-virtual-slot-number]
        The first two '/' characters must be present; the last only if
        vios-virtual-slot-number is specified.  Exactly one of
        vios-lpar-name or vios-lpar-ID is required (not both).  E.g.
        12/vios1//16 maps virtual SCSI adapter slot 12 to slot 16 on VIOS
        partition vios1 on the destination managed system.
    :param dest_msp_name: A comma-separated list of destination VIOS IP
        addresses identifying which interface(s) the Mover Service
        Partition should use.
    :param source_msp_name: A comma-separated list of source VIOS IP
        addresses identifying which interface(s) the Mover Service
        Partition should use.
    :param spp_id: The shared processor pool id to use on the target
        system.  May legitimately be 0 (the default pool).
    :param timeout: Maximum number of seconds for the job to complete.
    :param sdn_override: (Optional, Default: False) If True, allow a
        migration where the networking is hosted on a non-traditional VIOS
        partition (e.g. the NovaLink).
    :param vlan_check_override: (Optional, Default: False) If True, tell
        the Virtual I/O Server not to validate that the other VIOS has the
        VLAN pre-provisioned.
    :param vlan_mappings: List of VLAN mappings indicating what the VLAN
        should be on the target system for a given MAC address, each of the
        form:
            MAC/PVID[/VLAN_A VLAN_B]
        The MAC address must be exactly 12 hex digits, case insensitive,
        without colons.  PVID is the target PVID for that adapter; the
        optional trailing list is space-delimited additional VLANs.  If not
        provided, the original VLANs are used.
    :param check_affinity_score: (Optional, Default: False) If True,
        require a check that the LPAR's affinity score is not lower on the
        destination host.
    """
    op = (_SUFFIX_PARM_MIGRATE_VALIDATE if validate_only
          else _SUFFIX_PARM_MIGRATE)
    resp = lpar.adapter.read(wlpar.LPAR.schema_type, lpar.uuid,
                             suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=op)
    job_wrapper = job.Job.wrap(resp.entry)
    job_parms = [job_wrapper.create_job_parameter(TGT_MGD_SYS,
                                                  str(tgt_mgd_sys))]

    # Generic 'raw' format job parameters (all string-valued).
    for kw, val in [(TGT_RMT_HMC, tgt_mgmt_svr),
                    (TGT_RMT_HMC_USR, tgt_mgmt_usr),
                    (DEST_MSP, dest_msp_name), (SRC_MSP, source_msp_name)]:
        if val:
            job_parms.append(
                job_wrapper.create_job_parameter(kw, str(val)))

    # BUGFIX: spp_id must be compared against None explicitly.  Pool ID 0
    # (the default shared processor pool) is falsy and was previously
    # silently dropped by the generic truthiness test above.
    if spp_id is not None:
        job_parms.append(
            job_wrapper.create_job_parameter(SPP_ID, str(spp_id)))

    # The SDN / VLAN overrides are...odd.  Instead of passing in a 'True',
    # we must pass in the character '2' to indicate an override.
    for kw, val in [(OVS_OVERRIDE, sdn_override),
                    (VLAN_BRIDGE_OVERRIDE, vlan_check_override)]:
        if val:
            job_parms.append(job_wrapper.create_job_parameter(kw,
                                                              _OVERRIDE_OK))

    # The mappings are special.  They require a join so that they are comma
    # separated down to the API.
    for kw, val in [(VFC_MAPPINGS, virtual_fc_mappings),
                    (VSCSI_MAPPINGS, virtual_scsi_mappings),
                    (VLAN_MAPPINGS, vlan_mappings)]:
        if val:
            job_parms.append(
                job_wrapper.create_job_parameter(kw, ",".join(val)))

    # Set affinity flag to 'true' as part of LPM.  If enabled for the VM,
    # an additional flag will be passed as part of migration parameters.
    # Otherwise, this flag will not be passed.  The default behavior is
    # not to check for affinity score on the destination host.
    if check_affinity_score:
        job_parms.append(
            job_wrapper.create_job_parameter(AFFINITY, 'true'))
    job_wrapper.run_job(lpar.uuid, job_parms=job_parms, timeout=timeout)


def migrate_recover(lpar, force=False,
                    timeout=CONF.pypowervm_job_request_timeout):
    """Recover a failed logical partition migration.

    :param lpar: The LPAR wrapper of the logical partition to recover.
    :param force: Boolean specifying whether to force the migration to
                  recover when errors are encountered.
    :param timeout: Maximum number of seconds for the job to complete.
    """
    resp = lpar.adapter.read(wlpar.LPAR.schema_type, lpar.uuid,
                             suffix_type=c.SUFFIX_TYPE_DO,
                             suffix_parm=_SUFFIX_PARM_MIGRATE_RECOVER)
    job_wrapper = job.Job.wrap(resp.entry)
    job_parms = []
    if force:
        job_parms.append(job_wrapper.create_job_parameter('Force', 'true'))
    job_wrapper.run_job(lpar.uuid, job_parms=job_parms, timeout=timeout)


def migrate_abort(lpar, timeout=CONF.pypowervm_job_request_timeout):
    """Abort a logical partition migration.

    :param lpar: The LPAR wrapper of the logical partition whose migration
                 should be aborted.
    :param timeout: Maximum number of seconds for the job to complete.
    """
    resp = lpar.adapter.read(wlpar.LPAR.schema_type, lpar.uuid,
                             suffix_type=c.SUFFIX_TYPE_DO,
                             suffix_parm=_SUFFIX_PARM_MIGRATE_ABORT)
    job_wrapper = job.Job.wrap(resp.entry)
    job_wrapper.run_job(lpar.uuid, job_parms=None, timeout=timeout)
from oslo_log import log as logging

from pypowervm import exceptions as pvm_ex
from pypowervm.i18n import _
from pypowervm.tasks import partition
from pypowervm.utils import retry as pvm_retry
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios

LOG = logging.getLogger(__name__)

# Module-level cache of the last known-good hosting (VIOS uuid, VG uuid).
_cur_vios_uuid = None
_cur_vg_uuid = None


@pvm_retry.retry(tries=6)
def validate_vopt_repo_exists(
        adapter, vopt_media_volume_group='rootvg', vopt_media_rep_size=1):
    """Ensure that a virtual optical media repository exists.

    Checks that at least one Virtual I/O Server has a virtual optical media
    repository.  If the volume group on an I/O Server goes down (perhaps
    due to maintenance), the system rescans to determine if another I/O
    Server can host the request.

    The very first invocation may be expensive.  It may also be expensive
    to call if a Virtual I/O Server unexpectedly goes down.

    :param adapter: The pypowervm adapter.
    :param vopt_media_volume_group: (Optional, Default: rootvg) The volume
                                    group to use if the vopt media repo
                                    needs to be created.
    :param vopt_media_rep_size: (Optional, Default: 1) The size of the
                                virtual optical media (in GB) if the repo
                                needs to be created.
    :return vios_uuid: The VIOS uuid hosting the VG.
    :return vg_uuid: The volume group uuid hosting the vopt.
    :raise NoMediaRepoVolumeGroupFound: Raised when there are no VIOSes
                                        that can support the virtual
                                        optical media.
    """
    # If the cached values are set, first validate that the repo is still
    # reachable; it could be down for maintenance.
    if _cur_vg_uuid is not None:
        cached_vios, cached_vg = _cur_vios_uuid, _cur_vg_uuid
        try:
            vg_wrap = pvm_stg.VG.get(
                adapter, uuid=cached_vg, parent_type=pvm_vios.VIOS,
                parent_uuid=cached_vios)
            if vg_wrap is not None and vg_wrap.vmedia_repos:
                return cached_vios, cached_vg
        except Exception as exc:
            LOG.exception(exc)

        LOG.warning(_("An error occurred querying the virtual optical "
                      "media repository.  Attempting to re-establish "
                      "connection with a virtual optical media repository."))

    # Cache miss or stale cache - do the deeper query.
    return _find_or_rebuild_vopt_repo(adapter, vopt_media_volume_group,
                                      vopt_media_rep_size)


def _find_or_rebuild_vopt_repo(adapter, vopt_media_volume_group,
                               vopt_media_rep_size):
    """Locate a live media repository, creating one if necessary.

    Reached when a) it's our first time booting up; b) the previously-used
    volume group went offline (e.g. VIOS down for maintenance); or c) the
    previously-used media repository disappeared.  On success the module
    cache is refreshed.
    """
    found_vg, found_vios, conf_vg, conf_vios = _find_vopt_repo_data(
        adapter, vopt_media_volume_group)

    # No media repo AND no appropriate volume group?  Since
    # vopt_media_volume_group defaults to rootvg (always present), this
    # only happens if: a) no media repo exists on any visible VIOS; AND
    # b) a non-rootvg vopt_media_volume_group was specified; AND c) that
    # volume group did not exist on any VIOS.
    if found_vg is None and conf_vg is None:
        raise pvm_ex.NoMediaRepoVolumeGroupFound(
            vol_grp=vopt_media_volume_group)

    # No existing media repo - create one dynamically on the configured VG.
    if found_vg is None:
        found_vg, found_vios = conf_vg, conf_vios
        found_vg.vmedia_repos = [
            pvm_stg.VMediaRepos.bld(adapter, 'vopt', vopt_media_rep_size)]
        found_vg = found_vg.update()

    # Found or created successfully - refresh the module-level cache.
    global _cur_vg_uuid, _cur_vios_uuid
    _cur_vg_uuid = found_vg.uuid
    _cur_vios_uuid = found_vios.uuid
    return _cur_vios_uuid, _cur_vg_uuid


def _find_vopt_repo_data(adapter, vopt_media_volume_group):
    """Find the vopt repo defaults.

    :param adapter: pypowervm adapter.
    :param vopt_media_volume_group: The name of the volume group to use.
    :return found_vg: Returned if a volume group already exists with a
                      media repo within it - that volume group wrapper.
    :return found_vios: Returned alongside found_vg - its VIOS wrapper.
    :return conf_vg: Returned if no volume group exists with a media repo.
                     The volume group wrapper (named by
                     vopt_media_volume_group) in which the consumer should
                     create the media repo.
    :return conf_vios: Returned alongside conf_vg - the VIOS wrapper that
                       is the parent of conf_vg.
    """
    found_vg = found_vios = None
    # In case no media repo exists, remember the VG on which one should be
    # created.
    conf_vg = conf_vios = None

    # Walk the active VIOSes and their VGs looking for an existing repo.
    for vio_wrap in partition.get_active_vioses(adapter):
        for vg_wrap in pvm_stg.VG.get(adapter, parent=vio_wrap):
            if vg_wrap.vmedia_repos:
                found_vg, found_vios = vg_wrap, vio_wrap
                break

            # No repo in this VG; remember the CONFigured
            # vopt_media_volume_group if we see it.
            if (conf_vg is None and not vio_wrap.is_mgmt_partition
                    and vg_wrap.name == vopt_media_volume_group):
                conf_vg, conf_vios = vg_wrap, vio_wrap

        # Stop looking once a repo has been found.
        if found_vg:
            break

    return found_vg, found_vios, conf_vg, conf_vios
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tasks to request and release master mode."""

from oslo_config import cfg
from oslo_log import log as logging

import pypowervm.const as c
import pypowervm.log as lgc
from pypowervm.wrappers import job
import pypowervm.wrappers.managed_system as ms

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

_SUFFIX_PARM_REQUEST_MASTER = 'RequestMaster'
_SUFFIX_PARM_RELEASE_MASTER = 'ReleaseMaster'
CO_MGMT_MASTER_STATUS = "coManagementMasterStatus"


class MasterMode(object):
    """Valid master modes used when requesting master.

    NORMAL: Default mode.
    TEMP: When released, the original master is immediately restored.
    """
    NORMAL = "norm"
    TEMP = "temp"


@lgc.logcall
def request_master(msys, mode=MasterMode.NORMAL,
                   timeout=CONF.pypowervm_job_request_timeout):
    """Request master mode for the provided Managed System.

    :param msys: Managed System wrapper requesting master mode.
    :param mode: The requested master mode type.  There are 2 options:
                 MasterMode.NORMAL ("norm"): default.
                 MasterMode.TEMP ("temp"): when released, the original
                 master is immediately restored.
    :param timeout: Maximum number of seconds for the job to complete.
    """
    job_wrapper = job.Job.wrap(msys.adapter.read(
        ms.System.schema_type, msys.uuid, suffix_type=c.SUFFIX_TYPE_DO,
        suffix_parm=_SUFFIX_PARM_REQUEST_MASTER).entry)
    parms = [job_wrapper.create_job_parameter(CO_MGMT_MASTER_STATUS, mode)]
    job_wrapper.run_job(msys.uuid, job_parms=parms, timeout=timeout)


@lgc.logcall
def release_master(msys, timeout=CONF.pypowervm_job_request_timeout):
    """Release master mode for the provided Managed System.

    :param msys: Managed System wrapper releasing master mode.
    :param timeout: Maximum number of seconds for the job to complete.
    """
    job_wrapper = job.Job.wrap(msys.adapter.read(
        ms.System.schema_type, msys.uuid, suffix_type=c.SUFFIX_TYPE_DO,
        suffix_parm=_SUFFIX_PARM_RELEASE_MASTER).entry)
    job_wrapper.run_job(msys.uuid, timeout=timeout)
"""Tasks to start, stop, and reboot partitions.""" from oslo_config import cfg from oslo_log import log as logging import six import pypowervm.const as c import pypowervm.exceptions as pexc from pypowervm.i18n import _ import pypowervm.log as lgc import pypowervm.tasks.power_opts as popts import pypowervm.wrappers.base_partition as bp from pypowervm.wrappers import job LOG = logging.getLogger(__name__) CONF = cfg.CONF # Error codes indicate osshutdown is not supported _OSSHUTDOWN_RMC_ERRS = ['HSCL0DB4', 'PVME01050905', 'PVME01050402'] # Error codes indicate partition is already powered off _ALREADY_POWERED_OFF_ERRS = ['HSCL1558', 'PVME04000005', 'PVME01050901'] # Error codes indicate partition is already powered on _ALREADY_POWERED_ON_ERRS = ['HSCL3681', 'PVME01042026'] BootMode = popts.BootMode KeylockPos = popts.KeylockPos RemoveOptical = popts.RemoveOptical Force = popts.Force class PowerOp(object): """Provides granular control over a partition PowerOn/Off Job. Use the start or stop @classmethod to invoke the appropriate Job. Jobs invoked through these methods are never retried. If they fail or time out, they raise relevant exceptions - see the methods' docstrings for details. """ @classmethod def start(cls, part, opts=None, timeout=CONF.pypowervm_job_request_timeout, synchronous=True): """Power on a partition. :param part: Partition (LPAR or VIOS) wrapper indicating the partition to power on. :param opts: An instance of power_opts.PowerOnOpts indicating additional options to specify to the PowerOn operation. By default, no additional options are used. :param timeout: value in seconds for specifying how long to wait for the Job to complete. :param synchronous: If True, this method will not return until the Job completes (whether success or failure) or times out. If False, this method will return as soon as the Job has started on the server (that is, achieved any state beyond NOT_ACTIVE). Note that timeout is still possible in this case. 
:raise VMPowerOnTimeout: If the Job timed out. :raise VMPowerOnFailure: If the Job failed for some reason other than that the partition was already powered on. """ try: cls._run(part, opts or popts.PowerOnOpts(), timeout, synchronous=synchronous) except pexc.JobRequestTimedOut as error: LOG.exception(error) raise pexc.VMPowerOnTimeout(lpar_nm=part.name, timeout=timeout) except pexc.JobRequestFailed as error: emsg = six.text_type(error) # If already powered on, don't send exception if (any(err_prefix in emsg for err_prefix in _ALREADY_POWERED_ON_ERRS)): LOG.warning(_("Partition %s already powered on."), part.name) return LOG.exception(error) raise pexc.VMPowerOnFailure(lpar_nm=part.name, reason=emsg) @classmethod def stop(cls, part, opts=None, timeout=CONF.pypowervm_job_request_timeout, synchronous=True): """Power off a partition. :param part: LPAR/VIOS wrapper indicating the partition to power off. :param opts: An instance of power_opts.PowerOffOpts indicating the type of shutdown to perform, and any additional options. If not specified, PowerOffOpts.soft_detect is used, with no restart. :param timeout: value in seconds for specifying how long to wait for the Job to complete. :param synchronous: If True, this method will not return until the Job completes (whether success or failure) or times out. If False, this method will return as soon as the Job has started on the server (that is, achieved any state beyond NOT_ACTIVE). Note that timeout is still possible in this case. :raise VMPowerOffTimeout: If the Job timed out. :raise VMPowerOffFailure: If the Job failed for some reason other than that the partition was already powered off, and restart was not requested. :return: A PowerOp instance which can be invoked via the run method. :raise OSShutdownNoRMC: OP_PWROFF_OS was requested on a non-IBMi partition with no RMC connection. 
""" if opts is None: opts = popts.PowerOffOpts().soft_detect(part) if opts.is_os and not opts.can_os_shutdown(part): raise pexc.OSShutdownNoRMC(lpar_nm=part.name) try: cls._run(part, opts, timeout, synchronous=synchronous) except pexc.JobRequestTimedOut as error: LOG.exception(error) raise pexc.VMPowerOffTimeout(lpar_nm=part.name, timeout=timeout) except pexc.JobRequestFailed as error: emsg = six.text_type(error) # If already powered off and not a reboot, don't send exception if (any(err_prefix in emsg for err_prefix in _ALREADY_POWERED_OFF_ERRS) and not opts.is_restart): LOG.warning(_("Partition %s already powered off."), part.name) return LOG.exception(error) raise pexc.VMPowerOffFailure(lpar_nm=part.name, reason=emsg) @classmethod def _run(cls, part, opts, timeout, synchronous=True): """Fetch, fill out, and run a Power* Job for this PowerOp. Do not invoke this method directly; it is used by the start and stop class methods. :param part: LPAR/VIOS wrapper of the partition to power on/off. :param opts: Instance of power_opts.PowerOnOpts or PowerOffOpts indicating the type of operation to perform, and any additional options. :param timeout: value in seconds for specifying how long to wait for the Job to complete. :param synchronous: If True, this method will not return until the Job completes (whether success or failure) or times out. If False, this method will return as soon as the Job has started on the server (that is, achieved any state beyond NOT_ACTIVE). Note that timeout is still possible in this case. :raise VMPowerOffTimeout: If the Job timed out. :raise VMPowerOffFailure: If the Job failed. 
""" # Fetch the Job template wrapper jwrap = job.Job.wrap(part.adapter.read( part.schema_type, part.uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=opts.JOB_SUFFIX)) LOG.debug("Executing power operation for partition %(lpar_nm)s with " "timeout=%(timeout)d and synchronous=%(synchronous)s: " "%(opts)s", dict(lpar_nm=part.name, timeout=timeout, synchronous=synchronous, opts=str(opts))) # Run the Job, letting exceptions raise up. jwrap.run_job(part.uuid, job_parms=opts.bld_jparms(), timeout=timeout, synchronous=synchronous) def _legacy_power_opts(klass, add_parms): """Detect (and warn) if add_parms is a legacy dict vs. a Power*Opts. Usage: opts, legacy = _legacy_power_opts(PowerOnOpts, add_parms) if legacy: # Do other stuff based on legacy behavior else: # Do other stuff based on new behavior :param klass: The class we expect, either PowerOnOpts or PowerOffOpts. :param add_parms: The add_parms argument to check. :return: An instance of klass, which is either add_parms, constructed by passing it to the klass __init__'s legacy_add_parms. :return: True if add_parms was a legacy dict; False otherwise. """ if isinstance(add_parms, klass): return add_parms, False else: if add_parms is not None: import warnings warnings.warn(_("Specifying add_parms as a dict is deprecated. " "Please specify a %s instance instead.") % klass.__name__, DeprecationWarning) return klass(legacy_add_parms=add_parms), True @lgc.logcall_args def power_on(part, host_uuid, add_parms=None, synchronous=True): """Will Power On a Logical Partition or Virtual I/O Server. :param part: The LPAR/VIOS wrapper of the partition to power on. :param host_uuid: Not used. Retained for backward compatibility. :param add_parms: A power_opts.PowerOnOpts instance; or (deprecated) a dict of parameters to pass directly to the job template. If unspecified, a default PowerOnOpts instance is used, with no additional parameters. 
:param synchronous: If True (the default), this method will not return until the PowerOn Job completes (whether success or failure) or times out. If False, this method will return as soon as the Job has started on the server (that is, achieved any state beyond NOT_ACTIVE). Note that timeout is still possible in this case. :raise VMPowerOnFailure: If the operation failed. :raise VMPowerOnTimeout: If the operation timed out. """ PowerOp.start( part, opts=_legacy_power_opts(popts.PowerOnOpts, add_parms)[0], synchronous=synchronous) def _pwroff_soft_ibmi_flow(part, restart, immediate, timeout): """Normal (non-hard) power-off retry flow for IBMi partitions. ================== opts.is_immediate? <--START ================== | | NO YES V V ========= ============ ========== ============ OS normal -FAIL*-> OS immediate -FAIL*-> VSP normal -FAIL*-> return False ========= ============ ========== ============ |_________________ | ___________________| ||| SUCCESS V *VMPowerOffTimeout OR =========== VMPowerOffFailure return True =========== :param part restart timeout: See power_off. :param immediate: Boolean. Indicates whether to try os-normal first (False, the default) before progressing to os-immediate. If True, skip trying os-normal shutdown. :return: True if the power-off succeeded; False otherwise. :raise VMPowerOffTimeout: If the last power-off attempt timed out. :raise VMPowerOffFailure: If the last power-off attempt failed. """ opts = popts.PowerOffOpts().restart(value=restart) # If immediate was already specified, skip OS-normal. if not immediate: # ==> OS normal try: PowerOp.stop(part, opts=opts.os_normal(), timeout=timeout) return True except pexc.VMPowerOffFailure: LOG.warning(_("IBMi OS normal shutdown failed. Trying OS " "immediate shutdown. 
Partition: %s"), part.name) # Fall through to OS immediate, with default timeout timeout = CONF.pypowervm_job_request_timeout # ==> OS immediate try: PowerOp.stop(part, opts=opts.os_immediate(), timeout=timeout) return True except pexc.VMPowerOffFailure: LOG.warning(_("IBMi OS immediate shutdown failed. Trying VSP normal " "shutdown. Partition: %s"), part.name) # Fall through to VSP normal # ==> VSP normal try: PowerOp.stop(part, opts=opts.vsp_normal(), timeout=timeout) return True except pexc.VMPowerOffFailure: LOG.warning("IBMi VSP normal shutdown failed. Partition: %s", part.name) return False def _pwroff_soft_standard_flow(part, restart, timeout): """Normal (non-hard) power-off retry flow for non-IBMi partitions. START | +---VMPowerOffTimeout-------------------------------------+ V | V ======== VMPowerOffFailure ========== VMPowerOffFailure ============ OS immed ---- or ---> VSP normal ---- or ---> return False ======== OSShutdownNoRMC ========== VMPowerOffTimeout ============ | _________________________/ |/ SUCCESS V =========== return True =========== :param part restart timeout: See power_off. :return: True if the power-off succeeded; False otherwise. :raise VMPowerOffTimeout: If the last power-off attempt timed out. :raise VMPowerOffFailure: If the last power-off attempt failed. """ # For backward compatibility, OS shutdown is always immediate. We don't # let PowerOn decide whether to use OS or VSP; instead we trap # OSShutdownNoRMC (which is very quick) so we can keep this progression # linear. opts = popts.PowerOffOpts().restart(value=restart) # ==> OS immediate try: PowerOp.stop(part, opts=opts.os_immediate(), timeout=timeout) return True except pexc.VMPowerOffTimeout: LOG.warning(_("Non-IBMi OS immediate shutdown timed out. Trying VSP " "hard shutdown. Partition: %s"), part.name) return False except pexc.VMPowerOffFailure: LOG.warning(_("Non-IBMi OS immediate shutdown failed. Trying VSP " "normal shutdown. 
Partition: %s"), part.name) # Fall through to VSP normal, but with default timeout timeout = CONF.pypowervm_job_request_timeout except pexc.OSShutdownNoRMC as error: LOG.warning(error.args[0]) # Fall through to VSP normal # ==> VSP normal try: PowerOp.stop(part, opts.vsp_normal(), timeout=timeout) return True except pexc.VMPowerOffFailure: LOG.warning("Non-IBMi VSP normal shutdown failed. Partition: %s", part.name) return False def _power_off_progressive(part, timeout, restart, ibmi_immed=False): # Do the progressive-retry sequence appropriate to the partition type. if part.env == bp.LPARType.OS400: # The IBMi progression. if _pwroff_soft_ibmi_flow(part, restart, ibmi_immed, timeout): return # Fall through to VSP hard else: # The non-IBMi progression. if _pwroff_soft_standard_flow(part, restart, timeout): return # Fall through to VSP hard # If we got here, force_immediate == ON_FAILURE, so fall back to VSP hard. # Let this one finish or raise. # ==> VSP hard LOG.warning(_("VSP hard shutdown with default timeout. Partition: %s"), part.name) PowerOp.stop(part, popts.PowerOffOpts().vsp_hard().restart(value=restart)) def _power_off_single(part, opts, force_immediate, timeout): """No-retry single power-off operation. :param part force_immediate timeout: See power_off. force_immediate is either TRUE or NO_RETRY. :param opts: A PowerOffOpts instance. The operation and immediate params are overwritten by this method. Any other options (such as restart) remain unaffected. :raise VMPowerOffFailure: If the operation failed. :raise VMPowerOffTimeout: If the operation timed out. """ # If force_immediate=TRUE, always VSP hard shutdown. 
if force_immediate == Force.TRUE: PowerOp.stop(part, opts=opts.vsp_hard(), timeout=timeout) # If no retries, just do the single "soft" power-off requested elif force_immediate == Force.NO_RETRY: # opts is already set up for soft_detect PowerOp.stop(part, opts=opts, timeout=timeout) return @lgc.logcall_args def power_off(part, host_uuid, force_immediate=Force.ON_FAILURE, restart=False, timeout=CONF.pypowervm_job_request_timeout, add_parms=None): """Will Power Off a Logical Partition or Virtual I/O Server. DEPRECATED. Use PowerOp.stop() for single power-off. Use power_off_progressive for soft-retry flows. Depending on the force_immediate flag and the partition's type and RMC state, this method may attempt increasingly aggressive mechanisms for shutting down the OS if initial attempts fail or time out. :param part: The LPAR/VIOS wrapper of the instance to power off. :param host_uuid: Not used. Retained for backward compatibility. :param force_immediate: DEPRECATED. - If you want Force.NO_RETRY behavior, use PowerOp.stop() with the specific operation/immediate settings desired. - If you want Force.TRUE behavior, use PowerOp.stop(..., opts=PowerOffOpts().vsp_hard()) - If add_parms is a PowerOffOpts with an operation set, force_immediate (and restart) is ignored - the method call is equivalent to: PowerOp.stop(part, opts=add_parms, timeout=timeout) - This flag retains its legacy behavior only if add_parms is either a legacy dict or a PowerOffOpts with no operation set: - Force.TRUE: The force-immediate option is included on the first pass. - Force.NO_RETRY: The force-immediate option is not included. If the power-off fails or times out, VMPowerOffFailure is raised immediately. - Force.ON_FAILURE: The force-immediate option is not included on the first pass; but if the power-off fails (including timeout), it is retried with the force-immediate option added. :param restart: DEPRECATED: Use a PowerOffOpts instance for add_parms, with restart specified therein. Boolean. 
Perform a restart after the power off. If add_parms is a PowerOffOpts instance, this parameter is ignored. :param timeout: Time in seconds to wait for the instance to stop. :param add_parms: A power_opts.PowerOffOpts instance; or (deprecated) a dict of parameters to pass directly to the job template. If unspecified, a default PowerOffOpts instance is used, with operation/immediate/restart depending on the force_immediate and restart parameters, and no additional options. :raise VMPowerOffFailure: If the operation failed (possibly after retrying) :raise VMPowerOffTimeout: If the operation timed out (possibly after retrying). """ import warnings warnings.warn("The power_off method is deprecated. Please use either " "PowerOp.stop or power_off_progressive.", DeprecationWarning) opts, legacy = _legacy_power_opts(popts.PowerOffOpts, add_parms) if legacy: # Decide whether to insist on 'immediate' for OS shutdown. Do that # only if add_parms explicitly included immediate=true. Otherwise, let # soft_detect decide. opts.soft_detect(part, immed_if_os=opts.is_immediate or None) # Add the restart option if necessary. opts.restart(value=restart) elif opts.is_param_set(popts.PowerOffOperation.KEY): # If a PowerOffOpt was provided with no operation, it's just being used # to specify e.g. restart, and we should fall through to the soft # flows. But if an operation was specified, we just want to do that # single operation. Setting NO_RETRY results in using whatever hard/ # immediate setting is in the PowerOffOpt. force_immediate = Force.NO_RETRY if force_immediate != Force.ON_FAILURE: return _power_off_single(part, opts, force_immediate, timeout) # Do the progressive-retry sequence appropriate to the partition type and # the force_immediate flag. 
_power_off_progressive(part, timeout, restart, ibmi_immed=opts.is_immediate) def power_off_progressive(part, restart=False, ibmi_immed=False, timeout=CONF.pypowervm_job_request_timeout): """Attempt soft power-off, retrying with increasing aggression on failure. IBMi partitions always start with OS shutdown. If ibmi_immed == False, os-normal shutdown is tried first; then os-immediate; then vsp-normal; then vsp-hard. If ibmi_immed == True, os-normal is skipped, but the rest of the progression is the same. For non-IBMi partitions: If RMC is up, os-immediate is tried first. If this times out, vsp hard is performed next; otherwise, vsp-normal is attempted before vsp-hard. If RMC is down, vsp-normal is tried first, then vsp-hard. :param part: The LPAR/VIOS wrapper of the instance to power off. :param restart: Boolean. Perform a restart after the power off. :param ibmi_immed: Boolean. Indicates whether to try os-normal first (False, the default) before progressing to os-immediate. If True, skip trying os-normal shutdown. Only applies to IBMi partitions. :param timeout: Time in seconds to wait for the instance to stop. This is only applied to the first attempt in the progression. :raise VMPowerOffFailure: If the last attempt in the progression failed. :raise VMPowerOffTimeout: If the last attempt in the progression timed out. """ _power_off_progressive(part, timeout, restart, ibmi_immed=ibmi_immed) pypowervm-1.1.24/pypowervm/tasks/vterm.py0000664000175000017500000007722513571367171020171 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Manage LPAR virtual terminals.""" import re import select import six import socket import ssl import struct import subprocess import threading import time from oslo_concurrency import lockutils as lock from oslo_log import log as logging from oslo_utils import encodeutils import pypowervm.const as c from pypowervm import exceptions as pvm_exc from pypowervm.i18n import _ from pypowervm.wrappers import job import pypowervm.wrappers.logical_partition as pvm_lpar LOG = logging.getLogger(__name__) _SUFFIX_PARM_CLOSE_VTERM = 'CloseVterm' # Used to track of the mapping between the ports and the Listeners/Repeaters # that we construct for those and also keeping track of which local port # is for a given LPAR UUID and want VNC Path String is provided for the LPAR. # # These are global variables used below. Since they are defined up here, need # to use global as a way for modification of the fields to stick. We do this # so that we keep track of all of the connections. _VNC_REMOTE_PORT_TO_LISTENER = {} _VNC_LOCAL_PORT_TO_REPEATER = {} _VNC_UUID_TO_LOCAL_PORT = {} _VNC_PATH_TO_UUID = {} # For the single remote port case, we will hard-code that to 5901 for now _REMOTE_PORT = 5901 def close_vterm(adapter, lpar_uuid): """Close the vterm associated with an lpar :param adapter: The adapter to talk over the API. :param lpar_uuid: partition uuid """ if adapter.traits.local_api: _close_vterm_local(adapter, lpar_uuid) else: _close_vterm_non_local(adapter, lpar_uuid) def _close_vterm_non_local(adapter, lpar_uuid): """Job to force the close of the terminal when the API is remote. 
:param adapter: The adapter to talk over the API. :param lpar_uuid: partition uuid """ # Close vterm on the lpar resp = adapter.read(pvm_lpar.LPAR.schema_type, lpar_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_SUFFIX_PARM_CLOSE_VTERM) job_wrapper = job.Job.wrap(resp.entry) try: job_wrapper.run_job(lpar_uuid) except Exception: LOG.exception(_('Unable to close vterm.')) raise def _close_vterm_local(adapter, lpar_uuid): """Forces the close of the terminal on a local system. Will check for a VNC server as well in case it was started via that mechanism. :param adapter: The adapter to talk over the API. :param lpar_uuid: partition uuid """ lpar_id = _get_lpar_id(adapter, lpar_uuid) _run_proc(['rmvterm', '--id', lpar_id]) # Stop the port. with lock.lock('powervm_vnc_term'): vnc_port = _VNC_UUID_TO_LOCAL_PORT.get(lpar_uuid, 0) if vnc_port in _VNC_LOCAL_PORT_TO_REPEATER: _VNC_LOCAL_PORT_TO_REPEATER[vnc_port].stop() def open_localhost_vnc_vterm(adapter, lpar_uuid, force=False): """Opens a VNC vTerm to a given LPAR. Always binds to localhost. :param adapter: The adapter to drive the PowerVM API :param lpar_uuid: Partition UUID. :param force: (Optional, Default: False) If set to true will force the console to be opened as VNC even if it is already opened via some other means. :return: The VNC Port that the terminal is running on. """ # This API can only run if local. if not adapter.traits.local_api: raise pvm_exc.ConsoleNotLocal() lpar_id = _get_lpar_id(adapter, lpar_uuid) def _run_mkvterm_cmd(lpar_uuid, force): cmd = ['mkvterm', '--id', str(lpar_id), '--vnc', '--local'] ret_code, std_out, std_err = _run_proc(cmd) # If the vterm was already started, the mkvterm command will always # return an error message with a return code of 3. 
However, there # are 2 scenarios here, one where it was started with the VNC option # previously, which we will get a valid port number back (which is # the good path scenario), and one where it was started out-of-band # where we will get no port. If it is the out-of-band scenario and # they asked us to force the connection, then we will attempt to # terminate the old vterm session so we can start up one with VNC. if force and ret_code == 3 and not _parse_vnc_port(std_out): LOG.warning(_("Invalid output on vterm open. Trying to reset the " "vterm. Error was %s"), std_err) close_vterm(adapter, lpar_uuid) ret_code, std_out, std_err = _run_proc(cmd) # The only error message that is fine is a return code of 3 that a # session is already started, where we got back the port back meaning # that it was started as VNC. Else, raise up the error message. if ret_code != 0 and ret_code != 3: raise pvm_exc.VNCBasedTerminalFailedToOpen(err=std_err) # Parse the VNC Port out of the stdout returned from mkvterm return _parse_vnc_port(std_out) return _run_mkvterm_cmd(lpar_uuid, force) def open_remotable_vnc_vterm( adapter, lpar_uuid, local_ip, remote_ips=None, vnc_path=None, use_x509_auth=False, ca_certs=None, server_cert=None, server_key=None, force=False): """Opens a VNC vTerm to a given LPAR. Wraps in some validation. Must run on the management partition. :param adapter: The adapter to drive the PowerVM API :param lpar_uuid: Partition UUID. :param local_ip: The IP Address to bind the VNC server to. This would be the IP of the management network on the system. :param remote_ips: (Optional, Default: None) A binding to only accept clients that are from a specific list of IP addresses through. Default is None, and therefore will allow any remote IP to connect. :param vnc_path: (Optional, Default: None) If provided, the vnc client must pass in this path (in HTTP format) to connect to the VNC server. The path is in HTTP format. 
So if the vnc_path is 'Test' the first packet request into the VNC must be: "CONNECT Test HTTP/1.1\r\n\r\n" If the client passes in an invalid request, a 400 Bad Request will be returned. If the client sends in the correct path a 200 OK will be returned. If no vnc_path is specified, then no path is expected to be passed in by the VNC client and it will listen on the same remote port as local port. If the path is specified then it will listen on the on a single remote port of 5901 and determine the LPAR based on this path. :param use_x509_auth: (Optional, Default: False) If enabled, uses X509 Authentication for the VNC sessions started for VMs. :param ca_certs: (Optional, Default: None) Path to CA certificate to use for verifying VNC X509 Authentication. Only used if use_x509_auth is set to True. :param server_cert: (Optional, Default: None) Path to Server certificate to use for verifying VNC X509 Authentication. Only used if use_x509_auth is set to True. :param server_key: (Optional, Default: None) Path to Server private key to use for verifying VNC X509 Authentication. Only used if use_x509_auth is set to True. :param force: (Optional, Default: False) If set to true will force the console to be opened as VNC even if it is already opened via some other means. :return: The VNC Port that the terminal is running on. """ # This API can only run if local. if not adapter.traits.local_api: raise pvm_exc.ConsoleNotLocal() # Open the VNC Port. If already open, it will just return the same port, # so no harm re-opening. The stdout will just print out the existing port. 
local_port = open_localhost_vnc_vterm(adapter, lpar_uuid, force=force) # If a VNC path is provided then we have a way to map an incoming # connection to a given LPAR and will use the single 5901 port, otherwise # we need to listen for remote connections on the same port as the local # one so we know which VNC session to forward the connection's data to remote_port = _REMOTE_PORT if vnc_path is not None else local_port if local_port: _VNC_UUID_TO_LOCAL_PORT[lpar_uuid] = local_port # We will use a flag to the Socket Listener to tell it whether the # user provided us a VNC Path we should use to look up the UUID from if vnc_path is not None: verify_vnc_path = True _VNC_PATH_TO_UUID[vnc_path] = lpar_uuid else: verify_vnc_path = False # See if we have a VNC repeater already...if so, nothing to do. If not, # start it up. with lock.lock('powervm_vnc_term'): if remote_port not in _VNC_REMOTE_PORT_TO_LISTENER: listener = _VNCSocketListener( adapter, remote_port, local_ip, verify_vnc_path, remote_ips=remote_ips) # If we are doing x509 Authentication, then setup the certificates if use_x509_auth: listener.set_x509_certificates( ca_certs, server_cert, server_key) _VNC_REMOTE_PORT_TO_LISTENER[remote_port] = listener listener.start() return remote_port def _run_proc(cmd): """Simple wrapper to run a process. Will return the return code along with the stdout and stderr. It is the decision of the caller if it wishes to honor or ignore the return code. :return: The return code, stdout and stderr from the command. 
""" process = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, env=None) process.wait() stdout, stderr = process.communicate() # Convert the stdout/stderr output from a byte-string to a unicode-string # so it doesn't blow up later on anything doing an implicit conversion stdout = encodeutils.safe_decode(stdout) stderr = encodeutils.safe_decode(stderr) return process.returncode, stdout, stderr def _get_lpar_id(adapter, lpar_uuid): lpar_resp = adapter.read(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid, suffix_type='quick', suffix_parm='PartitionID') return lpar_resp.body def _parse_vnc_port(std_out): """Parse the VNC port number out of the standard output from mkvterm. :return: The port number parsed otherwise None if no valid port """ # The first line of the std_out should be the VNC port line = std_out.splitlines()[0] if std_out else None return int(line) if line and line.isdigit() else None class _VNCSocketListener(threading.Thread): """Provides a listener bound to a remote-accessible port for VNC access. The VNC sessions set up by mkvterm only allow access from the localhost, so this listener provides an additional listener on a remote-accessible port to all incoming connections for VNC sessions. This listener may be setup by the caller in a way so that there is only a single remote port for all VNC sessions or that there is one port per VM. This listener will accept incoming connections, establish authentication of the requester (if x509 authentication is enabled), and will determine what LPAR UUID the request is for and establish connections to the local port and setup a repeater to forward the data between the two sides. """ def __init__(self, adapter, remote_port, local_ip, verify_vnc_path, remote_ips=None): """Creates the listener bound to a remote-accessible port. :param adapter: The pypowervm adapter :param remote_port: The port to bind to for remote connections. 
:param local_ip: The IP address to bind the VNC server to. This would be the IP of the management network on the system. :param verify_vnc_path: Boolean to determine whether we verify the vnc_path. :param remote_ips: (Optional, Default: None) A binding to only accept clients that are from a specific list of IP addresses through. Default is None, and therefore will allow any remote IP to connect. """ super(_VNCSocketListener, self).__init__() self.adapter = adapter self.remote_port = remote_port self.local_ip = local_ip self.verify_vnc_path = verify_vnc_path self.remote_ips = remote_ips self.x509_certs = None self.alive = True self.vnc_killer = None def set_x509_certificates(self, ca_certs=None, server_cert=None, server_key=None): """Set the x509 Certificates to use for TLS authentication. :param ca_certs: (Optional, Default: None) Path to CA certificate to use for verifying VNC X509 Authentication. :param server_cert: (Optional, Default: None) Path to Server cert to use for verifying VNC X509 Authentication. :param server_key: (Optional, Default: None) Path to Server private key to use for verifying VNC X509 Authentication. """ self.x509_certs = dict( ca_certs=ca_certs, server_cert=server_cert, server_key=server_key) def stop(self): """Stops the listener from running.""" # This will stop listening for all clients self.alive = False # Remove ourselves from the VNC listeners. 
if self.remote_port in _VNC_REMOTE_PORT_TO_LISTENER: del _VNC_REMOTE_PORT_TO_LISTENER[self.remote_port] def run(self): """Used by the thread to run the listener.""" family = socket.AF_INET6 if ':' in self.local_ip else socket.AF_INET server = socket.socket(family, socket.SOCK_STREAM) server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server.bind((self.local_ip, self.remote_port)) LOG.info(_("VNCSocket Listener Listening on ip=%(ip)s port=%(port)s") % {'ip': self.local_ip, 'port': self.remote_port}) server.listen(10) while self.alive: # Listen on the server socket for incoming connections s_inputs = select.select([server], [], [], 1)[0] for s_input in s_inputs: # Establish a new client connection & repeater between the two self._new_client(s_input) server.close() def _new_client(self, server): """Listens for a new client. :param server: The server socket. """ # This is the socket FROM the client side. client_addr is a tuple # of format ('1.2.3.4', '5678') - ip and port. client_socket, client_addr = server.accept() LOG.debug("New Client socket accepted client_addr=%s" % client_addr[0]) # If only select IPs are allowed through, validate if (self.remote_ips is not None and client_addr[0] not in self.remote_ips): # Close the connection, exit. client_socket.close() return # If they gave use a VNC Path to look for in the connection string # then we will do that now otherwise just skip over the header info if self.verify_vnc_path: # Check to ensure that there is output waiting. c_input = select.select([client_socket], [], [], 1)[0] # If no input, then just assume a close. We waited a second. if not c_input: # Assume HTTP 1.1. All clients should support. We have no # input, so we don't know what protocol they would like. client_socket.sendall("HTTP/1.1 400 Bad Request\r\n\r\n") client_socket.close() return # We know we had data waiting. Receive (at max) the vnc_path # string. All data after this validation string is the # actual VNC data. 
lpar_uuid, http_code = self._check_http_connect(client_socket) if lpar_uuid: # Send back the success message. client_socket.sendall("HTTP/%s 200 OK\r\n\r\n" % http_code) else: # Was not a success, exit. client_socket.sendall("HTTP/%s 400 Bad Request\r\n\r\n" % http_code) client_socket.close() return # If we had no VNC Path to match against, then the local port is # going to be the same as the remote port and we need to figure # out what the LPAR UUID is for that given local port VNC session else: lpar_uuid = next(k for k, v in _VNC_UUID_TO_LOCAL_PORT.items() if v == self.remote_port) # Setup the forwarding socket to the local LinuxVNC session self._setup_forwarding_socket(lpar_uuid, client_socket) def _setup_forwarding_socket(self, lpar_uuid, client_socket): """Setup the forwarding socket to the local LinuxVNC session. :param lpar_uuid: The UUID of the lpar for which we are forwarding. :param client_socket: The client-side socket to receive data from. """ local_port = _VNC_UUID_TO_LOCAL_PORT.get(lpar_uuid) # If for some reason no mapping to a local port, then give up if local_port is None: client_socket.close() # Get the forwarder. This will be the socket we read FROM the # localhost. When this receives data, it will be sent to the client # socket. 
fwd = socket.socket(socket.AF_INET, socket.SOCK_STREAM) fwd.connect(('127.0.0.1', local_port)) # If we were told to enable VeNCrypt using X509 Authentication, do so if self.x509_certs is not None: ssl_socket = self._enable_x509_authentication(client_socket, fwd) # If there was an error enabling SSL, then close the sockets if ssl_socket is None: client_socket.close() fwd.close() return client_socket = ssl_socket # See if we need to start up a new repeater for the given local port if local_port not in _VNC_LOCAL_PORT_TO_REPEATER: _VNC_LOCAL_PORT_TO_REPEATER[local_port] = _VNCRepeaterServer( self.adapter, lpar_uuid, local_port, client_socket, fwd) _VNC_LOCAL_PORT_TO_REPEATER[local_port].start() else: repeater = _VNC_LOCAL_PORT_TO_REPEATER[local_port] repeater.add_socket_connection_pair(client_socket, fwd) def _enable_x509_authentication(self, client_socket, server_socket): """Enables and Handshakes VeNCrypt using X509 Authentication. :param client_socket: The client-side socket to receive data from. :param server_socket: The server-side socket to forward data to. :return ssl_socket: A client-side socket wrappered for SSL or None if there is an error. 
""" try: # First perform the RFB Version negotiation between client/server self._version_negotiation(client_socket, server_socket) # Next perform the Security Authentication Type Negotiation if not self._auth_type_negotiation(client_socket): return None # Next perform the Security Authentication SubType Negotiation if not self._auth_subtype_negotiation(client_socket): return None # Now that the VeNCrypt handshake is done, do the SSL wrapper ca_certs = self.x509_certs.get('ca_certs') server_key = self.x509_certs.get('server_key') server_cert = self.x509_certs.get('server_cert') return ssl.wrap_socket( client_socket, server_side=True, ca_certs=ca_certs, certfile=server_cert, keyfile=server_key, ssl_version=ssl.PROTOCOL_TLSv1_2, cert_reqs=ssl.CERT_REQUIRED) # If we got an error, log and handle to not take down the thread except Exception as exc: LOG.warning(_("Error negotiating SSL for VNC Repeater: %s") % exc) LOG.exception(exc) return None def _version_negotiation(self, client_socket, server_socket): """Performs the RFB Version negotiation between client/server. :param client_socket: The client-side socket to receive data from. :param server_socket: The server-side socket to forward data to. """ # Do a pass-thru of the RFB Version negotiation up-front # The length of the version is 12, such as 'RFB 003.007\n' client_socket.sendall(self._socket_receive(server_socket, 12)) server_socket.sendall(self._socket_receive(client_socket, 12)) # Since we are doing our own additional authentication # just tell the server we are doing No Authentication (1) to it auth_size = self._socket_receive(server_socket, 1) self._socket_receive(server_socket, six.byte2int(auth_size)) server_socket.sendall(six.int2byte(1)) def _auth_type_negotiation(self, client_socket): """Performs the VeNCrypt Authentication Type Negotiation. :param client_socket: The client-side socket to receive data from. :return success: Boolean whether the handshake was successful. 
""" # Do the VeNCrypt handshake next before establishing SSL # Say we only support VeNCrypt (19) authentication version 0.2 client_socket.sendall(six.int2byte(1)) client_socket.sendall(six.int2byte(19)) client_socket.sendall("\x00\x02") authtype = self._socket_receive(client_socket, 1) # Make sure the Client supports the VeNCrypt (19) authentication if len(authtype) < 1 or six.byte2int(authtype) != 19: # Send a 1 telling the client the type wasn't accepted client_socket.sendall(six.int2byte(1)) return False vers = self._socket_receive(client_socket, 2) # Make sure the Client supports at least version 0.2 of it if ((len(vers) < 2 or six.byte2int(vers) != 0 or six.byte2int(vers[1:]) < 2)): # Send a 1 telling the client the type wasn't accepted client_socket.sendall(six.int2byte(1)) return False # Tell the Client we have accepted the authentication type # In this particular case 0 means the type was accepted client_socket.sendall(six.int2byte(0)) return True def _auth_subtype_negotiation(self, client_socket): """Performs the x509None Authentication Sub-Type Negotiation. :param client_socket: The client-side socket to receive data from. :return success: Boolean whether the handshake was successful. """ # Tell the client the authentication sub-type is x509None (260) client_socket.sendall(six.int2byte(1)) client_socket.sendall(struct.pack('!I', 260)) subtyp_raw = self._socket_receive(client_socket, 4) # Make sure that the client also supports sub-type x509None (260) if 260 not in struct.unpack('!I', subtyp_raw): # Send a 0 telling the client the sub-type wasn't accepted client_socket.sendall(six.int2byte(0)) return False # Tell the Client we have accepted the authentication handshake # In this particular case 1 means the sub-type was accepted client_socket.sendall(six.int2byte(1)) return True def _socket_receive(self, asocket, bufsize): """Helper method to add a timeout on each receive call. This method will raise a timeout exception if it takes > 30 seconds. 
:param asocket: The socket to do the receive on. :param bufsize: The number of bytes to receive. :return data: The data returned from the socket receive. """ # Add a 30 second timeout around the receive so that we don't # block forever if for some reason it never received the packet if not select.select([asocket], [], [], 30)[0]: raise socket.timeout('30 second timeout on handshake receive') return asocket.recv(bufsize) def _check_http_connect(self, client_socket): """Parse the HTTP connect string to find the LPAR UUID. :param client_socket: The client socket sending the data. :returns lpar_uuid: The LPAR UUID parsed from the connect string. :returns http_code: The HTTP Connection code used for the client connection. """ # Get the expected header. # We don't know how large the identifier will be, so use 500 as max. # If the identifier is less than 500, it will not return as many bytes. header_len = len("CONNECT %s HTTP/1.1\r\n\r\n" % ('x' * 500)) value = client_socket.recv(header_len) # Find the HTTP Code (if you can...) pat = r'^CONNECT\s+(\S+)\s+HTTP/(.*)\r\n\r\n$' res = re.match(pat, value) vnc_path = res.groups()[0] if res else None http_code = res.groups()[1] if res else '1.1' return _VNC_PATH_TO_UUID.get(vnc_path), http_code class _VNCRepeaterServer(threading.Thread): """Repeats a VNC connection from localhost to a given client. This class is separated out from the Socket Listener so that there can be one thread doing the actual repeating/forwarded of the data for the VNC sessions for a single LPAR. Otherwise if there are sessions to a lot of LPAR's with sessions, one overall thread might get overloaded. This class will be provided a pair of peer socket connections and will listen for data from each of them and forward to the other until the connection on one side goes down in which it will close the connection to the other side. 
Also, if no connections are open for a given local port VNC session, after a 5 minute window it will run rmvterm to close the terminal console to clean up sessions that are no longer being used. """ def __init__(self, adapter, lpar_uuid, local_port, client_socket=None, local_socket=None): """Creates the repeater. :param adapter: The pypowervm adapter :param lpar_uuid: Partition UUID. :param local_port: The local port bound to by the VNC session. :param client_socket: (Optional, Default: None) The socket descriptor of the incoming client connection. :param local_socket: (Optional, Default: None) The socket descriptor of the VNC session connection forwarding data to. """ super(_VNCRepeaterServer, self).__init__() self.peers = dict() self.adapter = adapter self.lpar_uuid = lpar_uuid self.local_port = local_port self.alive = True self.vnc_killer = None # Add the connection passed into us to the forwarding list if client_socket is not None and local_socket is not None: self.add_socket_connection_pair(client_socket, local_socket) def stop(self): """Stops the repeater from running.""" # This will stop listening for all clients self.alive = False # Remove ourselves from the VNC listeners. if self.local_port in _VNC_LOCAL_PORT_TO_REPEATER: del _VNC_LOCAL_PORT_TO_REPEATER[self.local_port] def run(self): """Used by the thread to run the repeater.""" while self.alive: # Do a select to wait for data on each of the socket connections input_list = list(self.peers) s_inputs = select.select(input_list, [], [], 1)[0] for s_input in s_inputs: # At this point, we need to read the data. We know that data # is ready. However, if that data that is ready is length # 0, then we know that we're ready to close this. data = s_input.recv(4096) if len(data) == 0: self._close_client(s_input) # Note that we have to break here. We do that because the # peer dictionary has changed with the close. So the list # to iterate over should be re-evaluated. 
# The remaining inputs will just be picked up on the next # pass, so nothing to worry about. break # Just process the data. self.peers[s_input].send(data) # At this point, force a close on all remaining inputs. for input_socket in self.peers: input_socket.close() def add_socket_connection_pair(self, client_socket, local_socket): """Adds the pair of socket connections to the list to forward data for. :param client_socket: The client-side incoming socket. :param local_socket: The local socket for the VNC session. """ self.peers[local_socket] = client_socket self.peers[client_socket] = local_socket # If for some reason the VNC was being killed, abort it if self.vnc_killer is not None: self.vnc_killer.abort() self.vnc_killer = None def _close_client(self, s_input): """Closes down a client. :param s_input: The socket that has received a close. """ # Close the sockets peer = self.peers[s_input] peer.close() s_input.close() # And remove from the peer list, so that we've removed all pointers to # them del self.peers[peer] del self.peers[s_input] # If this was the last port, close the local connection if len(self.peers) == 0: self.vnc_killer = _VNCKiller(self.adapter, self.lpar_uuid) self.vnc_killer.start() class _VNCKiller(threading.Thread): """The VNC Killer is a thread that will eventually close the VNC. The VNC Repeater could run indefinitely, whether clients are connected to it or not. This class will wait a period of time (5 minutes) and if the abort has not been called, will fully close the vterm. This is used in orchestration with the VNCRepeaterServer. The intention is, if the user quickly navigates off the VNC, they can come back without losing their whole session. But if they wait up to 5 minutes, then the session will be closed out and the memory will be reclaimed. 
""" def __init__(self, adapter, lpar_uuid): super(_VNCKiller, self).__init__() self.adapter = adapter self.lpar_uuid = lpar_uuid self._abort = False def abort(self): """Call to stop the killer from completing its job.""" self._abort = True def run(self): count = 0 # Wait up to 5 minutes to see if any new negotiations came in while count < 300 and not self._abort: time.sleep(1) if self._abort: break count += 1 if not self._abort: _close_vterm_local(self.adapter, self.lpar_uuid) pypowervm-1.1.24/pypowervm/tasks/client_storage.py0000664000175000017500000000551213571367171022024 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pypowervm import util def udid_to_scsi_mapping(vios_w, udid, lpar_id, ignore_orphan=True): """Finds the SCSI mapping (if any) for a given backing storage udid. This is a helper method that will parse through a given VIOS wrapper (retrieved with pypowervm.const.XAG.VIO_SMAP) and will find the client SCSI mapping for a given backing storage element (LU, PV, LV, VOpt). :param vios_w: The Virtual I/O Server wrapper. Should have the Storage and SCSI mapping XAG associated with it. :param udid: The volume's udid. :param lpar_id: The LPARs 'short' id. :param ignore_orphan: (Optional, Default: True) If set to True, any orphan SCSI mappings (those with no client adapter) will be ignored. :return: The first matching SCSI mapping (or None). 
""" for scsi_map in vios_w.scsi_mappings: # No backing storage, then ignore. if not scsi_map.backing_storage: continue # If there is not a client adapter, it isn't attached fully. if not scsi_map.client_adapter and ignore_orphan: continue # Is it for the right LPAR? (The server adapter is present even if # it's an orphan.) if lpar_id != scsi_map.server_adapter.lpar_id: continue if scsi_map.backing_storage.udid == udid: return scsi_map return None def c_wwpn_to_vfc_mapping(vios_w, c_wwpn): """Finds the vFC mapping (if any) for a given client WWPN. This is a helper method that will parse through a given VIOS wrapper (retrieved with pypowervm.const.XAG.VIO_FMAP) and will find the client vFC mapping for that WWPN. :param vios_w: The Virtual I/O Server wrapper. Should have pypowervm.const.XAG.VIO_FMAP associated with it. :param c_wwpn: One of the client's WWPNs. :return: The vFC mapping (or None) """ wwpn = util.sanitize_wwpn_for_api(c_wwpn) for vfc_map in vios_w.vfc_mappings: # If there is not a client adapter, it isn't properly attached. if not vfc_map.client_adapter: continue if wwpn in vfc_map.client_adapter.wwpns: return vfc_map return None pypowervm-1.1.24/pypowervm/tasks/management_console.py0000664000175000017500000000407313571367171022661 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from pypowervm.utils import transaction as trans from pypowervm.wrappers import management_console as mc def get_public_key(adapter): """Get the public key for the management console. :param adapter: The adapter for the pypowervm API. :return: The public key """ # Get the consoles feed and use the first, there is just one. console = mc.ManagementConsole.wrap( adapter.read(mc.ManagementConsole.schema_type))[0] return console.ssh_public_key def add_authorized_key(adapter, public_key): """Add an authorized public key to the management console. The public_key will be added if it doesn't already exist. :param adapter: The adapter for the pypowervm API. :param public_key: The public key to be added. """ console_w = mc.ManagementConsole.wrap( adapter.read(mc.ManagementConsole.schema_type))[0] @trans.entry_transaction def run_update(console): keys = console.ssh_authorized_keys if public_key not in keys: keys = list(keys) keys.append(public_key) console.ssh_authorized_keys = keys console.update() run_update(console_w) def get_authorized_keys(adapter): """Get all authorized keys on the management console. :param adapter: The adapter for the pypowervm API. """ console_w = mc.ManagementConsole.wrap( adapter.read(mc.ManagementConsole.schema_type))[0] return console_w.ssh_authorized_keys pypowervm-1.1.24/pypowervm/tasks/memory.py0000664000175000017500000001230113571367171020324 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@lgc.logcall
def calculate_memory_overhead_on_host(adapter, host_uuid,
                                      reserved_mem_data=None, lmb_size=None,
                                      default=512):
    """Calculate host memory overhead.

    A certain amount of additional memory, such as memory for firmware, is
    required from the host during the creation of an instance or while
    changing an instance's memory specifications. This method queries the
    host to get the reserved PHYP memory needed for an LPAR. The calculation
    is based off of the instance's max memory requested, network and I/O
    adapter configurations, and the host's HPT ratio. The job response
    contains a value for total memory required to create or change the LPAR,
    which is desired memory plus reserved PHYP memory.

    :param adapter: pypowervm adapter
    :param host_uuid: the UUID of the host
    :param reserved_mem_data: (Optional) dictionary with values for job
                              params
                              {'desired_mem': int,
                               'max_mem': int,
                               'lpar_env': 'AIX/Linux' OR 'OS400',
                               'num_virt_eth_adapters': int,
                               'num_vscsi_adapters': int,
                               'num_vfc_adapters': int}
    :param lmb_size: (Optional) logical memory block size
    :param default: (Optional) default value to use for required memory
                    overhead value if there was an error with the job
    :return overhead: reserved host memory
    :return avail_mem: available host memory
    """
    # BUG FIX: the default used to be a mutable dict literal ({}), which is
    # created once at import time and shared across every call.  Default to
    # None and build a fresh dict per call instead; callers passing nothing
    # see identical behavior.
    if reserved_mem_data is None:
        reserved_mem_data = {}

    # If desired memory and maximum memory are not known, this query is
    # part of calculating host stats, and specific configurations of an
    # instance is not known. This will use the config option for a default
    # maximum memory.
    desired_mem = reserved_mem_data.get('desired_mem', 512)
    max_mem = reserved_mem_data.get('max_mem', 32768)
    # If lmb size is given, round max mem up to be a multiple
    # of lmb size. If max_mem is 0, max_mem will be set to lmb size.
    if lmb_size is not None:
        max_mem = int(math.ceil((max_mem or 1) / float(lmb_size))
                      * int(lmb_size))
    lpar_env = reserved_mem_data.get('lpar_env', pvm_bp.LPARType.AIXLINUX)
    num_virt_eth_adapter = reserved_mem_data.get('num_virt_eth_adapters', 2)
    num_vscsi_adapter = reserved_mem_data.get('num_vscsi_adapters', 1)
    num_vfc_adapter = reserved_mem_data.get('num_vfc_adapters', 1)
    job_wrapper = job.Job.wrap(adapter.read(pvm_ms.System.schema_type,
                                            host_uuid,
                                            suffix_type=c.SUFFIX_TYPE_DO,
                                            suffix_parm=('QueryReservedMemory'
                                                         'RequiredFor'
                                                         'Partition')))

    # Create job parameters
    job_parms = [job_wrapper.create_job_parameter(
        'LogicalPartitionEnvironment', lpar_env)]
    job_parms.append(job_wrapper.create_job_parameter(
        'DesiredMemory', str(desired_mem)))
    job_parms.append(job_wrapper.create_job_parameter(
        'MaximumMemory', str(max_mem)))
    job_parms.append(job_wrapper.create_job_parameter(
        'NumberOfVirtualEthernetAdapter', str(num_virt_eth_adapter)))
    job_parms.append(job_wrapper.create_job_parameter(
        'NumberOfVirtualSCSIAdapter', str(num_vscsi_adapter)))
    job_parms.append(job_wrapper.create_job_parameter(
        'NumberOfVirtualFibreChannelAdapter', str(num_vfc_adapter)))

    try:
        job_wrapper.run_job(host_uuid, job_parms=job_parms, timeout=120)
        results = job_wrapper.get_job_results_as_dict()
    except Exception as error:
        # Best effort: on any job failure fall back to the caller-supplied
        # default overhead with no available-memory figure.
        LOG.error(_("Error obtaining host memory overhead for host "
                    "with UUID '%(host)s': %(error)s.") %
                  {'host': host_uuid, 'error': error})
        LOG.debug("Defaulting required memory overhead for host with UUID "
                  "'%s' to %d MB" % (host_uuid, default))
        return default, None

    required_mem = results.get('RequiredMemory')
    avail_mem = results.get('CurrentAvailableSystemMemory')
    # RequiredMemory is desired memory plus the PHYP reservation, so the
    # overhead is the difference from the desired memory we asked about.
    if required_mem is not None:
        overhead = int(required_mem) - desired_mem
    else:
        overhead = default
    if avail_mem is not None:
        avail_mem = int(avail_mem)

    return overhead, avail_mem
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tasks related to objects that will appear as hard disks to VMs.""" from pypowervm.tasks.hdisk import _fc from pypowervm.tasks.hdisk import _iscsi from pypowervm.tasks.hdisk import _rbd LUAType = _fc.LUAType LUAStatus = _fc.LUAStatus normalize_lun = _fc.normalize_lun ITL = _fc.ITL good_discovery = _fc.good_discovery build_itls = _fc.build_itls discover_hdisk = _fc.discover_hdisk lua_recovery = _fc.lua_recovery remove_hdisk = _fc.remove_hdisk get_pg83_via_job = _fc.get_pg83_via_job discover_iscsi = _iscsi.discover_iscsi discover_iscsi_initiator = _iscsi.discover_iscsi_initiator remove_iscsi = _iscsi.remove_iscsi rbd_exists = _rbd.rbd_exists pypowervm-1.1.24/pypowervm/tasks/hdisk/_iscsi.py0000664000175000017500000003645713571367171021411 0ustar neoneo00000000000000# Copyright 2016, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def _log_iscsi_status(status):
    """Logs the message based on the status code.

    :param status: An ISCSIStatus return code (string) from an iSCSI job.
    """
    if status == ISCSIStatus.ISCSI_SUCCESS:
        LOG.info(_("ISCSI command completed successfully"))
    elif status == ISCSIStatus.ISCSI_ERR_SESS_EXISTS:
        LOG.info(_("ISCSI session already exists and logged in"))
    elif status == ISCSIStatus.ISCSI_COMMAND_NOT_FOUND:
        LOG.warning(_("ISCSI command performed on unsupported VIOS, "
                      "host."))
    elif status == ISCSIStatus.ISCSI_ERR_ODM_QUERY:
        LOG.warning(_("ISCSI discovery found stale entries in "
                      "the ODM database."))
    elif status == ISCSIStatus.ISCSI_ERR_SESS_NOT_FOUND:
        LOG.warning(_("ISCSI session could not be found "))
    elif status == ISCSIStatus.ISCSI_ERR_NO_OBJS_FOUND:
        LOG.warning(_("No records/targets/sessions/portals "
                      "found to execute operation on"))
    elif status == ISCSIStatus.ISCSI_ERR_INTERNAL:
        LOG.error(_("ISCSI command failed with internal error "
                    "status = %s"), status)
    elif status == ISCSIStatus.ISCSI_ERR:
        LOG.error(_("ISCSI generic error code"))
    elif status == ISCSIStatus.ISCSI_ERR_LOGIN:
        LOG.error(_("ISCSI session login failure"))
    elif status == ISCSIStatus.ISCSI_ERR_INVAL:
        LOG.error(_("ISCSI command invalid arguments"))
    elif status == ISCSIStatus.ISCSI_ERR_TRANS_TIMEOUT:
        # Typo fix: message previously read "timer exired".
        LOG.error(_("ISCSI connection timer expired while trying to "
                    "connect."))
    elif status == ISCSIStatus.ISCSI_ERR_HOST_NOT_FOUND:
        LOG.error(_("ISCSI command could not lookup host"))
    else:
        # Use lazy %-style logging args (consistent with the branches above)
        # instead of eagerly interpolating with %.
        LOG.error(_('ISCSI command returned unexpected status = %s'), status)
""" for dev in cmd_output: try: outiqn, outname, udid = dev.split() if ((isinstance(iqn, six.string_types) and outiqn == iqn) or outiqn in iqn): return outname, udid except ValueError: LOG.warning("Invalid device output: %(dev)s" % {'dev': dev}) continue LOG.error("Expected IQN %(IQN)s not found on iscsi target %(host_ip)s" % {'IQN': iqn, 'host_ip': host_ip}) return None, None def _process_iscsi_result(result, iqn, host_ip): """Process iSCSIDiscovery Job results Checks the job result return status code and return. :param result: ISCSI command job result. :param iqn: The IQN or list of IQNs for the created volume on the target. :host_ip: The portal or list of portals for the iscsi target. :return: status, device_name and udid """ status = result.get('RETURN_CODE') # Ignore if command performed on unsupported AIX VIOS if not status: LOG.warning("ISCSI discovery job failed, no command status returned") return None, None, None if status == ISCSIStatus.ISCSI_COMMAND_NOT_FOUND: LOG.warning(_("ISCSI command performed on unsupported VIOS ")) return None, None, None # DEV_OUTPUT: ["IQN1 dev1 udid", "IQN2 dev2 udid"] output = ast.literal_eval(result.get('DEV_OUTPUT', '[]')) # Find dev corresponding to given IQN dev_name, udid = _find_dev_by_iqn(output, iqn, host_ip) return status, dev_name, udid def _add_parameter(job_parms, name, value): """Adds key/value to job parameter list Checks for null value and does any conversion needed for value to be a string :param job_parms: List of parameters for the job which will be updated by this method. 
def _discover_iscsi(adapter, host_ip, vios_uuid, multipath, **kwargs):
    """Runs iscsi discovery and login job

    :param adapter: pypowervm adapter
    :param host_ip: The portal or list of portals for the iscsi target.  A
                    portal looks like ip:port.
    :param vios_uuid: The uuid of the VIOS (VIOS must be a Novalink VIOS
                      type).
    :param multipath: Whether the connection is multipath or not.
    :param kwargs: List of iSCSI authentication parameters.
    :return: status code of the iSCSIDiscover job
    :return: The device name of the created volume.
    :return: The UniqueDeviceId of the create volume.
    """
    resp = adapter.read(VIOS.schema_type, vios_uuid,
                        suffix_type=c.SUFFIX_TYPE_DO,
                        suffix_parm=(_JOB_NAME))
    job_wrapper = job.Job.wrap(resp)

    # Table of job parameter names and values; _add_parameter drops any
    # entries with empty values for us.
    param_table = [
        ('auth', kwargs.get('auth')),
        ('user', kwargs.get('user')),
        ('password', kwargs.get('password')),
        ('ifaceName',
         kwargs.get('iface_name') or kwargs.get('transport_type')),
        ('targetIQN', kwargs.get('iqn')),
        ('hostIP', host_ip),
        ('targetLUN', kwargs.get('lunid')),
        ('multipath', multipath),
    ]
    if multipath:
        # Discovery-session credentials only apply to multipath connections.
        param_table.extend([
            ('discoveryAuth', kwargs.get('discovery_auth')),
            ('discoveryUser', kwargs.get('discovery_username')),
            ('discoveryPassword', kwargs.get('discovery_password')),
        ])

    job_parms = []
    for pname, pval in param_table:
        _add_parameter(job_parms, pname, pval)

    try:
        job_wrapper.run_job(vios_uuid, job_parms=job_parms, timeout=120)
    except pexc.JobRequestFailed:
        with excutils.save_and_reraise_exception(reraise=False) as exc_ctx:
            # Process return code if available, else re-raise
            results = job_wrapper.get_job_results_as_dict()
            if not results.get('RETURN_CODE', None):
                LOG.error("iSCSI Discovery Job Failed, no RETURN_CODE.")
                exc_ctx.reraise = True

    results = job_wrapper.get_job_results_as_dict()
    return _process_iscsi_result(results, kwargs.get('iqn'), host_ip)
:raise: JobRequestFailed in case of failure """ kwargs = { 'user': user, 'password': password, 'iqn': iqn, 'transport_type': transport_type, 'lunid': lunid, 'iface_name': iface_name, 'auth': auth, 'discovery_auth': discovery_auth, 'discovery_username': discovery_username, 'discovery_password': discovery_password } status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid, multipath, **kwargs) if status: _log_iscsi_status(status) # If status is ISCSI_ERR_ODM_QUERY, then there are chance of stale iscsi # disks, cleanup and re-discover. if status == ISCSIStatus.ISCSI_ERR_ODM_QUERY: vwrap = VIOS.get(adapter, uuid=vios_uuid, xag=[c.XAG.VIO_SMAP]) # Check for stale lpars with SCSI mappings scrub_ids = tsk_stg.find_stale_lpars(vwrap) if scrub_ids: LOG.info(_("Scrub stale storage for LPAR IDs %s and " "retry iSCSI discovery."), scrub_ids) # Scrub from just the VIOS in question. scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap]) tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task) scrub_task.execute() # iSCSI Discover does not autoclean the hdisk, so remove iscsi hdisk. remove_iscsi(adapter, iqn, vios_uuid, iface_name, lunid, host_ip, multipath) # Re-discover the volume status, devname, udid = _discover_iscsi(adapter, host_ip, vios_uuid, multipath, **kwargs) if not good_discovery(status, devname): raise pexc.ISCSIDiscoveryFailed(vios_uuid=vios_uuid, status=status) return devname, udid def discover_iscsi_initiator(adapter, vios_uuid): """Discovers the initiator name. :param adapter: pypowervm adapter :param vios_uuid: The uuid of the VIOS (VIOS must be a Novalink VIOS type). :return: The iscsi initiator name. :raise: ISCSIDiscoveryFailed in case of failure. """ resp = adapter.read(VIOS.schema_type, vios_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=(_JOB_NAME)) job_wrapper = job.Job.wrap(resp) job_wrapper.run_job(vios_uuid, timeout=120) results = job_wrapper.get_job_results_as_dict() # process iscsi return code. 
status = results.get('RETURN_CODE') if status not in _GOOD_DISCOVERY_STATUSES: raise pexc.ISCSIDiscoveryFailed(vios_uuid=vios_uuid, status=status) # InitiatorName: iqn.2010-10.org.openstack:volume-4a75e9f7-dfa3 return results.get('InitiatorName') def remove_iscsi(adapter, targetIQN, vios_uuid, iface_name=None, lun=None, portal=None, multipath=False): """Remove an iSCSI lun from a session. If the last lun was removed from the session, also logout of the session. The iSCSI volume with the given targetIQN must not have any mappings from the VIOS to a client when this is called. :param adapter: pypowervm adapter :param targetIQN: The IQN (iSCSI Qualified Name) or list of IQNs for the created volume on the target. (e.g. iqn.2016-06.world.srv:target00) :param vios_uuid: The uuid of the VIOS (VIOS must be a Novalink VIOS type). :param iface_name: Name of the iface used for the connection. :param lun: The lun or list of luns to be removed. :param portal: The portal or list of portals associated with the created volume (ip:port). :param multipath: Whether the connection is multipath or not. :raise: ISCSIRemoveFailed in case of bad return code. :raise: JobRequestFailed in case of failure. 
""" resp = adapter.read(VIOS.schema_type, vios_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=(_ISCSI_REMOVE)) job_wrapper = job.Job.wrap(resp) job_parms = [] _add_parameter(job_parms, 'ifaceName', iface_name) _add_parameter(job_parms, 'targetIQN', targetIQN) _add_parameter(job_parms, 'targetPORTAL', portal) _add_parameter(job_parms, 'targetLUN', lun) _add_parameter(job_parms, 'multipath', multipath) try: job_wrapper.run_job(vios_uuid, job_parms=job_parms, timeout=120) except pexc.JobRequestFailed: results = job_wrapper.get_job_results_as_dict() # Ignore if the command is performed on NotSupported AIX VIOS if results.get('RETURN_CODE') != ISCSIStatus.ISCSI_COMMAND_NOT_FOUND: raise return results = job_wrapper.get_job_results_as_dict() status = results.get('RETURN_CODE') if status not in _GOOD_REMOVE_STATUSES: raise pexc.ISCSIRemoveFailed(vios_uuid=vios_uuid, status=status) pypowervm-1.1.24/pypowervm/tasks/hdisk/_fc.py0000664000175000017500000004643513571367171020664 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tasks around VIOS-backed 'physical' fibre channel disks.""" import itertools from lxml import etree from oslo_log import log as logging from pypowervm import const as c import pypowervm.entities as ent import pypowervm.exceptions as pexc from pypowervm.i18n import _ import pypowervm.tasks.storage as tsk_stg import pypowervm.utils.transaction as tx from pypowervm.wrappers import job as pvm_job from pypowervm.wrappers import virtual_io_server as pvm_vios LOG = logging.getLogger(__name__) _LUA_CMD_VERSION = '3' _LUA_VERSION = '2.0' _LUA_RECOVERY = 'LUARecovery' _RM_HDISK = 'RemoveDevice' _MGT_CONSOLE = 'ManagementConsole' class LUAType(object): """LUA Vendors.""" IBM = "IBM" EMC = "EMC" NETAPP = "NETAPP" HDS = "HDS" HP = "HP" OTHER = "OTHER" class LUAStatus(object): """LUA Recovery status codes.""" DEVICE_IN_USE = '1' ITL_NOT_RELIABLE = '2' DEVICE_AVAILABLE = '3' STORAGE_NOT_INTEREST = '4' LUA_NOT_INTEREST = '5' INCORRECT_ITL = '6' FOUND_DEVICE_UNKNOWN_UDID = '7' FOUND_ITL_ERR = '8' def normalize_lun(scsi_id): """Normalize the lun id to Big Endian :param scsi_id: Volume lun id :return: Converted LUN id in Big Endian as per the RFC 4455 """ # PowerVM keeps LUN identifiers in hex format. lun = '%x' % int(scsi_id) # For drivers which support complex LUA lun-id exceeding more than 2 # bytes in such cases we need to append 8 zeros else 12 zeros to # pass 8 byte lun-id if len(lun) == 8: lun += "00000000" else: lun += "000000000000" return lun class ITL(object): """The Nexus ITL. See SCSI ITL. This is the grouping of the SCSI initiator, target and LUN. """ def __init__(self, initiator, target, lun): """Create the ITL. :param initiator: The initiator WWPN. :param target: The target WWPN. :param lun: The LUN identifier. Ex. 2 (an int). The identifier will be formatted from a generic integer LUN ID to match PowerVM's LUN Identifier format. 
""" self.initiator = initiator.lower().replace(':', '') self.target = target.lower().replace(':', '') self.lun = normalize_lun(lun) def __eq__(self, other): if other is None or not isinstance(other, ITL): return False return (self.initiator == other.initiator and self.target == other.target and self.lun == other.lun) def __hash__(self): return hash(self.initiator) ^ hash(self.target) ^ hash(self.lun) def __ne__(self, other): return not self.__eq__(other) def good_discovery(status, device_name): """Checks the hdisk discovery results for a good discovery. Acceptable LUA discovery statuses are :- DEVICE_AVAILABLE: hdisk discovered on all the ITL paths and available. DEVICE_IN_USE: hdisk discovered on all the ITL paths and is in-use by the server. FOUND_ITL_ERR: hdisk is discovered on some of the ITL paths and available. This can happen if there are multiple ITL nexus paths are passed, and hdisk is discovered on few of the paths only. This can happen if multiple target wwpns and vios wwpns exists and only few are connected. If hdisk can be discovered on ANY of the paths its considered for good discovery. """ return device_name is not None and status in [ LUAStatus.DEVICE_AVAILABLE, LUAStatus.DEVICE_IN_USE, LUAStatus.FOUND_ITL_ERR] def build_itls(i_wwpns, t_wwpns, lun): """This method builds the list of ITLs for all of the permutations. An ITL is specific to an initiator, target, and LUN. However, with multi pathing, there are several scenarios where a given LUN will have many ITLs because of multiple initiators or targets. The initiators should be tied to a given Virtual I/O Server (or perhaps specific WWPNs within a VIOS). :param i_wwpns: List or set of initiator WWPNs. :param t_wwpns: List or set of target WWPNs. :param lun: The LUN identifier. Ex. 2 (an int). The identifier will be formatted from a generic integer LUN ID to match PowerVM's LUN Identifier format. :return: List of all the ITL permutations. 
""" return [ITL(i, t, lun) for i, t in itertools.product(i_wwpns, t_wwpns)] def discover_hdisk(adapter, vios_uuid, itls, vendor=LUAType.OTHER, device_id=None): """Attempt to discover a hard disk attached to a Virtual I/O Server. See lua_recovery. This method attempts that call and analyzes the results. On certain failure conditions (see below), this method will find stale LPARs, scrub storage artifacts associated with them, and then retry lua_recovery. The retry is only attempted once; that result is returned regardless. The main objective of this method is to resolve errors resulting from incomplete cleanup of previous LPARs. The stale LPAR's storage mappings can cause hdisk discovery to fail because it thinks the hdisk is already in use. Retry conditions: The scrub-and-retry will be triggered if: o dev_name is None; or o status is anything other than DEVICE_AVAILABLE or FOUND_ITL_ERR. (The latter is acceptable because it means we discovered some, but not all, of the ITLs. This is okay as long as dev_name is set.) :param adapter: The pypowervm adapter. :param vios_uuid: The Virtual I/O Server UUID. :param itls: A list of ITL objects. :param vendor: The vendor for the LUN. See the LUAType.* constants. :param device_id: The device ID parameter in the LUA input XML. Typically the base 64 encoded pg83 value. :return status: The status code from the discover process. See LUAStatus.* constants. :return dev_name: The name of the discovered hdisk. :return udid: The UDID of the device. """ # First attempt status, devname, udid = lua_recovery(adapter, vios_uuid, itls, vendor=vendor, device_id=device_id) # Do we need to scrub and retry? 
if not good_discovery(status, devname): vwrap = pvm_vios.VIOS.get(adapter, uuid=vios_uuid, xag=(c.XAG.VIO_SMAP, c.XAG.VIO_FMAP)) scrub_ids = tsk_stg.find_stale_lpars(vwrap) if scrub_ids: # Detailed warning message by _log_lua_status LOG.warning(_("hdisk discovery failed; will scrub stale storage " "for LPAR IDs %s and retry."), scrub_ids) # Scrub from just the VIOS in question. scrub_task = tx.FeedTask('scrub_vios_%s' % vios_uuid, [vwrap]) tsk_stg.add_lpar_storage_scrub_tasks(scrub_ids, scrub_task) scrub_task.execute() status, devname, udid = lua_recovery(adapter, vios_uuid, itls, vendor=vendor, device_id=device_id) return status, devname, udid def lua_recovery(adapter, vios_uuid, itls, vendor=LUAType.OTHER, device_id=None): """Logical Unit Address Recovery - discovery of a FC-attached hdisk. When a new disk is created externally (say on a block device), the Virtual I/O Server may or may not discover it immediately. This method forces a discovery on a given Virtual I/O Server. :param adapter: The pypowervm adapter. :param vios_uuid: The Virtual I/O Server UUID. :param itls: A list of ITL objects. :param vendor: The vendor for the LUN. See the LUAType.* constants. :param device_id: The device ID parameter in the LUA input XML. Typically the base 64 encoded pg83 value. :return status: The status code from the discover process. See LUAStatus.* constants. :return dev_name: The name of the discovered hdisk. :return udid: The UDID of the device. """ # Reduce the ITLs to ensure no duplicates itls = set(itls) # Build the LUA recovery XML lua_xml = _lua_recovery_xml(itls, adapter, vendor=vendor, device_id=device_id) # Build up the job & invoke resp = adapter.read( pvm_vios.VIOS.schema_type, root_id=vios_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_LUA_RECOVERY) job_wrapper = pvm_job.Job.wrap(resp) job_parms = [job_wrapper.create_job_parameter('inputXML', lua_xml, cdata=True)] job_wrapper.run_job(vios_uuid, job_parms=job_parms) # Get the job result, and parse the output. 
job_result = job_wrapper.get_job_results_as_dict() status, devname, udid = _process_lua_result(job_result) return status, devname, udid def _lua_recovery_xml(itls, adapter, vendor=LUAType.OTHER, device_id=None): """Builds the XML that is used as input for the lua_recovery job. The lua_recovery provides a very quick way for the system to discover an hdisk on the system. This method builds the input into the lua_recovery job. :param itls: The list of ITL objects that define the various connections between the server port (initiator), disk port (target) and disk itself. :param vendor: The LUA vendor. See the LUAType.* Constants. :param device_id: The device ID parameter in the LUA input XML. Typically the base 64 encoded pg83 value. :return: The CDATA XML that is used for the lua_recovery job. """ # Used for building the internal XML. root = ent.Element("XML_LIST", adapter, ns='') # The general attributes # TODO(IBM) Need to determine value of making these constants modifiable general = ent.Element("general", adapter, ns='') general.append(ent.Element("cmd_version", adapter, text=_LUA_CMD_VERSION, ns='')) general.append(ent.Element("version", adapter, text=_LUA_VERSION, ns='')) root.append(general) # TODO(IBM) This can be re-evaluated. Set to true if you know for sure # the ITLs are alive. If there are any bad ITLs, this should be false. root.append(ent.Element("reliableITL", adapter, text="false", ns='')) # There is only one device in the device list. 
device_list = ent.Element("deviceList", adapter, ns='') device = ent.Element("device", adapter, ns='') device.append(ent.Element("vendor", adapter, text=vendor, ns='')) if device_id: device.append(ent.Element("deviceID", adapter, text=device_id, ns='')) device.append(ent.Element("deviceTag", adapter, text="1", ns='')) itl_list = ent.Element("itlList", adapter, ns='') itl_list.append(ent.Element("number", adapter, text="%d" % (len(itls)), ns='')) for itl in itls: itl_elem = ent.Element("itl", adapter, ns='') itl_elem.append(ent.Element("Iwwpn", adapter, text=itl.initiator, ns='')) itl_elem.append(ent.Element("Twwpn", adapter, text=itl.target, ns='')) itl_elem.append(ent.Element("lua", adapter, text=itl.lun, ns='')) itl_list.append(itl_elem) device.append(itl_list) device_list.append(device) root.append(device_list) return root.toxmlstring().decode('utf-8') def _process_lua_result(result): """Processes the Output XML returned by LUARecovery. :return status: The status code from the discover process. See LUAStatus.* constants. :return dev_name: The name of the discovered hdisk. :return udid: The UDID of the device. """ if result is None: return None, None, None # The result may push to StdOut or to OutputXML (different versions push # to different locations). xml_resp = result.get('OutputXML') if xml_resp is None: xml_resp = result.get('StdOut') # If still none, nothing to do. if xml_resp is None: return None, None, None # The response is an XML block. Put into an XML structure and get # the data out of it. 
root = etree.fromstring(xml_resp) base = 'deviceList/device/' estatus, edev_name, eudid, emessage = ( root.find(base + x) for x in ('status', 'pvName', 'udid', 'msg/msgText')) status, dev_name, udid, message = ( y.text if y is not None else None for y in (estatus, edev_name, eudid, emessage)) _log_lua_status(status, dev_name, message) return status, dev_name, udid def _log_lua_status(status, dev_name, message): """Logs any issues with the LUA.""" if status == LUAStatus.DEVICE_AVAILABLE: LOG.info(_("LUA Recovery Successful. Device Found: %s"), dev_name) elif status == LUAStatus.FOUND_ITL_ERR: # Message is already set. LOG.warning(_("ITL Error encountered: %s"), message) elif status == LUAStatus.DEVICE_IN_USE: LOG.warning(_("%s Device is currently in use."), dev_name) elif status == LUAStatus.FOUND_DEVICE_UNKNOWN_UDID: LOG.warning(_("%s Device discovered with unknown UDID."), dev_name) elif status == LUAStatus.INCORRECT_ITL: LOG.warning(_("Failed to Discover the Device : %s"), dev_name) def remove_hdisk(adapter, host_name, dev_name, vios_uuid): """Command to remove the device from the VIOS. :param adapter: The pypowervm adapter. :param host_name: The name of the host. :param dev_name: The name of the device to remove. :param vios_uuid: The Virtual I/O Server UUID. """ if adapter.traits.rmdev_job_available: _remove_hdisk_job(adapter, dev_name, vios_uuid) else: _remove_hdisk_classic(adapter, host_name, dev_name, vios_uuid) def _remove_hdisk_job(adapter, dev_name, vios_uuid): """Runs the PowerVM Job to remove a hdisk. :param adapter: The pypowervm adapter. :param dev_name: The name of the device to remove. :param vios_uuid: The Virtual I/O Server UUID. """ # Build up the job & invoke resp = adapter.read( pvm_vios.VIOS.schema_type, root_id=vios_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_RM_HDISK) job_wrapper = pvm_job.Job.wrap(resp) job_parms = [job_wrapper.create_job_parameter('devName', dev_name)] # Run the job. 
If the hdisk removal failed, the job will raise an # exception. No output otherwise. job_wrapper.run_job(vios_uuid, job_parms=job_parms) def _remove_hdisk_classic(adapter, host_name, dev_name, vios_uuid): """Command to remove the device from the VIOS. Runs a remote command to perform the action. :param adapter: The pypowervm adapter. :param host_name: The name of the host. :param dev_name: The name of the device to remove. :param vios_uuid: The Virtual I/O Server UUID. """ try: # Execute a read on the vios to get the vios name resp = adapter.read(pvm_vios.VIOS.schema_type, root_id=vios_uuid) vios_w = pvm_vios.VIOS.wrap(resp) # build command rm_cmd = ('viosvrcmd -m ' + host_name + ' -p ' + vios_w.name + ' -c \"rmdev -dev ' + dev_name + '\"') LOG.debug('RMDEV Command Input: %s' % rm_cmd) # Get the response for the CLIRunner command resp = adapter.read(_MGT_CONSOLE, None, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm='CLIRunner') # Create the job parameters job_wrapper = pvm_job.Job.wrap(resp) ack_parm = 'acknowledgeThisAPIMayGoAwayInTheFuture' job_parms = [job_wrapper.create_job_parameter('cmd', rm_cmd), job_wrapper.create_job_parameter(ack_parm, 'true')] job_wrapper.run_job(None, job_parms=job_parms) return job_wrapper.job_status() except pexc.JobRequestFailed as error: LOG.warning(_('CLIRunner Error: %s') % error) def get_pg83_via_job(adapter, vios_uuid, udid): """Inventory call to fetch the encoded SCSI Page 0x83 descriptor for a PV. :param adapter: The pypowervm adapter through which to run the Job. :param vios_uuid: The UUID of the Virtual I/O Server owning the PV. :param udid: The UDID of the PV to query. :return: SCSI PG83 NAA descriptor, base64-encoded. May be None. 
    """
    # TODO(efried): Remove this method once VIOS supports pg83 in Events
    # Build the hdisk inventory input XML
    # NOTE(review): this literal is visibly empty in this copy of the file.
    # It appears the XML markup (an <XML_LIST> QUERY_INVENTORY request body
    # containing a %s placeholder for the udid) was stripped during text
    # extraction -- as written, ('' % udid) raises TypeError.  Compare
    # against the upstream pypowervm source before relying on this text.
    lua_xml = ('' % udid)
    # Build up the job & invoke
    job_wrapper = pvm_job.Job.wrap(adapter.read(
        pvm_vios.VIOS.schema_type, root_id=vios_uuid,
        suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_LUA_RECOVERY))
    job_wrapper.run_job(vios_uuid, job_parms=[
        job_wrapper.create_job_parameter('inputXML', lua_xml, cdata=True)])
    # Get the job result, and parse the output.
    result = job_wrapper.get_job_results_as_dict()
    # The result may push to StdOut or to OutputXML (different versions push
    # to different locations).
    if not result or not any((k in result for k in ('OutputXML', 'StdOut'))):
        LOG.warning(_('QUERY_INVENTORY LUARecovery Job succeeded, but result '
                      'contained neither OutputXML nor StdOut.'))
        return None
    xml_resp = result.get('OutputXML', result.get('StdOut'))
    LOG.debug('QUERY_INVENTORY result: %s' % xml_resp)
    return _parse_pg83_xml(xml_resp)


def _parse_pg83_xml(xml_resp):
    """Parse LUARecovery XML response, looking for pg83 descriptor.

    :param xml_resp: Tuple containing OutputXML and StdOut results of the
                     LUARecovery Job
    :return: pg83 descriptor text, or None if not found.
    """
    # QUERY_INVENTORY response may contain more than one  element. Each will be
    # delimited by its own  tag. etree will only parse one at a time.
    # NOTE(review): the comment above and the split() delimiter below are
    # visibly missing their tag text (presumably the closing XML_LIST tag)
    # in this copy -- str.split('') raises ValueError at runtime.  TODO:
    # confirm the delimiter against the upstream pypowervm source.
    for chunk in xml_resp.split(''):
        if not chunk:
            continue
        try:
            parsed = etree.fromstring(chunk)
        except etree.XMLSyntaxError as e:
            LOG.warning(_('QUERY_INVENTORY produced invalid chunk of XML '
                          '(%(chunk)s). 
Error: %(err)s'), {'chunk': chunk, 'err': e.args[0]}) continue for elem in parsed.getiterator(): if (etree.QName(elem.tag).localname == 'PhysicalVolume_base' and elem.attrib.get('desType') == "NAA"): return elem.attrib.get('descriptor') LOG.warning(_('Failed to find pg83 descriptor in XML output:\n%s'), xml_resp) return None pypowervm-1.1.24/pypowervm/tasks/power_opts.py0000664000175000017500000003603113571367171021223 0ustar neoneo00000000000000# Copyright 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Helper classes for PowerOn/PowerOff options (additional Job parameters).""" import abc from oslo_log import log as logging import six import pypowervm.exceptions as exc import pypowervm.wrappers.base_partition as bp from pypowervm.wrappers import job import pypowervm.wrappers.logical_partition as lpar LOG = logging.getLogger(__name__) IPLSrc = lpar.IPLSrc class BootMode(object): """Valid values for the 'bootmode' parameter in power_on. Not to be confused with pypowervm.wrappers.base_partition.BootMode. Example usage: power_on(..., add_parms={BootMode.KEY: BootMode.SMS, ...}) """ KEY = 'bootmode' NORM = 'norm' SMS = 'sms' DD = 'dd' DS = 'ds' OF = 'of' PBL = 'pbl' ALL_VALUES = (NORM, SMS, DD, DS, OF, PBL) class KeylockPos(object): """Valid values for the 'keylock' parameter in power_on. Not to be confused with pypowervm.wrappers.base_partition.KeylockPos. 
Example usage: power_on(..., add_parms={KeylockPos.KEY: KeylockPos.MANUAL, ...}) """ KEY = 'keylock' MANUAL = 'manual' NORMAL = 'norm' UNKNOWN = 'unknown' ALL_VALUES = (MANUAL, NORMAL, UNKNOWN) class RemoveOptical(object): """Valid values for the 'remove_optical_*' parameters in power_on. This is primarily used to remove the config drive after install. KEY_NAME is required and maps to a VirtualOpticalMedia name to remove. KEY_TIME is optional and maps to the time, in minutes, to wait before deleting the media. Example usage: power_on(..., add_parms={RemoveOptical.KEY_TIME: , RemoveOptical.KEY_NAME: }, ...) """ KEY_NAME = 'remove_optical_name' KEY_TIME = 'remove_optical_time' @classmethod def bld_map(cls, name, time=0): return {cls.KEY_NAME: name, cls.KEY_TIME: time} class IBMiOperationType(object): """Valid values for the IBMi operation type in power_on.""" KEY = 'OperationType' ACTIVATE = 'activate' NETBOOT = 'netboot' CHANGE_KEYLOCK = 'changeKeylock' ALL_VALUES = (ACTIVATE, NETBOOT, CHANGE_KEYLOCK) class PowerOffOperation(object): """Valid values for the operation in power_off.""" KEY = 'operation' VSP = 'shutdown' OS = 'osshutdown' DUMPRESTART = 'dumprestart' ALL_VALUES = (VSP, OS, DUMPRESTART) class Force(object): """Enumeration indicating the strategy for forcing power-off.""" # The force-immediate option is included on the first pass. TRUE = True # The force-immediate option is not included on the first pass; but if the # power-off fails, it is retried with the force-immediate option included. # This value is False for backward compatibility. ON_FAILURE = False # The force-immediate option is not included. If the power-off fails, it # is not retried. NO_RETRY = 'no retry' @six.add_metaclass(abc.ABCMeta) class _PowerOpts(object): # Specify a set of string keys that are legal Job parameters for the # operation. Illegal keys found in legacy_add_parms will be dropped with a # warning. 
# Leaving as None will skip validation and send all legacy_add_parms to the # Job. valid_param_keys = None def __init__(self, legacy_add_parms=None): """Initialize a PowerOpts instance. :param legacy_add_parms: For legacy use only, initialize the internal parameter map from the specified dictionary of Job parameter name/value pairs. """ self._parm_map = {} if self.valid_param_keys is None: self._parm_map.update(legacy_add_parms or {}) else: for key in legacy_add_parms or {}: if key in self.valid_param_keys: self._parm_map[key] = legacy_add_parms[key] else: LOG.warning("Ignoring unknown Job parameter %s specified " "via legacy add_parms.", key) def __str__(self): """String representation of this instance, for log/test purposes.""" parms = ', '.join(["%s=%s" % (key, self._parm_map[key]) for key in sorted(self._parm_map)]) return "%s(%s)" % (self.JOB_SUFFIX, parms) def _process_enum(self, enum, value): if value not in enum.ALL_VALUES: raise exc.InvalidEnumValue(enum=enum.__name__, value=value, valid_values=str(enum.ALL_VALUES)) self._parm_map[enum.KEY] = value return self def _process_bool(self, key, value): """Process a boolean option. All boolean options are false by default. Thus, if value is 'true'/i or True, the key is added with the value 'true'; otherwise it is *removed* from the _PowerOpt. :param key: The JobParameterName. :param value: A bool (True/False) or string ('true', 'false', case-insensitive). Default: True. """ if key in self._parm_map: del self._parm_map[key] if str(value).lower() == 'true': self._parm_map[key] = 'true' return self def is_param_set(self, key): """Detect whether a parameter is set. For some parameters, the absence of the key assumes a default behavior. For example, is_immediate == False could mean the 'immediate' key is entirely absent; or that it is present with a value of 'false'. This method allows the consumer to distinguish between these two scenarios, typically for the purpose of deciding whether to enact some default behavior. 
:param key: The key of the parameter in question. :return: True if any value is set for the supplied key; False if that key is absent from the parameter list. """ return key in self._parm_map def bld_jparms(self): return [job.Job.create_job_parameter(key, str(val)) for key, val in six.iteritems(self._parm_map)] class PowerOnOpts(_PowerOpts): """Job parameters for pypowervm.tasks.power.power_on/PowerOp.start.""" JOB_SUFFIX = 'PowerOn' def bootmode(self, value): """Set the boot mode. :param value: One of the BootMode enum values. :return self: For chaining. """ return self._process_enum(BootMode, value) def keylock_pos(self, value): """Set the Keylock Position. :param value: One of the KeylockPos enum values. :return self: For chaining. """ return self._process_enum(KeylockPos, value) def bootstring(self, value): """Set the boot string. :param value: The boot string to use. :return self: For chaining. """ self._parm_map['bootstring'] = value return self def force(self, value=True): """Add the force option. :param value: A bool (True/False) or string ('true', 'false', case-insensitive). Default: True. :return self: For chaining. """ return self._process_bool('force', value) def remove_optical(self, name, time=0): """Add options to remove an optical drive after boot. :param name: The name of a VirtualOpticalMedia name to remove. :param time: The time, in minutes, to wait before deleting the media. :return self: For chaining. """ self._parm_map.update(RemoveOptical.bld_map(name, time=time)) return self def ibmi_ipl_source(self, value): """Set the IBMi IPL Source. :param value: One of the IPLSrc enum values. :return self: For chaining. """ return self._process_enum(IPLSrc, value) def ibmi_op_type(self, value): """Set the IBMi Operation Type. :param value: One of the IBMiOperationType enum values. :return self: For chaining. 
""" return self._process_enum(IBMiOperationType, value) def ibmi_netboot_params(self, ipaddr, serverip, gateway, serverdir, subnet=None, connspeed=None, duplex=None, mtu=None, vlanid=None): """Set parameters for IBMi netboot. Use with ibmi_op_type(IBMiOperationType.NETBOOT). :param ipaddr: IP (v4 or v6) address of the client VM. :param serverip: IP (v4 or v6) address of the netboot server. :param gateway: IP (v4 or v6) address of the gateway. :param serverdir: Location of the netboot image on the server. :param subnet: Subnet mask. IPv4 only. :param connspeed: Connection speed. :param duplex: Duplex mode. :param mtu: Maximum Transmission Unit. :param vlanid: VLAN ID. :return self: For chaining. """ self._parm_map['IPAddress'] = ipaddr self._parm_map['ServerIPAddress'] = serverip self._parm_map['Gateway'] = gateway self._parm_map['IBMiImageServerDirectory'] = serverdir # Optional args for key, val in (('SubnetMask', subnet), ('ConnectionSpeed', connspeed), ('DuplexMode', duplex), ('VLANID', vlanid), ('MaximumTransmissionUnit', mtu)): if val is not None: # connspeed/vlanid/mtu may arrive as ints self._parm_map[key] = str(val) return self class PowerOffOpts(_PowerOpts): """Job parameters for pypowervm.tasks.power.power_off/PowerOp.stop. Use *one* of os_normal, os_immediate, vsp_normal, vsp_hard, or soft_detect. Optionally specify restart. """ JOB_SUFFIX = 'PowerOff' valid_param_keys = {'operation', 'immediate', 'restart'} def immediate(self, value=True): """Whether to include immediate=true. This corresponds to "hard" for VSP, "immediate" for OS. This should only be used with operation(DUMPRESTART). Otherwise, one of the os_normal, os_immediate, vsp_normal, vsp_hard, or soft_detect methods should be used. :param value: A bool (True/False) or string ('true', 'false', case-insensitive). :return self: For chaining. 
""" return self._process_bool('immediate', value) @property def is_immediate(self): return self._parm_map.get('immediate') == 'true' def operation(self, value): """The PowerOff operation to perform. This should only be used for DUMPRESTART. Otherwise, one of the os_normal, os_immediate, vsp_normal, vsp_hard, or soft_detect methods should be used. :param value: One of the PowerOffOperation enum values. :return self: For chaining. """ return self._process_enum(PowerOffOperation, value) @staticmethod def can_os_shutdown(part): """Can the specified partition perform an OS shutdown? :param part: LPAR/VIOS wrapper indicating the partition to inspect. :return: True if the specified partition is capable of OS shutdown; False otherwise. """ # OS shutdown is always available on IBMi partitions. # OS shutdown is available if RMC is up. return (part.env == bp.LPARType.OS400) or (part.rmc_state == bp.RMCState.ACTIVE) @property def is_os(self): return self._parm_map.get('operation') == PowerOffOperation.OS def restart(self, value=True): """Whether to restart the partition after power-off. :param value: A bool (True/False) or string ('true', 'false', case-insensitive). Default: True. :return self: For chaining. """ return self._process_bool('restart', value) @property def is_restart(self): return self._parm_map.get('restart') == 'true' def os_normal(self): """Set up normal OS shutdown. Sends the 'shutdown' command to the operating system. :return self: For chaining. """ return self.operation(PowerOffOperation.OS).immediate(value=False) def os_immediate(self): """Set up immediate OS shutdown. Sends the 'shutdown -t now' command to the operating system. :return self: For chaining. """ return self.operation(PowerOffOperation.OS).immediate() def vsp_normal(self): """Set up normal VSP shutdown. The Virtual Service Processor sends the equivalent of an EPOW event to the operating system. The result is OS-dependent. :return self: For chaining. 
""" return self.operation(PowerOffOperation.VSP).immediate(value=False) def vsp_hard(self): """Set up hard VSP shutdown. Akin to pulling the plug from the partition. Processors are stopped immediately, and any pending I/O is lost. May result in data corruption. :return self: For chaining. """ return self.operation(PowerOffOperation.VSP).immediate() def soft_detect(self, part, immed_if_os=None): """Determine the appropriate soft shutdown operation for a partition. For IBMi partitions, this will always set up an OS shutdown. For non-IBMi partitions with active RMC, this will set up an OS shutdown. For non-IBMi partitions without RMC, this will set up a *normal* VSP shutdown. :param part: LPAR or VIOS wrapper indicating the partition being shut down. :param immed_if_os: If this method determines that an OS shutdown is to be performed, this parameter indicates whether that shutdown should be immediate (True) or not (False). The default is False for IBMi partitions, and True for non-IBMi partitions. This parameter is ignored if a VSP shutdown is detected. :return self: For chaining. """ if self.can_os_shutdown(part): self.operation(PowerOffOperation.OS) # Specific 'immediate' behavior requested for OS shutdown? if immed_if_os is not None: self.immediate(value=immed_if_os) else: # Default is normal for IBMi, immediate for non-IBMi. self.immediate(value=part.env != bp.LPARType.OS400) else: # OS shutdown not available; perform *normal* VSP shutdown. self.vsp_normal() return self pypowervm-1.1.24/pypowervm/tasks/monitor/0000775000175000017500000000000013571367172020135 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tasks/monitor/util.py0000664000175000017500000005072013571367171021467 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities to query and parse the metrics data."""

import abc
import datetime

from oslo_concurrency import lockutils
from oslo_log import log as logging
import six

from pypowervm import adapter as pvm_adpt
from pypowervm.i18n import _
from pypowervm.tasks.monitor import lpar as lpar_mon
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import monitor as pvm_mon
from pypowervm.wrappers.pcm import lpar as lpar_pcm
from pypowervm.wrappers.pcm import phyp as phyp_mon
from pypowervm.wrappers.pcm import vios as vios_mon

LOG = logging.getLogger(__name__)

# Child-type name used when building the RawMetrics REST path.
RAW_METRICS = 'RawMetrics'


@six.add_metaclass(abc.ABCMeta)
class MetricCache(object):
    """Provides a cache of the metrics data.

    The core LongTermMetrics API only refreshes its internal metric data once
    (generally) every 30 seconds. This class provides a generalized cache of
    the metrics. It stores both the raw phyp and vios metrics (if available)
    and will only refresh them after a specified time period has elapsed (30
    seconds by default).
    """

    def __init__(self, adapter, host_uuid, refresh_delta=30,
                 include_vio=True):
        """Creates an instance of the cache.

        :param adapter: The pypowervm Adapter.
        :param host_uuid: The UUID of the host CEC to maintain a metrics
                          cache for.
        :param refresh_delta: (Optional) The interval in seconds at which the
                              metrics should be updated. Will only update if
                              the interval has been passed and the user
                              invokes a cache query. Will not update in the
                              background, only if the cache is used.
        :param include_vio: (Optional) Defaults to True. If set to False, the
                            cur_vioses and prev_vioses will always be
                            unavailable. This increases the speed for
                            refresh.
        """
        # Ensure that the metric monitoring is enabled.
        ensure_ltm_monitors(adapter, host_uuid)

        # Save the data
        self.adapter = adapter
        self.host_uuid = host_uuid
        self.refresh_delta = datetime.timedelta(seconds=refresh_delta)
        self.include_vio = include_vio
        # True only during the very first refresh; _set_prev maintains it.
        self.is_first_pass = False

        # Ensure these elements are defined up front.
        self.cur_date, self.cur_phyp, self.cur_vioses, self.cur_lpars = (
            None, None, None, None)
        self.prev_date, self.prev_phyp, self.prev_vioses, self.prev_lpars = (
            None, None, None, None)

        # Run a refresh up front.
        self._refresh_if_needed()

    def _refresh_if_needed(self):
        """Refreshes the cache if needed."""
        # The refresh is needed if the current date is none, or if the refresh
        # time delta has been crossed.
        refresh_needed = self.cur_date is None

        # This is put into an if block so that we don't run the logic if
        # cur_date is in fact None...
        if not refresh_needed:
            diff_date = datetime.datetime.now() - self.cur_date
            refresh_needed = diff_date > self.refresh_delta

        # At this point, if a refresh isn't needed, then exit.
        if not refresh_needed:
            return

        # Roll the 'current' sample back to 'previous' before pulling new.
        self._set_prev()
        self.cur_date, self.cur_phyp, self.cur_vioses, self.cur_lpars = (
            latest_stats(self.adapter, self.host_uuid,
                         include_vio=self.include_vio))

        # Have the class that is implementing the cache update its simplified
        # representation of the data. Ex. LparMetricCache
        self._update_internal_metric()

    def _set_prev(self):
        # On first boot, the cur data will be None. Query to seed it with the
        # second latest data (which may also still be none if LTM was just
        # turned on, but just in case).
        self.is_first_pass = self.cur_date is None
        if self.is_first_pass:
            p_date, p_phyp, p_vioses, p_lpars = (
                latest_stats(self.adapter, self.host_uuid,
                             include_vio=self.include_vio,
                             second_latest=True))
            self.prev_date, self.prev_phyp = p_date, p_phyp
            self.prev_vioses, self.prev_lpars = p_vioses, p_lpars
        else:
            self.prev_date, self.prev_phyp = self.cur_date, self.cur_phyp
            self.prev_vioses, self.prev_lpars = (self.cur_vioses,
                                                 self.cur_lpars)

    def _update_internal_metric(self):
        """Save the raw metric to the transformed values.

        Implemented by the child class. Should transform the phyp and
        vios data into the format required by the implementor.
        """
        raise NotImplementedError()


class LparMetricCache(MetricCache):
    """Provides a cache of metrics on a per LPAR level.

    Metrics are expensive to gather and to parse. It is expensive because the
    backing API gathers all of the metrics at the Hypervisor and Virtual I/O
    Server levels. This returns all of the LPARs. Therefore, this cache
    parses in all of the data once, and allows the invoker to get individual
    LPAR metrics without having to re-query the API server.

    This class provides a caching mechanism along with a built in refresh
    mechanism if enough time has passed since last gathering the metrics.

    This cache will obtain the metrics for a given system, separate them out
    into an individual LparMetric cache. If another LPAR is required, the
    cache will be used (so a subsequent API call is not required).

    There is a refresh_interval as well. If the interval is passed, a
    subsequent query of the metrics will force a refresh of the cache.

    The previous metric is also saved within the cache. This is useful for
    generating rates on the metrics (a previous element to compare against).

    The cache will only contain the last two samples of hypervisor/vios data.
    This is so that the current sample and the previous sample are
    maintained. The data is maintained for all of the systems that metrics
    data has data for - but this is still quite thin.
    This cache does not have support to maintain additional samples.
    Trimming is done upon each refresh (which is triggered by the
    get_latest_metric). To wipe the cache, the user should just have the
    cache go out of scope and it will be cleared. No manual clean up is
    required.
    """

    def __init__(self, adapter, host_uuid, refresh_delta=30,
                 include_vio=True):
        """Creates an instance of the cache.

        :param adapter: The pypowervm Adapter.
        :param host_uuid: The UUID of the host CEC to maintain a metrics
                          cache for.
        :param refresh_delta: (Optional) The interval at which the metrics
                              should be updated. Will only update if the
                              interval has been passed and the user invokes a
                              cache query. Will not update in the background,
                              only if the cache is used.
        :param include_vio: (Optional) Defaults to True. If set to False, the
                            cur_vioses and prev_vioses will always be
                            unavailable. This increases the speed for
                            refresh.
        """
        # Ensure these elements are defined up front so that references don't
        # error out if they haven't been set yet. These will be the results
        # from the vm_metrics method.
        self.cur_metric, self.prev_metric = None, None

        # Invoke the parent to seed the metrics.
        super(LparMetricCache, self).__init__(adapter, host_uuid,
                                              refresh_delta=refresh_delta,
                                              include_vio=include_vio)

    @lockutils.synchronized('pvm_lpar_metrics_get')
    def get_latest_metric(self, lpar_uuid):
        """Returns the latest metrics for a given LPAR.

        This will pull from the cache, but will refresh the cache if the
        refresh interval has passed.

        :param lpar_uuid: The UUID of the LPAR to query for the metrics.
        :return: Two elements.
                 - First is the date of the metric.
                 - Second is the LparMetric

                 Note that both of these can be None. If the date of the
                 metric is None, that indicates that there was no previous
                 metric (or something is wrong with the gather flow).

                 If the date of the metric is None, then the second value
                 will be None as well.

                 If the date of the metric is set, but None is returned for
                 the value then the LPAR had no metrics for it. Scenarios can
                 occur where the current metric may have a value but not the
                 previous (ex. when a LPAR was just created).
        """
        # Refresh if needed. Will no-op if no refresh is required.
        self._refresh_if_needed()

        # No metric, no operation.
        if self.cur_metric is None:
            return self.cur_date, None

        return self.cur_date, self.cur_metric.get(lpar_uuid)

    @lockutils.synchronized('pvm_lpar_metrics_get')
    def get_previous_metric(self, lpar_uuid):
        """Returns the previous metric for a given LPAR.

        This will NOT update the cache. That can only be triggered from the
        get_latest_metric method.

        :param lpar_uuid: The UUID of the LPAR to query for the metrics.
        :return: Two elements.
                 - First is the date of the metric.
                 - Second is the LparMetric

                 Note that both of these can be None. If the date of the
                 metric is None, that indicates that there was no previous
                 metric (or something is wrong with the gather flow).

                 If the date of the metric is None, then the second value
                 will be None as well.

                 If the date of the metric is set, but None is returned for
                 the value then the LPAR had no metrics for it. Scenarios can
                 occur where the current metric may have a value but not the
                 previous (ex. when a LPAR was just created).
        """
        # No metric, no operation.
        if self.prev_metric is None:
            return self.prev_date, None

        return self.prev_date, self.prev_metric.get(lpar_uuid)

    def _update_internal_metric(self):
        # On the first pass the 'previous' raw sample was seeded separately,
        # so it must be transformed too; afterwards the old current rolls
        # into previous.
        if self.is_first_pass:
            self.prev_metric = vm_metrics(self.prev_phyp, self.prev_vioses,
                                          self.prev_lpars)
        else:
            self.prev_metric = self.cur_metric
        self.cur_metric = vm_metrics(self.cur_phyp, self.cur_vioses,
                                     self.cur_lpars)


def latest_stats(adapter, host_uuid, include_vio=True, second_latest=False):
    """Returns the latest PHYP and (optionally) VIOS statistics.

    :param adapter: The pypowervm adapter.
    :param host_uuid: The host system's UUID.
    :param include_vio: (Optional) Defaults to True.
        If set to false, the VIO metrics will always be returned as an empty
        list.
    :param second_latest: (Optional) Defaults to False. If set to True, it
        will pull the second to last metric for the return data.
    :return: datetime - When the metrics were pulled.
    :return: phyp_data - The PhypInfo object for the raw metrics. May be None
        if there are issues gathering the metrics.
    :return: vios_datas - The list of ViosInfo objects. May be empty if the
        metrics are unavailable or if the include_vio flag is False. Is a
        list as the system may have many Virtual I/O Servers.
    :return: lpar_metrics - The list of Lpar metrics received from querying
        IBM.Host Resource Manager via RMC. It may be empty if the metrics
        are unavailable. lpar_metrics are generally collected once every two
        minutes, as opposed to the other data which is collected every 30
        seconds.
    """
    ltm_metrics = query_ltm_feed(adapter, host_uuid)
    latest_phyp = _get_metric(ltm_metrics, 'phyp',
                              second_latest=second_latest)

    # If there is no current metric, return None.
    if latest_phyp is None:
        return datetime.datetime.now(), None, None, None

    phyp_json = adapter.read_by_href(latest_phyp.link, xag=[]).body
    phyp_metric = phyp_mon.PhypInfo(phyp_json)

    # Now find the corresponding VIOS metrics for this.
    vios_ltms = []
    for metric in ltm_metrics:
        # The VIOS metrics start with the key 'vios_'
        if not metric.category.startswith('vios_'):
            continue

        # Only VIOS samples from the same collection instant as the PHYP
        # sample are paired with it.
        if metric.updated_datetime == latest_phyp.updated_datetime:
            vios_ltms.append(metric)

    if include_vio:
        vios_metrics = [vios_mon.ViosInfo(adapter.read_by_href(x.link).body)
                        for x in vios_ltms]
    else:
        vios_metrics = []

    # Now find the corresponding LPAR metrics for this.
    lpar_metrics = get_lpar_metrics(ltm_metrics, adapter,
                                    second_latest=second_latest)

    # Get the latest date, but if we're getting the second latest we know
    # it is 30 seconds old. The 30 seconds is the cadence that the REST API
    # collects the metric data.
    ret_date = datetime.datetime.now()
    if second_latest:
        ret_date = ret_date - datetime.timedelta(seconds=30)
    return ret_date, phyp_metric, vios_metrics, lpar_metrics


def get_lpar_metrics(ltm_metrics, adapter, second_latest=False):
    """This method returns LPAR metrics of type LparInfo

    :param ltm_metrics: The LTM metrics
    :param adapter: The pypowervm adapter.
    :param second_latest: (Optional) Defaults to False. If set to True, it
        will pull the second to last metric for the return data.
    :return: LparInfo object representing the LPAR metrics. None is returned
             if there are no LTM metrics collected.
    """
    latest_lpar = _get_metric(ltm_metrics, 'lpar',
                              second_latest=second_latest)

    # If there is no current metric, return None for lpar metrics.
    lpar_metrics = None
    if latest_lpar is not None:
        lpar_json = adapter.read_by_href(latest_lpar.link, xag=[]).body
        lpar_metrics = lpar_pcm.LparInfo(lpar_json)

    return lpar_metrics


def _get_metric(metrics, metric_type, second_latest=False):
    """Return the newest (or second newest) metric of a given category.

    :param metrics: Iterable of LTMMetrics entries to search.
    :param metric_type: The category to match (ex. 'phyp', 'lpar').
    :param second_latest: If True, return the second newest entry instead.
    :return: The matching entry, or None if not enough entries exist.
    """
    filtered = [met for met in metrics if met.category == metric_type]
    # Newest first; index 0 is the latest sample.
    metrics = sorted(filtered, key=lambda met: met.updated_datetime,
                     reverse=True)
    if second_latest:
        return metrics[1] if len(metrics) > 1 else None
    else:
        return metrics[0] if len(metrics) > 0 else None


def query_ltm_feed(adapter, host_uuid):
    """Will query the long term metrics feed for a given host.

    This method is useful due to the difference in nature of the pcm URIs
    compared to the standard uom.

    PCM URI: ManagedSystem/host_uuid/RawMetrics/LongTermMonitor

    :param adapter: The pypowervm adapter.
    :param host_uuid: The host system's UUID.
    :return: A list of the LTMMetrics. Note that both PHYP and VIOS entries
             are returned (assuming both are enabled).
""" path = pvm_adpt.Adapter.build_path( pvm_mon.PCM_SERVICE, pvm_ms.System.schema_type, root_id=host_uuid, child_type=RAW_METRICS, child_id=pvm_mon.LONG_TERM_MONITOR, xag=[]) resp = adapter.read_by_path(path) return pvm_mon.LTMMetrics.wrap(resp) def ensure_ltm_monitors(adapter, host_uuid, override_to_default=False, compute_ltm=False): """Ensures that the Long Term Monitors are enabled. :param adapter: The pypowervm adapter. :param host_uuid: The host systems UUID. :param override_to_default: (Optional) If True will ensure that the defaults are set on the system. This means: - Short Term Metrics - disabled - Aggregation - turned on If left off, the previous values will be adhered to. :param compute_ltm: (Optional - Defaults to False) If set, will turn on only the compute long term metrics, and the VIOS and network metrics will not be considered. """ # Read from the feed. PCM preferences appear to be odd. If you don't # query the feed or update the feed directly, it will fail. This means # you can't even query the element or update it direct. href = adapter.build_href(pvm_ms.System.schema_type, root_id=host_uuid, child_type=pvm_mon.PREFERENCES, service=pvm_mon.PCM_SERVICE) resp = adapter.read_by_href(href) # Wrap it to our wrapper. There is only one element in the feed. pref = pvm_mon.PcmPref.wrap(resp)[0] pref.compute_ltm_enabled = compute_ltm pref.ltm_enabled = True if override_to_default: pref.stm_enabled = False pref.aggregation_enabled = True # This updates the backing entry. This is part of the jankiness. We have # to use the element from the preference, but then the etag from the feed. adapter.update(pref.entry.element, resp.etag, pvm_ms.System.schema_type, root_id=host_uuid, child_type=pvm_mon.PREFERENCES, service=pvm_mon.PCM_SERVICE) def vm_metrics(phyp, vioses, lpars): """Reduces the metrics to a per VM basis. The metrics returned by PCM are on a global level. The anchor points are PHYP and the Virtual I/O Servers. 
Typical consumption models for metrics are on a 'per-VM' basis. The dictionary returned contains the LPAR UUID and a LparMetric object. That object breaks down the PHYP and VIOS statistics to be approached on a LPAR level. :param phyp: The PhypInfo for the metrics. :param vioses: A list of the ViosInfos for the Virtual I/O Server components. :param lpars: The LparInfo object representing Lpar metrics collected via RMC. :return vm_data: A dictionary where the UUID is the client LPAR UUID, but the data is a LparMetric for that VM. Note: Data can not be guaranteed. It may exist in one sample, but then not in another (ex. VM was powered off between gathers). Always validate that data is 'not None' before use. """ # If the metrics just started, there may not be data yet. Log this, but # return no data if phyp is None: LOG.warning(_("Metric data is not available. This may be due to " "the metrics being recently initialized.")) return {} vm_data = {} for lpar_sample in phyp.sample.lpars: lpar_metric = lpar_mon.LparMetric(lpar_sample.uuid) # Fill in the Processor data. lpar_metric.processor = lpar_mon.LparProc(lpar_sample.processor) # Fill in the Memory data. memory_metric = lpars.find(lpar_sample.uuid) if lpars else None lpar_metric.memory = lpar_mon.LparMemory( lpar_sample.memory, memory_metric) # All partitions require processor and memory. They may not have # storage (ex. network boot) or they may not have network. Therefore # these metrics can not be guaranteed like the others. # Fill in the Network data. 
if lpar_sample.network is None: lpar_metric.network = None else: lpar_metric.network = lpar_mon.LparNetwork(lpar_sample.network) # Fill in the Storage metrics if lpar_sample.storage is None: lpar_metric.storage = None else: lpar_metric.storage = lpar_mon.LparStorage(lpar_sample.storage, vioses) vm_data[lpar_metric.uuid] = lpar_metric return vm_data pypowervm-1.1.24/pypowervm/tasks/monitor/host_cpu.py0000664000175000017500000002540113571367171022334 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from pypowervm.tasks.monitor import util as pcm_util LOG = logging.getLogger(__name__) class HostCPUMetricCache(pcm_util.MetricCache): """Collects the PowerVM CPU metrics. PowerVM only gathers the CPU statistics once every 30 seconds. It does this to reduce overhead. There is a function to gather statistics quicker, but that can be very expensive. Therefore, to ensure that the client's workload is not impacted, these 'longer term' metrics will be used. This class builds off of a base pypowervm function where it can obtain the samples through a PCM 'cache'. If a new sample is available, the cache pulls the sample. If it is not, the existing sample is used. This can result in multiple, quickly successive calls to the host stats returning the same data (because a new sample may not be available yet). 
    The class analyzes the data and keeps running counts of total, user, and
    firmware cycles.
    """

    def __init__(self, adapter, host_uuid):
        """Creates an instance of the HostCPUMetricCache.

        :param adapter: The pypowervm Adapter.
        :param host_uuid: The UUID of the host CEC to maintain a metrics
                          cache for.
        """
        # Running counts for total, firmware, and user cycles
        self.total_cycles = 0
        self.total_fw_cycles = 0
        self.total_user_cycles = 0
        self.cpu_freq = self._get_cpu_freq()

        # Invoke the parent to seed the metrics. Don't include VIO - will
        # result in quicker calls.
        super(HostCPUMetricCache, self).__init__(adapter, host_uuid,
                                                 include_vio=False)

    def refresh(self):
        """Updates the host-level CPU metrics if needed."""
        self._refresh_if_needed()

    def _update_internal_metric(self):
        """Updates cycle totals using the latest stats from the cache.

        This method is invoked by the parent class after the raw metrics are
        updated.
        """
        # If there is no 'new' data (perhaps sampling is not turned on) then
        # return no data.
        if self.cur_phyp is None:
            return

        # Compute the cycles spent in FW since last collection.
        fw_cycles_delta = self._get_fw_cycles_delta()

        # Compute the cycles the system spent since last run.
        tot_cycles_delta = self._get_total_cycles_delta()

        # Get the user cycles since last run
        user_cycles_delta = self._gather_user_cycles_delta()

        # Make sure that the total cycles is higher than the user/fw cycles.
        # Should not happen, but just in case there is any precision loss from
        # CPU data back to system.
        if user_cycles_delta + fw_cycles_delta > tot_cycles_delta:
            LOG.warning(
                "Host CPU Metrics determined that the total cycles reported "
                "was less than the used cycles. This indicates an issue with "
                "the PCM data. Please investigate the results.\n"
                "Total Delta Cycles: %(tot_cycles)d\n"
                "User Delta Cycles: %(user_cycles)d\n"
                "Firmware Delta Cycles: %(fw_cycles)d",
                {'tot_cycles': tot_cycles_delta, 'fw_cycles': fw_cycles_delta,
                 'user_cycles': user_cycles_delta})
            # Clamp so that the running totals stay internally consistent.
            tot_cycles_delta = user_cycles_delta + fw_cycles_delta

        self.total_cycles += tot_cycles_delta
        self.total_user_cycles += user_cycles_delta
        self.total_fw_cycles += fw_cycles_delta

    def _gather_user_cycles_delta(self):
        """The estimated user cycles of all VMs/VIOSes since last run.

        The sample data includes information about how much CPU has been used
        by workloads and the Virtual I/O Servers. There is not one global
        counter that can be used to obtain the CPU spent cycles.

        This method will calculate the delta of workload (and I/O Server)
        cycles between the previous sample and the current sample.

        There are edge cases for this however. If a VM is deleted or migrated
        its cycles will no longer be taken into account. The algorithm takes
        this into account by building on top of the previous sample's user
        cycles.

        :return: Estimated cycles spent on workload (including VMs and
                 Virtual I/O Server). This represents the entire server's
                 current 'user' load.
        """
        # Current samples should be guaranteed to be there.
        vm_cur_samples = self.cur_phyp.sample.lpars
        vios_cur_samples = self.cur_phyp.sample.vioses

        # The previous samples may not have been there.
        vm_prev_samples, vios_prev_samples = None, None
        if self.prev_phyp is not None:
            vm_prev_samples = self.prev_phyp.sample.lpars
            vios_prev_samples = self.prev_phyp.sample.vioses

        # Gather the delta cycles between the previous and current data sets
        vm_delta_cycles = self._delta_proc_cycles(vm_cur_samples,
                                                  vm_prev_samples)
        vios_delta_cycles = self._delta_proc_cycles(vios_cur_samples,
                                                    vios_prev_samples)

        return vm_delta_cycles + vios_delta_cycles

    @staticmethod
    def _get_cpu_freq():
        # The output will be similar to '4116.000000MHz' on a POWER system.
        # NOTE(review): implicitly returns None if no 'clock' line is found
        # (ex. on a non-POWER /proc/cpuinfo) - callers must tolerate that.
        with open('/proc/cpuinfo') as cpuinfo:
            for line in cpuinfo:
                if line.startswith('clock'):
                    return int(float(line.split()[-1].rstrip('MHz')))

    def _delta_proc_cycles(self, samples, prev_samples):
        """Sums all the processor delta cycles for a set of VM/VIOS samples.

        This sum is the difference from the last sample to the current
        sample.

        :param samples: A set of PhypVMSample or PhypViosSample samples.
        :param prev_samples: The set of the previous samples. May be None.
        :return: The cycles spent on workload across all of the samples.
        """
        # Determine the user cycles spent between the last sample and the
        # current.
        user_cycles = 0
        for lpar_sample in samples:
            prev_sample = self._find_prev_sample(lpar_sample, prev_samples)
            user_cycles += self._delta_user_cycles(lpar_sample, prev_sample)
        return user_cycles

    @staticmethod
    def _delta_user_cycles(cur_sample, prev_sample):
        """Determines the delta of user cycles from the cur and prev sample.

        :param cur_sample: The current sample.
        :param prev_sample: The previous sample. May be None.
        :return: The difference in cycles between the two samples. If the
                 data only exists in the current sample (indicates a new
                 workload), then all of the cycles from the current sample
                 will be considered the delta.
        """
        # If the previous sample for this VM is None it could be one of two
        # conditions. It could be a new spawn or a live migration. The cycles
        # from a live migrate are brought over from the previous host. That
        # can disorient the calculation because all of a sudden you could get
        # months of cycles. Since we can not discern between the two
        # scenarios, we return 0 (effectively throwing the sample out).
        # The next pass through will have the previous sample and will be
        # included.
        if prev_sample is None:
            return 0

        # If the previous sample values are all 0 (happens when VM is just
        # migrated, phyp creates entry for VM with 0 values), then ignore the
        # sample.
        if (prev_sample.processor.util_cap_proc_cycles ==
                prev_sample.processor.util_uncap_proc_cycles ==
                prev_sample.processor.idle_proc_cycles == 0):
            return 0

        # The VM utilization on host is its capped + uncapped - idle cycles.
        # Donated proc cycles should not be considered as these are
        # not guaranteed to be getting utilized by any other lpar on the
        # host.
        prev_amount = (prev_sample.processor.util_cap_proc_cycles +
                       prev_sample.processor.util_uncap_proc_cycles -
                       prev_sample.processor.idle_proc_cycles)
        cur_amount = (cur_sample.processor.util_cap_proc_cycles +
                      cur_sample.processor.util_uncap_proc_cycles -
                      cur_sample.processor.idle_proc_cycles)
        return cur_amount - prev_amount

    @staticmethod
    def _find_prev_sample(sample, prev_samples):
        """Finds the previous VM Sample for a given current sample.

        :param sample: The current sample.
        :param prev_samples: The previous samples to search through.
        :return: The previous sample, if it exists. None otherwise.
        """
        # Will occur if there are no previous samples.
        if prev_samples is None:
            return None

        # Match on both id and name to guard against id reuse.
        for prev_sample in prev_samples:
            if (prev_sample.id == sample.id and
                    prev_sample.name == sample.name):
                return prev_sample
        return None

    def _get_total_cycles_delta(self):
        """Returns the 'total cycles' on the system since last sample.

        :return: The total delta cycles since the last run.
        """
        sample = self.cur_phyp.sample
        cur_cores = sample.processor.configurable_proc_units
        cur_cycles_per_core = sample.time_based_cycles

        if self.prev_phyp:
            prev_cycles_per_core = self.prev_phyp.sample.time_based_cycles
        else:
            prev_cycles_per_core = 0

        # Get the delta cycles between the cores.
        delta_cycles_per_core = cur_cycles_per_core - prev_cycles_per_core

        # Total cycles since last sample is the 'per cpu' cycles spent
        # times the number of active cores.
        return delta_cycles_per_core * cur_cores

    def _get_fw_cycles_delta(self):
        """Returns the number of cycles spent on firmware since last
        sample."""
        cur_fw = self.cur_phyp.sample.system_firmware.utilized_proc_cycles
        prev_fw = (self.prev_phyp.sample.system_firmware.utilized_proc_cycles
                   if self.prev_phyp else 0)
        return cur_fw - prev_fw
pypowervm-1.1.24/pypowervm/tasks/monitor/__init__.py0000664000175000017500000000000013571367171022233 0ustar  neoneo00000000000000pypowervm-1.1.24/pypowervm/tasks/monitor/lpar.py0000664000175000017500000004441413571367171021453 0ustar  neoneo00000000000000# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Objects that contain the per LPAR monitor data."""

import abc

import six


class LparMetric(object):
    """Represents a set of metrics for a given LPAR.

    This is a reduction and consolidation of the raw PCM statistics.
    """

    def __init__(self, uuid):
        """Creates a LPAR Metric. Data will be set by invoker.

        - uuid - The LPAR's UUID.
        - memory - The LPAR's memory statistics.
        - processor - The LPAR's processor statistics.
        - network - The LparNetwork aggregation of network statistics.
        - storage - the LparStorage aggregation of storage statistics.

        If certain attributes are None, that means the statistics could not
        be pulled.
        :param uuid: The LPAR UUID
        """
        self.uuid = uuid
        self.memory = None
        self.processor = None
        self.network = None
        self.storage = None


@six.add_metaclass(abc.ABCMeta)
class PropertyWrapper(object):
    """Provides a thin wrapper around the raw metrics class.

    Sub class should have the _supported_metrics element defined.
    """

    def __init__(self, elem):
        self.elem = elem

    def __getattr__(self, attr):
        # Only the whitelisted metric names are delegated to the raw
        # metric object; everything else raises as if undefined.
        if attr not in self._supported_metrics:
            raise AttributeError()
        return getattr(self.elem, attr)


class LparMemory(object):
    """Represents the memory for a given LPAR.

    Requires the following as inputs:
    - PhypLparMemory raw metric
    - LparInfo.LparUtil raw metric. These metrics are got from
      IBM.Host Resource Manager through RMC.

    The supported metrics are as follows:
    - logical_mem: The amount of memory on the LPAR.
    - backed_physical_mem: The amount of backing physical memory used by
      the LPAR.
    - pct_real_mem_avbl: Percentage of available memory on VMs. It is only
      available for newer RSCT packages. This statistic does not count
      cached memory as in use.
    - total_pg_count: Page count of swap space for this VM. Page size is 4k.
    - free_pg_count: Page count of free swap space for this VM.
      Page size is 4k.
    - active_pg_count: Page count of total active memory for this VM.
      Page size is 4k.
    - real_mem_size_bytes: Total amount of memory assigned to this VM in
      bytes.
    - pct_real_mem_free: Percentage of real page frames that are currently
      available on the VMM (Virtual Memory Manager) free list. VMM manages
      the allocation of real memory page frames, resolves references to
      virtual memory pages that are not currently in real memory and manages
      the reading and writing of pages to disk storage.
    - vm_pg_in_rate: Represents the rate (in pages per second) that the VMM
      is reading both persistent and working pages from disk storage. A -1
      value indicates that system could not determine this metric.
    - vm_pg_out_rate: Represents the rate (in pages per second) that the VMM
      is writing both persistent and working pages to disk storage. A -1
      value indicates that system could not determine this metric.
    - vm_pg_swap_in_rate: Represents the rate (in pages per second) that the
      VMM is reading working pages from paging-space disk storage. A -1
      value indicates that system could not determine this metric.
    - vm_pg_swap_out_rate: Represents the rate (in pages per second) that
      the VMM is writing working pages to paging-space disk storage. A -1
      value indicates that system could not determine this metric.
    """

    def __init__(self, lpar_mem_phyp, lpar_mem_pcm):
        self.logical_mem = lpar_mem_phyp.logical_mem
        self.backed_physical_mem = lpar_mem_phyp.backed_physical_mem
        # Its possible that for the lpar_sample, the memory metric was not
        # collected. If the metric is not available,
        # then assume 0 i.e. all memory is being utilized.
        if lpar_mem_pcm:
            self.pct_real_mem_avbl = lpar_mem_pcm.memory.pct_real_mem_avbl
            self.total_pg_count = lpar_mem_pcm.memory.total_pg_count
            self.free_pg_count = lpar_mem_pcm.memory.free_pg_count
            self.active_pg_count = lpar_mem_pcm.memory.active_pg_count
            self.real_mem_size_bytes = \
                lpar_mem_pcm.memory.real_mem_size_bytes
            self.pct_real_mem_free = lpar_mem_pcm.memory.pct_real_mem_free
            self.vm_pg_in_rate = lpar_mem_pcm.memory.vm_pg_in_rate
            self.vm_pg_out_rate = lpar_mem_pcm.memory.vm_pg_out_rate
            self.vm_pg_swap_in_rate = lpar_mem_pcm.memory.vm_pg_swap_in_rate
            self.vm_pg_swap_out_rate = \
                lpar_mem_pcm.memory.vm_pg_swap_out_rate
        else:
            # RMC data missing: report "no free memory" and unknown (-1)
            # paging rates rather than leaving the attributes undefined.
            self.pct_real_mem_free = 0
            self.vm_pg_in_rate = -1
            self.vm_pg_out_rate = -1
            self.vm_pg_swap_in_rate = -1
            self.vm_pg_swap_out_rate = -1


class LparProc(PropertyWrapper):
    """Represents the CPU statistics for a given LPAR.

    Requires the PhypLparProc raw metric as input.

    The supported metrics are as follows:
    - pool_id: The CPU pool for this LPAR.
    - mode: The CPU mode. Typically cap or uncap.
    - virt_procs: The number of virtual processors assigned to the LPAR.
    - proc_units: The number of proc units assigned to the LPAR.

      Ex. if virt_procs is 4 and proc_units is .4, then each virtual
      processor has .1 CPUs.
    - weight: The CPU weight for uncapped processors. This defines how
      aggressive this CPU should be when using unused cycles from other
      LPARs (as compared to other VMs that may also request those unused
      cycles).
    - entitled_proc_cycles: The entitled number of processor cycles.
    - util_cap_proc_cycles: The number of used processor cycles from its
      capped capacity.
    - util_uncap_proc_cycles: The number of utilized processor cycles pulled
      from uncap spare.
    - idle_proc_cycles: The CPU cycles spent idling.
    - donated_proc_cycles: The number of CPU cycles donated to other VMs due
      to no need.
    - time_wait_dispatch: Time spent waiting for CPU dispatch.
    - total_instructions: The total instructions executed.
    - total_inst_exec_time: The time for the instructions to execute.
    """

    _supported_metrics = ('pool_id', 'mode', 'virt_procs', 'proc_units',
                          'weight', 'entitled_proc_cycles',
                          'util_cap_proc_cycles', 'util_uncap_proc_cycles',
                          'idle_proc_cycles', 'donated_proc_cycles',
                          'time_wait_dispatch', 'total_instructions',
                          'total_inst_exec_time')


class LparStorage(object):
    """Represents the Storage statistics for a given LPAR.

    Requires the PhypLparStorage and list of ViosInfo raw metrics as input.

    Contains the various LPAR storage statistic elements.
    - virt_adapters - List of LparVirtStorageAdpt on the LPAR
    - vfc_adpts - List of LparVFCAdpt on the LPAR
    """

    def __init__(self, lpar_phyp_storage, vios_metrics):
        """Fills the VM storage metrics from the raw PHYP/VIOS metrics.

        :param lpar_phyp_storage: The raw Phyp Storage object.
        :param vios_metrics: The list of Virtual I/O Server raw metrics that
                             are paired to the sample from the lpar_phyp
                             metrics.
        """
        # Add the various adapters.
        self.virt_adpts = []
        for vadpt in lpar_phyp_storage.v_stor_adpts:
            vio_adpt = self._find_vio_vstor_adpt(vadpt, vios_metrics)
            if vio_adpt is not None:
                self.virt_adpts.append(LparVirtStorageAdpt(vio_adpt))

        self.vfc_adpts = []
        for phyp_vfc_adpt in lpar_phyp_storage.v_fc_adpts:
            vfc_adpt = self._find_vio_vfc_adpt(phyp_vfc_adpt, vios_metrics)
            if vfc_adpt is not None:
                self.vfc_adpts.append(LparVFCAdpt(vfc_adpt))

    @staticmethod
    def _find_vio_vstor_adpt(phyp_vadpt, vios_metrics):
        """Finds the appropriate VIOS virtual storage adapter.

        For a given PHYP virtual adapter, PHYP only has a little bit of
        information about it. Which VIOS is hosting it, and the slot.

        The VIOS metrics actually contain the information for that device.

        This method will look through all the VIOS samples to find the
        matching ViosStorageVAdpt for the given PhypStorageVAdpt. If one
        can not be found, None is returned.

        :param phyp_vadpt: The PhypStorageVAdpt raw metric.
        :param vios_metrics: The list of ViosInfos.
        :return: The corresponding ViosStorageVAdpt from the ViosInfos
                 if one can be found. None otherwise.
        """
        for vios_ltm in vios_metrics:
            # We need to find the VIOS sample that matches this storage
            # element. Loop until we find one (if one doesn't exist, then
            # this will just return None).
            if vios_ltm.sample.id != phyp_vadpt.vios_id:
                continue

            # If we reach here, we found the VIOS. From that sample, see
            # if we have the appropriate storage.
            raw_stor = vios_ltm.sample.storage
            if raw_stor is None or raw_stor.virt_adpts is None:
                break

            # See if this virtual adapters has the right data.
            slot_str = "-C%d" % phyp_vadpt.vios_slot
            for vadpt in raw_stor.virt_adpts:
                # We have to match on the location code. We can only match
                # on the tail end of the slot (we've already validated that
                # we have the right VIOS, so slot is sufficient).
                if vadpt.physical_location.endswith(slot_str):
                    return vadpt

            # If we reached this point, we found the right VIOS, but
            # couldn't find proper data. Therefore we can just exit the
            # loop.
            break
        return None

    @staticmethod
    def _find_vio_vfc_adpt(phyp_vfc_adpt, vios_metrics):
        """Finds the appropriate VIOS virtual FC adapter.

        For a given PHYP virtual FC adapter, PHYP only has a little bit of
        information about it. Which VIOS is hosting it, and the WWPNs.

        The VIOS metrics actually contain the information for that device.

        This method will look through all the VIOS samples to find the
        matching ViosFCVirtAdpt for the given PhypVirtualFCAdpt. If one
        can not be found, None is returned.

        :param phyp_vfc_adpt: The PhypVirtualFCAdpt raw metric.
        :param vios_metrics: The list of ViosInfos.
        :return: The corresponding ViosFCVirtAdpt from the ViosInfos
                 if one can be found. None otherwise.
        """
        for vios_ltm in vios_metrics:
            # We need to find the VIOS sample that matches this VFC
            # element. Loop until we find one (if one doesn't exist, then
            # this will just return None).
            if vios_ltm.sample.id != phyp_vfc_adpt.vios_id:
                continue

            # If we reach here, we found the VIOS. From that sample, see
            # if we have the appropriate storage.
            raw_stor = vios_ltm.sample.storage
            if raw_stor is None or raw_stor.fc_adpts is None:
                return None

            # Check the WWPNs.
            for pfc_adpt in raw_stor.fc_adpts:
                vfc_adpt = LparStorage._find_vfc(phyp_vfc_adpt, pfc_adpt)
                if vfc_adpt is not None:
                    return vfc_adpt

        return None

    @staticmethod
    def _find_vfc(phyp_vfc_adpt, vio_pfc_adpt):
        """Finds the matching VIOS vfc adpt for a given PHYP adapter

        :param phyp_vfc_adpt: The raw PhypVirtualFCAdpt object.
        :param vio_pfc_adpt: The raw ViosFCPhysAdpt.
        :return: The matching ViosFCVirtAdpt contained within the physical
                 VIOS adapter. If one can't be found, None will be returned.
        """
        if vio_pfc_adpt.ports is None:
            return None
        for vfc_adpt in vio_pfc_adpt.ports:
            # Either WWPN of the pair identifies the virtual adapter.
            for wwpn in phyp_vfc_adpt.wwpn_pair:
                if wwpn == vfc_adpt.wwpn:
                    return vfc_adpt
        return None


@six.add_metaclass(abc.ABCMeta)
class LparStorageAdpt(PropertyWrapper):
    """Base class for storage adapters on a given LPAR.

    Requires the vios storage adapter raw metric as input. Specific classes
    are defined by the subclasses.

    The supported metrics are as follows:
    - name: The identifier of the adapter. Ex: vhost2.
    - physical_location: The physical location code of the adapter.
    - num_reads: The number of read operations done against the adapter.
    - num_writes: The number of write operations done against the adapter.
    - read_bytes: The number of bytes read from the adapter.
    - write_bytes: The number of bytes written to the adapter.
    - type: The type of the adapter.
    """

    _supported_metrics = ('name', 'physical_location', 'num_reads', 'type',
                          'num_writes', 'read_bytes', 'write_bytes')


class LparVFCAdpt(LparStorageAdpt):
    """A Virtual Fibre Channel Adapter attached to the LPAR.

    Requires the ViosFCVirtAdpt raw metric as input.

    The supported metrics are as follows:
    - name: The identifier of the adapter. Ex: vhost2.
    - physical_location: The physical location code of the adapter.
    - num_reads: The number of read operations done against the adapter.
    - num_writes: The number of write operations done against the adapter.
    - read_bytes: The number of bytes read from the adapter.
    - write_bytes: The number of bytes written to the adapter.
    - type: The type of the adapter. Will be set to VFC.
    """

    @property
    def type(self):
        """Overrides the type property as the raw metric.

        The VFC Adapter does not natively have a type in the raw metric.
        This property overrides and circumvents the standard property lookup
        mechanism.
        """
        return "VFC"


class LparPhysAdpt(LparStorageAdpt):
    """A physical adapter (ex SAS drive) on the LPAR.

    Requires the ViosStoragePAdpt raw metric as input.

    The supported metrics are as follows:
    - name: The identifier of the adapter. Ex: vhost2.
    - physical_location: The physical location code of the adapter.
    - num_reads: The number of read operations done against the adapter.
    - num_writes: The number of write operations done against the adapter.
    - read_bytes: The number of bytes read from the adapter.
    - write_bytes: The number of bytes written to the adapter.
    - type: The type of the adapter.
    """
    pass


class LparVirtStorageAdpt(LparStorageAdpt):
    """A Virtual Storage Adapter (ex. vscsi) attached to the LPAR.

    Requires the ViosStorageVAdpt raw metric as input.  The supported metrics
    are as follows:
    - name: The identifier of the adapter.  Ex: vhost2.
    - physical_location: The physical location code of the adapter.
    - num_reads: The number of read operations done against the adapter.
    - num_writes: The number of write operations done against the adapter.
    - read_bytes: The number of bytes read from the adapter.
    - write_bytes: The number of bytes written to the adapter.
    - type: The type of the adapter.
    """
    pass


class LparNetwork(object):
    """Represents the Network statistics for a given LPAR.

    Requires the PhypNetwork raw metric as input.  Aggregates the various
    types of network statistics for a given LPAR.
    - cnas - List of the Client Network Adapter stats.
    """

    def __init__(self, lpar_sample_net):
        """Creates the Network Statistics aggregation element.

        Puts the network information into the lpar_metric.network variable.

        :param lpar_sample_net: The PHYP raw data sample.
        """
        # Fill in the Client Network Adapter data sources.  A None 'veas'
        # sample simply means no adapters were reported; treat as empty.
        self.cnas = ([] if lpar_sample_net.veas is None
                     else [LparCNA(x) for x in lpar_sample_net.veas])

        # TODO(thorst) Additional network metrics.  Ex. SR-IOV ports


class LparCNA(PropertyWrapper):
    """Represents a Client Network Adapter on a LPAR.

    Requires the PhypVEA raw metric as input.  The supported metrics are as
    follows:
    - vlan_id: The PVID of the Client Network Adapter.
    - vswitch_id: The virtual switch ID (not UUID).
    - physical_location: The physical location for the Client Network Adapter.
    - received_packets: The count of packets received to the Client Network
                        Adapter.
    - sent_packets: The count of packets sent by the Client Network Adapter.
    - dropped_packets: The count of the packets dropped by the Client Network
                       Adapter.
- sent_bytes: The count of the bytes sent by the Client Network Adapter. - received_bytes: The count of the bytes received by the Client Network Adapter. """ _supported_metrics = ('vlan_id', 'vswitch_id', 'physical_location', 'received_packets', 'sent_packets', 'dropped_packets', 'sent_bytes', 'received_bytes') pypowervm-1.1.24/pypowervm/tasks/sriov.py0000775000175000017500000006176413571367171020202 0ustar neoneo00000000000000# Copyright 2016, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Complex tasks around SR-IOV cards/ports, VFs, and vNICs.""" import copy from oslo_concurrency import lockutils as lock from oslo_log import log as logging import random import six import pypowervm.exceptions as ex from pypowervm.i18n import _ import pypowervm.tasks.partition as tpar import pypowervm.utils.transaction as tx import pypowervm.wrappers.iocard as card import pypowervm.wrappers.managed_system as ms LOG = logging.getLogger(__name__) # Take read_lock on operations that create/delete VFs (including VNIC). This # is a read_lock so we don't serialize all VF creation globally. # Take write_lock on operations that modify properties of physical ports and # rely on knowing the usage counts thereon (e.g. changing port labels). 
PPORT_MOD_LOCK = lock.ReaderWriterLock() def _validate_capacity(min_capacity, max_capacity): if max_capacity: if max_capacity > 1: raise ValueError('Maximum capacity cannot be greater than ' '100 percent') if max_capacity < min_capacity: raise ValueError('Maximum capacity cannot be less than ' 'min capacity') def set_vnic_back_devs(vnic_w, pports, sys_w=None, vioses=None, redundancy=1, capacity=None, max_capacity=None, check_port_status=False): """Set a vNIC's backing devices over given SRIOV physical ports and VIOSes. Assign the backing devices to a iocard.VNIC wrapper using an anti-affinity algorithm. That is, the method attempts to distribute the backing devices across as diverse a range of physical SRIOV adapters and VIOSes as possible, using the least-saturated ports first. For example, given: vios1, vios2 SRIOVAdapter1 PPortA (50% allocated) PPortB (20%) PPortC (45%) SRIOVAdapter2 PPortD (10%) PPortE (2%) PPortF (11%) set_vnic_back_devs(vnic, [PPortA, PPortB, PPortC, PPortD, PPortE, PPortF], [vios1, vios2], redundancy=4) ...we will create backing devices like: [(vios1, PPortE), (vios2, PPortB), (vios1, PPortD), (vios2, PPortC)] As part of the algorithm, we will use sriov_adaps to filter out physical ports which are already saturated. This could err either way due to out-of-band changes: - We may end up excluding a port which has had some capacity freed up since sriov_adaps was retrieved; or - We may attempt to include a port which has become saturated since sriov_adaps was retrieved, resulting in an error from the REST server. This method acts on the vNIC-related capabilities on the system and VIOSes: - If the system is not vNIC capable, the method will fail. - If none of the active VIOSes are vNIC capable, the method will fail. - If redundancy > 1, - the system must be vNIC failover capable, and - at least one active VIOS must be vNIC failover capable. - If any VIOSes are vNIC failover capable, failover-incapable VIOSes will be ignored. 
    :param vnic_w: iocard.VNIC wrapper, as created via VNIC.bld().  If
                   vnic_w.back_devs is nonempty, it is cleared and replaced.
                   This parameter is modified by the method (there is no
                   return value).  If this method raises an exception, vnic_w
                   is guaranteed to be unchanged.
    :param pports: List of physical location code strings (corresponding to
                   the loc_code @property of iocard.SRIOV*PPort) for all
                   SRIOV physical ports to be considered as backing devices
                   for the vNIC.  This does not mean that all of these ports
                   will be used.
    :param sys_w: Pre-fetched pypowervm.wrappers.managed_system.System
                  wrapper.  If not specified, it will be fetched from the
                  server.
    :param vioses: List of VIOS wrappers to consider for distribution of vNIC
                   servers.  Not all listed VIOSes will necessarily be used.
                   If not specified, the feed of all active (including RMC)
                   VIOSes will be fetched from the server.  If specified, the
                   list will be filtered to include only active (including
                   RMC) VIOSes (according to the wrappers - the server is not
                   re-checked).  The list is also filtered to remove VIOSes
                   which are not vNIC capable; and, if redundancy > 1, to
                   remove VIOSes which are not vNIC failover capable.
    :param redundancy: Number of backing devices to assign.  If the method
                       can't allocate this many VFs after filtering the
                       pports list, InsufficientSRIOVCapacity will be raised.
                       Note that at most one VF is created on each physical
                       port.
    :param capacity: (float) Minimum capacity to assign to each backing
                     device.  Must be between 0.0 and 1.0, and must be a
                     multiple of the min_granularity of *all* of the pports.
                     (Capacity may be assigned to each individual backing
                     device after the fact to achieve more control; but in
                     that case, the consumer is responsible for validating
                     sufficient available capacity.)
    :param max_capacity: (float) Maximum capacity to assign to each backing
                         device.  Must be greater than or equal to capacity
                         and less than 1.0.
:param check_port_status: If True, only ports with link-up status will be considered for allocation. If False (the default), link-down ports may be used. :raise NoRunningSharedSriovAdapters: If no SR-IOV adapters in Sriov mode and Running state can be found. :raise NotEnoughActiveVioses: If no active (including RMC) VIOSes can be found. :raise InsufficientSRIOVCapacity: If the method was not able to allocate enough VFs to satisfy the specified redundancy. :raise SystemNotVNICCapable: If the managed system is not vNIC capable. :raise NoVNICCapableVIOSes: If there are no vNIC-capable VIOSes. :raise VNICFailoverNotSupportedSys: If redundancy > 1, and the system is not vNIC failover capable. :raise VNICFailoverNotSupportedVIOS: If redundancy > 1, and there are no vNIC failover-capable VIOSes. """ # Validations for maximum capacity _validate_capacity(capacity, max_capacity) # An Adapter to work with adap = vnic_w.adapter if adap is None: raise ValueError('Developer error: Must build vnic_w with an Adapter.') # Check vNIC capability on the system sys_w = _check_sys_vnic_capabilities(adap, sys_w, redundancy) # Filter SR-IOV adapters sriov_adaps = _get_good_sriovs(sys_w.asio_config.sriov_adapters) # Get VIOSes which are a) active, b) vNIC capable, and c) vNIC failover # capable, if necessary. vioses = _check_and_filter_vioses(adap, vioses, redundancy) # Try not to end up lopsided on one VIOS random.shuffle(vioses) # Get the subset of backing ports corresponding to the specified location # codes which have enough space for new VFs. pport_wraps = _get_good_pport_list(sriov_adaps, pports, capacity, redundancy, check_port_status) # At this point, we've validated enough that we won't raise. Start by # clearing any existing backing devices. 
    vnic_w.back_devs = []

    # Index each adapter by its ID: how many of its ports we have used so
    # far, and how many of its candidate ports remain available.  This drives
    # the anti-affinity spread across cards.
    card_use = {}
    for pport in pport_wraps:
        said = pport.sriov_adap_id
        if said not in card_use:
            card_use[said] = {'num_used': 0, 'ports_left': 0}
        card_use[said]['ports_left'] += 1

    # Each backing device rotates to the next VIOS; each port is used at most
    # once (it is removed from pport_wraps after selection).
    vio_idx = 0
    while pport_wraps and len(vnic_w.back_devs) < redundancy:
        # Always rotate VIOSes
        vio = vioses[vio_idx]
        vio_idx = (vio_idx + 1) % len(vioses)

        # Select the least-saturated port from among the least-used adapters.
        least_uses = min([cud['num_used'] for cud in card_use.values()])
        pp2use = min([pport for pport in pport_wraps
                      if card_use[pport.sriov_adap_id]['num_used'] ==
                      least_uses],
                     key=lambda pp: pp.allocated_capacity)

        said = pp2use.sriov_adap_id

        # Register a hit on the chosen port's card
        card_use[said]['num_used'] += 1

        # And take off a port
        card_use[said]['ports_left'] -= 1

        # If that was the last port, remove this card from consideration
        if card_use[said]['ports_left'] == 0:
            del card_use[said]

        # Create and add the backing device
        vnic_w.back_devs.append(card.VNICBackDev.bld(
            adap, vio.uuid, said, pp2use.port_id, capacity=capacity,
            max_capacity=max_capacity))

        # Remove the port we just used from subsequent consideration.
        pport_wraps.remove(pp2use)


def _check_sys_vnic_capabilities(adap, sys_w, redundancy):
    """Validate vNIC capabilities on the Managed System.

    :param adap: pypowervm Adapter.
    :param sys_w: pypowervm.wrappers.managed_system.System wrapper.  If None,
                  it is retrieved from the host.
    :param redundancy: If greater than 1, this method will verify that the
                       System is vNIC failover-capable.  Otherwise, this
                       check is skipped.
    :return: The System wrapper.
    :raise SystemNotVNICCapable: If the System is not vNIC capable.
    :raise VNICFailoverNotSupportedSys: If redundancy > 1 and the System is
                                        not vNIC failover capable.
""" if sys_w is None: sys_w = ms.System.get(adap)[0] if not sys_w.get_capability('vnic_capable'): raise ex.SystemNotVNICCapable() if redundancy > 1 and not sys_w.get_capability('vnic_failover_capable'): raise ex.VNICFailoverNotSupportedSys(red=redundancy) return sys_w def _check_and_filter_vioses(adap, vioses, redundancy): """Return active VIOSes with appropriate vNIC capabilities. Remove all VIOSes which are not active or not vNIC capable. If min_redundancy > 1, failover is required, so remove VIOSes that are not also vNIC failover capable. Error if no VIOSes remain. :param adap: pypowervm Adapter. :param vioses: List of pypowervm.wrappers.virtual_io_server.VIOS to check. If None, all active VIOSes are retrieved from the server. :param redundancy: If greater than 1, the return list will include only vNIC failover-capable VIOSes. Otherwise, if any VIOSes are vNIC failover-capable, non-failover-capable VIOSes are excluded. :return: The filtered list of VIOS wrappers. :raise NotEnoughActiveVioses: If no active (including RMC) VIOSes can be found. :raise NoVNICCapableVIOSes: If none of the vioses are vNIC capable. :raise VNICFailoverNotSupportedVIOS: If redundancy > 1 and none of the vioses is vNIC failover capable. """ # This raises if none are found vioses = tpar.get_active_vioses(adap, xag=[], vios_wraps=vioses, find_min=1) # Filter by vNIC capability vioses = [vios for vios in vioses if vios.vnic_capable] if not vioses: raise ex.NoVNICCapableVIOSes() # Filter by failover capability, if needed. # If any are failover-capable, use just those, regardless of redundancy. failover_only = [vios for vios in vioses if vios.vnic_failover_capable] if redundancy > 1 or any(failover_only): vioses = failover_only # At this point, if the list is empty, it's because no failover capability. if not vioses: raise ex.VNICFailoverNotSupportedVIOS(red=redundancy) return vioses def _get_good_sriovs(sriov_adaps): """(Retrieve and) filter SR-IOV adapters to those Running in Sriov mode. 
    :param sriov_adaps: List of SRIOVAdapter wrappers to filter by
                        mode/state.
    :return: List of SR-IOV adapters in Running state and in Sriov mode.
    :raise NoRunningSharedSriovAdapters: If no SR-IOV adapters can be found
                                         in Sriov mode and Running state.
    """
    # Filter SRIOV adapters to those in the correct mode/state
    good_adaps = [sriov for sriov in sriov_adaps
                  if sriov.mode == card.SRIOVAdapterMode.SRIOV and
                  sriov.state == card.SRIOVAdapterState.RUNNING]
    if not good_adaps:
        raise ex.NoRunningSharedSriovAdapters(
            sriov_loc_mode_state='\n'.join([' | '.join([
                sriov.phys_loc_code, sriov.mode,
                sriov.state or '-']) for sriov in sriov_adaps]))

    LOG.debug('Found running/shared SR-IOV adapter(s): %s',
              str([sriov.phys_loc_code for sriov in good_adaps]))

    return good_adaps


def _get_good_pport_list(sriov_adaps, pports, capacity, redundancy,
                         check_link_status):
    """Get a list of SRIOV*PPort filtered by capacity and specified pports.

    Builds a list of pypowervm.wrappers.iocard.SRIOV*PPort from sriov_adaps
    such that:
    - Only ports whose location codes are listed in the pports param are
      considered.
    - Only ports with sufficient remaining capacity (per the capacity param,
      if specified; otherwise the port's min_granularity) are considered.

    :param sriov_adaps: A list of SRIOVAdapter wrappers whose mode is Sriov
                        and whose state is Running.
    :param pports: A list of string physical location codes of the physical
                   ports to consider.
    :param capacity: (float) Minimum capacity which must be available on each
                     backing device.  Must be between 0.0 and 1.0, and must
                     be a multiple of the min_granularity of *all* of the
                     pports.  If None, available port capacity is validated
                     using each port's min_granularity.
    :param redundancy: The desired redundancy level (number of ports to
                       return).  If the filtered list has fewer than this
                       number of ports, InsufficientSRIOVCapacity is raised.
    :param check_link_status: If True, ports with link-down status will not
                              be returned.  If False, link status is not
                              checked.
:raise InsufficientSRIOVCapacity: If the final list contains fewer than 'redundancy' ports. :return: A filtered list of SRIOV*PPort wrappers. """ def port_ok(port): pok = True # Is it in the candidate list? if port.loc_code not in pports: pok = False # Is the link state up if check_link_status and not port.link_status: pok = False # Does it have available logical ports? if port.cfg_lps >= port.cfg_max_lps: pok = False # Does it have capacity? des_cap = port.min_granularity if capacity is not None: # Must be at least min_granularity. des_cap = max(des_cap, capacity) if port.allocated_capacity + des_cap > 1.0: pok = False return pok pport_wraps = [] for sriov in sriov_adaps: for pport in sriov.phys_ports: if port_ok(pport): pp2add = copy.deepcopy(pport) pport_wraps.append(pp2add) if len(pport_wraps) < redundancy: raise ex.InsufficientSRIOVCapacity(red=redundancy, found_vfs=len(pport_wraps)) LOG.debug('Filtered list of physical ports: %s' % str([pport.loc_code for pport in pport_wraps])) return pport_wraps def get_lpar_vnics(adapter): """Return a dict mapping LPAR wrappers to their VNIC feeds. :param adapter: The pypowervm.adapter.Adapter for REST API communication. :return: A dict of the form { LPAR: [VNIC, ...] }, where the keys are pypowervm.wrappers.logical_partition.LPAR and the values are lists of the pypowervm.wrappers.iocard.VNIC they own. """ return {lpar: card.VNIC.get(adapter, parent=lpar) for lpar in tpar.get_partitions(adapter, lpars=True, vioses=False)} def _vnics_using_pport(pport, lpar2vnics): """Determine (and warn about) usage of SRIOV physical port by VNICs. Ascertain whether an SRIOV physical port is being used as a backing device for any VNICs. The method returns a list of warning messages for each such usage found. :param pport: pypowervm.wrappers.iocard.SRIOV*PPort wrapper to check. :param lpar2vnics: Dict of {LPAR: [VNIC, ...]} gleaned from get_lpar_vnics :return: A list of warning messages for found usages of the physical port. 
If no usages were found, the empty list is returned. """ warnings = [] for lpar, vnics in six.iteritems(lpar2vnics): for vnic in vnics: if any([backdev for backdev in vnic.back_devs if backdev.sriov_adap_id == pport.sriov_adap_id and backdev.pport_id == pport.port_id]): warnings.append( _("SR-IOV Physical Port at location %(loc_code)s is " "backing a vNIC belonging to LPAR %(lpar_name)s (LPAR " "UUID: %(lpar_uuid)s; vNIC UUID: %(vnic_uuid)s).") % {'loc_code': pport.loc_code, 'lpar_name': lpar.name, 'lpar_uuid': lpar.uuid, 'vnic_uuid': vnic.uuid}) return warnings def _vet_port_usage(sys_w, label_index): """Look for relabeled ports which are in use by vNICs. :param sys_w: pypowervm.wrappers.managed_system.System wrapper for the host. :param label_index: Dict of { port_loc_code: port_label_before } mapping the physical location code of each physical port to the value of its label before changes were made. :return: A list of translated messages warning of relabeled ports which are in use by vNICs. """ warnings = [] lpar2vnics = None for sriovadap in sys_w.asio_config.sriov_adapters: for pport in sriovadap.phys_ports: # If the port is unused, it's fine if pport.cfg_lps == 0: continue # If the original port label was unset, no harm setting it. if not label_index[pport.loc_code]: continue # If the port label is unchanged, it's fine if pport.label == label_index[pport.loc_code]: continue # Now we have to check all the VNICs on all the LPARs. Lazy-load # this, because it's expensive. if lpar2vnics is None: lpar2vnics = get_lpar_vnics(sys_w.adapter) warnings += _vnics_using_pport(pport, lpar2vnics) return warnings @tx.entry_transaction def safe_update_pports(sys_w, callback_func, force=False): """Retrying entry transaction for safe updates to SR-IOV physical ports. Usage: def changes(sys_w): for sriov in sys_w.asio_config.sriov_adapters: ... sriov.phys_ports[n].pport.label = some_new_label ... update_needed = True ... 
return update_needed sys_w = safe_update_pports(System.getter(adap), changes, force=maybe) The consumer passes a callback method which makes changes to the labels of the physical ports of the ManagedSystem's SR-IOV adapters. If the callback returns a False value (indicating that no update is necessary), safe_update_pports immediately returns the sys_w. If the callback returns a True value, safe_update_pports first checks whether any of the changed ports are in use by vNICs (see "Why vNICs?" below). If the force option is not True, and any uses were found, this method raises an exception whose text includes details about the found usages. Otherwise, the found usages are logged as warnings. Assuming no exception is raised, safe_update_pports attempts to update the sys_w wrapper with the REST server. (The caller does *not* do the update.) If an etag mismatch is encountered, safe_update_pports refreshes the sys_w wrapper and retries, according to the semantics of entry_transaction. Why vNICs? Care must be taken when changing port labels on the fly because those labels are used by LPM to ensure that the LPAR on the target system gets equivalent connectivity. Direct-attached VFs - either those belonging to VIOSes (e.g. for SEA) or to LPARs - mean the partition is not migratable, so the labels can be changed with impunity. And the only way a VF is migratable is if it belongs to a vNIC on a migratable LPAR. :param sys_w: pypowervm.wrappers.managed_system.System wrapper or getter thereof. :param callback_func: Method executing the actual changes on the sys_w. The method must accept sys_w (a System wrapper) as its only argument. Its return value will be interpreted as a boolean to determine whether to perform the update() (True) or not (False). :param force: If False (the default) and any of the updated physical ports are found to be in use by vNICs, the method will raise. If True, warnings are logged for each such usage, but the method will succeed. 
:return: The (possibly-updated) sys_w. :raise CantUpdatePPortsInUse: If any of the relabeled physical ports are in use by vNICs *and* the force option is False. """ with PPORT_MOD_LOCK.write_lock(): # Build an index of port:label for comparison after setting label_index = {pport.loc_code: pport.label for sriovadap in sys_w.asio_config.sriov_adapters for pport in sriovadap.phys_ports} # Let caller make the pport changes. if not callback_func(sys_w): # No update needed. # sys_w may be what was passed in, or the result of the getter. return sys_w # If return is True, caller wants us to update(). For each port that # changed, check its usage warnings = _vet_port_usage(sys_w, label_index) if warnings and not force: raise ex.CantUpdatePPortsInUse(warnings=warnings) # We're going to do the update. Log any found usages. if warnings: LOG.warning(_("Making changes to the following SR-IOV physical " "port labels even though they are in use by vNICs:")) for warning in warnings: LOG.warning(warning) return sys_w.update() def find_pports_for_portlabel(portlabel, adapter, msys=None): """Find SR-IOV physical ports based on the port label. :param portlabel: portlabel of the SR-IOV physical ports to find. :param adapter: The pypowervm adapter API interface. :param msys: pypowervm.wrappers.managed_system.System wrapper.If not specified, it will be retrieved from the server. :return: List of SRIOVEthPPort or SRIOVConvPPort wrappers for the specified port label, or the empty list if no such port exists. """ # Physical ports for the given physical network if msys is None: msys = ms.System.get(adapter)[0] pports = [] for sriov in msys.asio_config.sriov_adapters: for pport_w in sriov.phys_ports: if (pport_w.label or 'default') == portlabel: pports.append(pport_w) return pports def find_pport(sys_w, physloc): """Find an SR-IOV physical port based on its location code. :param sys_w: pypowervm.wrappers.managed_system.System wrapper of the host. 
:param physloc: Physical location code string (per SRIOV*PPort.loc_code) of the SR-IOV physical port to find. :return: SRIOVEthPPort or SRIOVConvPPort wrapper with the specified location code, or None if no such port exists in sys_w. """ for sriov in sys_w.asio_config.sriov_adapters: for pport in sriov.phys_ports: if pport.loc_code == physloc: return pport return None pypowervm-1.1.24/pypowervm/tasks/partition.py0000664000175000017500000004174113571367171021037 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tasks specific to partitions (LPARs and VIOSes).""" from oslo_log import log as logging import time import pypowervm.const as c import pypowervm.exceptions as ex from pypowervm.i18n import _ import pypowervm.util as u import pypowervm.utils.transaction as tx import pypowervm.wrappers.base_partition as bp from pypowervm.wrappers import job import pypowervm.wrappers.logical_partition as lpar import pypowervm.wrappers.virtual_io_server as vios LOG = logging.getLogger(__name__) # RMC must be either active or busy. Busy is allowed because that simply # means that something is running against the VIOS at the moment...but # it should recover shortly. _VALID_RMC_STATES = (bp.RMCState.ACTIVE, bp.RMCState.BUSY) # Only a running state is OK for now. 
_VALID_VM_STATES = (bp.LPARState.RUNNING,) # Not the opposite of the above _DOWN_VM_STATES = (bp.LPARState.NOT_ACTIVATED, bp.LPARState.ERROR, bp.LPARState.NOT_AVAILBLE, bp.LPARState.SHUTTING_DOWN, bp.LPARState.SUSPENDED, bp.LPARState.SUSPENDING, bp.LPARState.UNKNOWN) _SUFFIX_PARM_CLONE_UUID = 'CloneUUID' _SUFFIX_PARM_ADD_LICENSE = 'AddLicense' _LOW_WAIT_TIME = 120 _HIGH_WAIT_TIME = 600 _UPTIME_CUTOFF = 3600 def get_mgmt_partition(adapter): """Get the LPAR/VIOS wrapper representing the PowerVM management partition. :param adapter: The pypowervm.adapter.Adapter through which to query the REST API. :return: pypowervm.wrappers.logical_partition.LPAR/virtual_io_server.VIOS wrapper representing the management partition. :raise ManagementPartitionNotFoundException: if we don't find exactly one management partition. """ # There will almost always be fewer VIOSes than LPARs. Since we're # querying without xags, it should be very quick. vio_wraps = vios.VIOS.search(adapter, is_mgmt_partition=True) if len(vio_wraps) == 1: return vio_wraps[0] # We delay the query to the LPARs because there could be hundreds of them. # So we don't want to query it unless we need to. lpar_wraps = lpar.LPAR.search(adapter, is_mgmt_partition=True) if len(lpar_wraps) == 1: return lpar_wraps[0] # If we made it here, something is wrong. raise ex.ManagementPartitionNotFoundException( count=len(vio_wraps + lpar_wraps)) def get_this_partition(adapter): """Get the LPAR/VIOS wrapper of the node on which this method is running. :param adapter: The pypowervm.adapter.Adapter through which to query the REST API. :return: pypowervm.wrappers.logical_partition.LPAR/virtual_io_server.VIOS wrapper representing the local partition. :raise LocalPartitionNotFoundException: if we don't find exactly one LPAR/ VIOS with the local VM's short ID. """ myid = u.my_partition_id() # There will almost always be fewer VIOSes than LPARs. Since we're # querying without xags, it should be very quick. 
vio_wraps = vios.VIOS.search(adapter, id=myid) if len(vio_wraps) == 1: return vio_wraps[0] # We delay the query to the LPARs because there could be hundreds of them. # So we don't want to query it unless we need to. lpar_wraps = lpar.LPAR.search(adapter, id=myid) if len(lpar_wraps) == 1: return lpar_wraps[0] # If we made it here, something is wrong. raise ex.ThisPartitionNotFoundException( count=len(vio_wraps + lpar_wraps), lpar_id=myid) def get_active_vioses(adapter, xag=(), vios_wraps=None, find_min=None): """Returns a list of active Virtual I/O Server Wrappers for a host. Active is defined by powered on and RMC state being 'active'. The VIOSes will be sorted such that if the Mgmt partition is a VIOS, it is the first in the list. :param adapter: The pypowervm adapter for the query. :param xag: (Optional, Default: ()) Iterable of extended attributes to use. :param vios_wraps: (Optional, Default: None) A list of VIOS wrappers. If specified, the method will check for active VIOSes in this list instead of issuing a GET. :param find_min: (Optional, Default: None) If specified, the minimum acceptable number of active VIOSes. If fewer are found, this method raises NotEnoughActiveVioses. :return: List of VIOS wrappers. :raise NotEnoughActiveVioses: If find_min is specified and the number of active VIOSes is less than the specified number. """ if vios_wraps is None: vios_wraps = vios.VIOS.get(adapter, xag=xag) # A VIOS is 'active' if it is powered on and either RMC is active or it # is the mgmt partition. 
ret = [vio for vio in vios_wraps if vio.state in _VALID_VM_STATES and (vio.rmc_state in _VALID_RMC_STATES or vio.is_mgmt_partition)] ret = sorted(ret, key=lambda x: x.is_mgmt_partition, reverse=True) if find_min is not None and len(ret) < find_min: raise ex.NotEnoughActiveVioses(exp=find_min, act=len(ret)) LOG.debug('Found active VIOS(es): %s', str([vio.name for vio in ret])) return ret def get_partitions(adapter, lpars=True, vioses=True, mgmt=False): """Get a list of partitions. Can include LPARs, VIOSes, and the management partition. :param adapter: The pypowervm adapter. :param lpars: If True, the result will include all LPARs. :param vioses: If True, the result will include all VIOSes. :param mgmt: If True, the result is guaranteed to include the management partition, even if it would not otherwise have been included based on get_lpars/get_vioses. """ rets = [] if vioses: rets.extend(vios.VIOS.get(adapter)) if lpars: rets.extend(lpar.LPAR.get(adapter)) # If they need the mgmt lpar, get it. But ONLY if we didn't get both # VIOSes and LPARs. If we got both of those already, then we are # guaranteed to already have the mgmt lpar in there. if mgmt and not (lpars and vioses): mgmt_w = get_mgmt_partition(adapter) if mgmt_w.uuid not in [x.uuid for x in rets]: rets.append(get_mgmt_partition(adapter)) return rets # A global variable that will cache the physical WWPNs on the system. _vscsi_pfc_wwpns = None def get_physical_wwpns(adapter, force_refresh=True): """Returns the active WWPNs of the FC ports across all VIOSes on system. :param adapter: pypowervm.adapter.Adapter for REST API communication. :param force_refresh: The value discovered by this method is cached. If force_refresh is False, the cached value is returned. If True, the value is refetched from the server (and re-cached). """ global _vscsi_pfc_wwpns # TODO(IBM): Have a REST event posted when adapters power cycle, and force # refresh the cache. 
if force_refresh or _vscsi_pfc_wwpns is None: vios_feed = vios.VIOS.get(adapter, xag=[c.XAG.VIO_STOR]) _vscsi_pfc_wwpns = [] for vwrap in vios_feed: _vscsi_pfc_wwpns.extend(vwrap.get_active_pfc_wwpns()) return _vscsi_pfc_wwpns def build_active_vio_feed_task(adapter, name='vio_feed_task', xag=( c.XAG.VIO_STOR, c.XAG.VIO_SMAP, c.XAG.VIO_FMAP)): """Builds a FeedTask for all active VIOSes. The transaction FeedTask enables users to collect a set of 'WrapperTasks' against a feed of entities (in this case a set of active VIOSes). The WrapperTask (within the FeedTask) handles lock and retry. This is useful to batch together a set of updates across a feed of elements (and multiple updates within a given wrapper). This allows for significant performance improvements. :param adapter: The pypowervm adapter for the query. :param name: (Optional) The name of the feed manager. Defaults to vio_feed_task. :param xag: (Optional) Iterable of extended attributes to use. If not specified, defaults to all mapping/storage options (as this is most common case for using a transaction manager). :raise NotEnoughActiveVioses: if there is not at least one active VIOS. """ return tx.FeedTask(name, get_active_vioses(adapter, xag=xag, find_min=1)) def _rmc_down(vwrap): """Check if VIOS is in RMC Down state. :param vwrap: VIOS wrapper on which to check if RMC is down """ if vwrap.is_mgmt_partition: return False if (vwrap.rmc_state not in _VALID_RMC_STATES and vwrap.state not in _DOWN_VM_STATES): return True return False def _vios_waits_timed_out(no_rmc_vwraps, time_waited, max_wait_time=None): """Determine whether we've waited long enough for active VIOSes to get RMC. If max_wait_time is None, we will determine a suitable max_wait_time based on how long each VIOS has been booted. Then this method simply returns whether the time_waited exceeds the (specified or generated) max_wait_time for all VIOSes. :param no_rmc_vwraps: List of state-up/RMC-down VIOS wrappers. 
:param time_waited: The number of seconds the caller has waited thus far. :param max_wait_time: The maximum total number of seconds we should wait before declaring we've waited long enough. If None, a suitable value will be determined based on the VIOS's uptime. :return: True if we've waited long enough for these VIOSes to come up. False if we should wait some more. """ wait_time = max_wait_time if wait_time is None: wait_time = _LOW_WAIT_TIME # if any VIOS is still early in its startup, wait longer to give RMC # time to come up for vwrap in no_rmc_vwraps: if vwrap.uptime <= _UPTIME_CUTOFF: wait_time = _HIGH_WAIT_TIME break return time_waited >= wait_time def _wait_for_vioses(adapter, max_wait_time=None): """Wait for VIOSes to stabilize, and report on their states. :param adapter: The pypowervm adapter for the query. :param max_wait_time: Maximum number of seconds to wait for running VIOSes to get an active RMC connection. If None, we will wait longer if the VIOS booted recently, shorter if it has been up for a while. :return: List of all VIOSes returned by the REST API. :return: List of all VIOSes which are powered on, but with RMC inactive. """ vios_wraps = [] rmc_down_vioses = [] sleep_step = 5 time_waited = 0 while True: try: vios_wraps = vios.VIOS.get(adapter) rmc_down_vioses = [ vwrap for vwrap in vios_wraps if _rmc_down(vwrap)] if not vios_wraps or (not rmc_down_vioses and get_active_vioses( adapter, vios_wraps=vios_wraps)): # If there are truly no VIOSes (which should generally be # impossible if this code is running), we'll fail. # If at least one VIOS is up, and all active VIOSes have RMC, # we'll succeed. break except Exception as e: # Things like "Service Unavailable" LOG.warning(e) # If we get here, we're only waiting for VIOSes that are # state-active/RMC-down. On each iteration, if a new VIOS comes up, it # will be considered here until its RMC comes up. 
if _vios_waits_timed_out(rmc_down_vioses, time_waited, max_wait_time): break time.sleep(sleep_step) time_waited += sleep_step return vios_wraps, rmc_down_vioses, time_waited def validate_vios_ready(adapter, max_wait_time=None): """Check whether VIOS rmc is up and running on this host. Will query the VIOSes for a period of time attempting to ensure all running VIOSes get an active RMC. If no VIOSes are ready by the timeout, ViosNotAvailable is raised. If only some of the VIOSes had RMC go active by the end of the wait period, the method will complete. :param adapter: The pypowervm adapter for the query. :param max_wait_time: Integer maximum number of seconds to wait for running VIOSes to get an active RMC connection. Defaults to None, in which case the system will determine an appropriate amount of time to wait. This can be influenced by whether or not the VIOS just booted. :raises: A ViosNotAvailable exception if a VIOS is not available by a given timeout. """ # Used to keep track of VIOSes and reduce queries to API vwraps, rmc_down_vioses, waited = _wait_for_vioses(adapter, max_wait_time) if rmc_down_vioses: LOG.warning( _('Timed out waiting for the RMC state of all the powered on ' 'Virtual I/O Servers to be active. Wait time was: %(time)d ' 'seconds. VIOSes that did not go active were: %(vioses)s.'), {'time': waited, 'vioses': ', '.join([vio.name for vio in rmc_down_vioses])}) # If we didn't get a single active VIOS then raise an exception if not get_active_vioses(adapter, vios_wraps=vwraps): raise ex.ViosNotAvailable(wait_time=waited) def has_physical_io(part_w): """Determine whether a partition has any physical I/O adapters attached. This method looks over all of the slots on the partition and returns True if any of the slots has something in it, except for the following: - USB Device - Graphics - Empty Slot All other devices are considered I/O. :param part_w: Wrapper (LPAR or VIOS) of the partition to check. 
:return: True if any physical I/O adapter was found attached to the partition; False otherwise. """ try: io_slots = part_w.io_config.io_slots except AttributeError: # If the wrapper has no io_slots, it has no physical I/O return False # Doesn't count as physical I/O if description contains any of these non_ios = ("USB", "Universal Serial Bus", "Graphics", "Empty slot", "3D Controller") for io_slot in io_slots: try: # If the description *isn't* one of the non-I/O ones, it's a hit. if not any([non_io in io_slot.description for non_io in non_ios]): return True except AttributeError: # The slot didn't have a description. That shouldn't happen. But # we have to assume it's physical I/O in that case. LOG.warning( _("Assuming description-less slot is physical I/O: %s"), io_slot.toxmlstring()) return True # We got through all the I/O slots without finding a physical I/O adapter return False def clone_uuid(adapter, lpar_uuid, surrogate_lpar_name): """Issue the CloneUUID job. The CloneUUID job deletes the original LPAR and changes the surrogate LPAR's UUID to be the original LPAR's UUID. :param adapter: The pypowervm adapter to issue the job. :param lpar_uuid: Original LPAR's UUID. :param surrogate_lpar_name: Surrogate LPAR's name. """ resp = adapter.read(lpar.LPAR.schema_type, root_id=lpar_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_SUFFIX_PARM_CLONE_UUID) job_wrapper = job.Job.wrap(resp.entry) job_parms = [job_wrapper.create_job_parameter('targetLparName', surrogate_lpar_name)] job_wrapper.run_job(lpar_uuid, job_parms=job_parms) def ibmi_add_license_key(adapter, lpar_uuid, license_key): """Issue the AddLicense job. The AddLicense job submits a license key to IBMi partition. :param adapter: The pypowervm adapter to issue the job. :param lpar_uuid: Original LPAR's UUID. :param license_key: License key for IBMi partition. 
""" resp = adapter.read(lpar.LPAR.schema_type, root_id=lpar_uuid, suffix_type=c.SUFFIX_TYPE_DO, suffix_parm=_SUFFIX_PARM_ADD_LICENSE) job_wrapper = job.Job.wrap(resp.entry) job_parms = [job_wrapper.create_job_parameter('licKey', license_key)] job_wrapper.run_job(lpar_uuid, job_parms=job_parms) pypowervm-1.1.24/pypowervm/hacking/0000775000175000017500000000000013571367172016725 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/hacking/__init__.py0000664000175000017500000000000013571367171021023 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/hacking/checks.py0000664000175000017500000000154413571367171020542 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def no_log_warn(logical_line, filename): """Disallow 'LOG.warn(' """ if logical_line.startswith('LOG.warn('): yield(0, 'P301 Use LOG.warning() rather than LOG.warn()') def factory(register): register(no_log_warn) pypowervm-1.1.24/pypowervm/tests/0000775000175000017500000000000013571367172016463 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/locale/0000775000175000017500000000000013571367172017722 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/locale/en_US/0000775000175000017500000000000013571367172020733 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/locale/en_US/LC_MESSAGES/0000775000175000017500000000000013571367172022520 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/locale/en_US/LC_MESSAGES/pypowervm.mo0000664000175000017500000000013713571367171025125 0ustar neoneo00000000000000Þ•$,8GThis is a testThis is an English testpypowervm-1.1.24/pypowervm/tests/locale/en_US/LC_MESSAGES/pypowervm.po0000664000175000017500000000016713571367171025133 0ustar neoneo00000000000000# Generate .mo file via: # msgfmt -o pypowervm.mo pypowervm.po msgid "This is a test" msgstr "This is an English test" pypowervm-1.1.24/pypowervm/tests/test_i18n.py0000664000175000017500000000250213571367171020651 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import os import unittest from pypowervm.i18n import _ class TranslationTests(unittest.TestCase): """Test internationalization library.""" @mock.patch.dict(os.environ, { # Ensure we're using our test message catalog 'PYPOWERVM_LOCALEDIR': os.path.join( os.path.dirname(os.path.realpath(__file__)), 'locale'), # Ensure we're using the expected language 'LANG': 'en_US'}) def test_translate(self): self.assertEqual(_("This is a test"), "This is an English test") self.assertEqual( _("This is a message for which a translation doesn't exist"), "This is a message for which a translation doesn't exist") pypowervm-1.1.24/pypowervm/tests/test_utils/0000775000175000017500000000000013571367172020662 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/test_utils/refresh_httpresp.py0000664000175000017500000000711213571367171024623 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Convenience program to call create_httpresp.py -refresh # # Run this standalone and it will list all the .txt files and # prompt you to select which one you want to refresh. 
#
import os
import tempfile

import six

from pypowervm.tests.test_utils import create_httpresp

# Remembers the last response file refreshed, so it can be offered as the
# default selection next time.
defaultresp_path = os.path.join(tempfile.gettempdir(), 'defaultresp.txt')


def get_default_selection():
    """Retrieve the last response file updated."""
    try:
        if not os.path.exists(defaultresp_path):
            return None
        with open(defaultresp_path, 'r') as file_ptr:
            default_selection = file_ptr.readline()
        if default_selection is None:
            return None
        return default_selection.strip()
    except Exception:
        # Any problem reading the default is non-fatal; just prompt without
        # a default.
        return None


def save_default_selection(default_line):
    """Save the selection so it can be set as the default next time."""
    try:
        with open(defaultresp_path, 'w') as file_ptr:
            file_ptr.write(default_line)
    except Exception as e:
        print("%s" % e)


def get_txt_file():
    """Prompt the user to pick one of the .txt response files to refresh."""
    error_message = None
    default_selection = get_default_selection()
    dirname = os.path.dirname(os.path.dirname(__file__))
    dirname = os.path.join(dirname, "data")
    directory_listing = os.listdir(dirname)
    txtfiles = [name for name in directory_listing if name.endswith('.txt')]
    txtfiles.sort()
    while True:
        if default_selection and default_selection not in txtfiles:
            default_selection = None  # The previous file was not found
        # Show the menu, marking the default entry with '*'.
        for idx, name in enumerate(txtfiles, start=1):
            if name == default_selection:
                fmt = '%d:*\t[%s]'
            else:
                fmt = '%d:\t%s'
            print(fmt % (idx, name))
        print()
        if error_message:
            print(error_message)
        if default_selection:
            fmt = ('Enter index or name of file to refresh [Enter=%s]--> '
                   % default_selection)
        else:
            fmt = 'Enter index or name of file to refresh--> '
        line = six.moves.input(fmt)
        line = line.strip()
        if line is None:
            return None
        if len(line) == 0 and default_selection:
            return default_selection
        print(line)
        if line in txtfiles:
            save_default_selection(line)
            return line  # The actual filename was entered
        try:
            line_index = int(line)
        except ValueError:
            error_message = 'Could not convert %s to an integer' % line
            continue
        if line_index < 1 or line_index > len(txtfiles):
            error_message = 'Index %d is out of range' % line_index
            continue
save_default_selection(txtfiles[line_index - 1]) return txtfiles[line_index - 1] if __name__ == '__main__': txt_file = get_txt_file() print("Selected %s " % txt_file) create_httpresp.main(['-refresh', txt_file]) pypowervm-1.1.24/pypowervm/tests/test_utils/__init__.py0000664000175000017500000000000013571367171022760 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/test_utils/test_wrapper_abc.py0000664000175000017500000000757513571367171024575 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six import testtools import pypowervm.tests.test_fixtures as fx from pypowervm.tests.test_utils import pvmhttp @six.add_metaclass(abc.ABCMeta) class TestWrapper(testtools.TestCase): """Superclass for wrapper test cases; provides loading of data files. A single subclass tests a single wrapper class on a single file. Usage: - Subclass this class. - Provide the name of the data file to load, e.g. file = 'ssp.txt' - Indicate the wrapper class to be tested, e.g. wrapper_class_to_test = clust.SSP - If your tests will make use of traits, you must provide mock_adapter_fx_args, resulting in AdapterFx being constructed with those args and used via useFixture. Your tests may access the adapter via self.adpt and the fixture itself via self.adptfx. - No __init__ or setUp is necessary. 
- In your test cases, make use of the following variables: - self.resp: The raw Response object from load_pvm_resp().get_response(). May represent an entry or a feed. - self.dwrap: A single instance of the wrapper_class_to_test extracted from self.resp. If self.resp was a feed, this is the first entry. - self.entries: The result of wrap(response) of the wrapper class. May be a single wrapper instance, in which case it's (nearly*) equivalent to self.dwrap, or a list of such wrappers. * Note that wrap(response) injects each entry's etag into the wrapper instance. """ # Load the response file just once _pvmfile = None @abc.abstractproperty def file(self): """Data file name, relative to pypowervm/tests/wrappers/data/.""" return None @abc.abstractproperty def wrapper_class_to_test(self): """Indicates the type (Wrapper subclass) produced by self.dwrap.""" return None # Arguments to test_fixtures.AdapterFx(), used to create a mock adapter. # Must be represented as a dict. For example: # mock_adapter_fx_args = {} # or: # mock_adapter_fx_args = dict(session=mock_session, # traits=test_fixtures.LocalPVMTraits) mock_adapter_fx_args = None def setUp(self): super(TestWrapper, self).setUp() self.adptfx = None self.adpt = None adptfx_args = self.mock_adapter_fx_args or {} self.adptfx = self.useFixture(fx.AdapterFx(**adptfx_args)) self.adpt = self.adptfx.adpt # Load the file just once... if self.__class__._pvmfile is None: self.__class__._pvmfile = pvmhttp.PVMFile(self.file) # ...but reconstruct the PVMResp every time self.resp = pvmhttp.PVMResp(pvmfile=self.__class__._pvmfile, adapter=self.adpt).get_response() # Some wrappers don't support etag. Subclasses testing those wrappers # should not be using self.entries, so ignore. 
try: self.entries = self.wrapper_class_to_test.wrap(self.resp) except TypeError: pass if self.resp.feed: self.dwrap = self.wrapper_class_to_test.wrap( self.resp.feed.entries[0]) else: self.dwrap = self.wrapper_class_to_test.wrap(self.resp.entry) pypowervm-1.1.24/pypowervm/tests/test_utils/pvmhttp.py0000664000175000017500000001467213571367171022747 0ustar neoneo00000000000000# Copyright 2014, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import os import pypowervm.adapter as adp EOL = "\n" COMMENT = "#" INFO = "INFO{" HEADERS = "HEADERS{" BODY = "BODY{" END_OF_SECTION = "END OF SECTION}" class PVMFile(object): def __init__(self, file_name=None): self.comment = None self.path = None self.reason = None self.status = None self.headers = None self.body = None if file_name is not None: self.load_file(file_name) def load_file(self, file_name): """Load a REST response file.""" # If given a pathed filename, use it dirname = os.path.dirname(file_name) if not dirname: dirname = os.path.dirname(os.path.dirname(__file__)) file_name = os.path.join(dirname, "data", file_name) resp_file = open(file_name, "r") if resp_file is None: raise Exception("Could not load %s" % file_name) while True: line = resp_file.readline() if line is None or len(line) == 0: break if len(line.strip()) == 0: continue if line.startswith(COMMENT): continue if line.startswith(INFO): section = INFO elif line.startswith(HEADERS): section = HEADERS elif 
line.startswith(BODY): section = BODY else: resp_file.close() raise Exception("Unknown line in file %s: %s" % (file_name, line)) buf = _read_section(section, file_name, resp_file) if line.startswith(INFO): info = ast.literal_eval(buf) self.comment = info['comment'] self.path = info['path'] self.reason = info['reason'] self.status = info['status'] elif line.startswith(HEADERS): self.headers = ast.literal_eval(buf) elif line.startswith(BODY): self.body = buf resp_file.close() class PVMResp(PVMFile): """Class to encapsulate the text serialization of a response.""" def __init__(self, file_name=None, pvmfile=None, adapter=None): """Initialize this PVMResp by loading a file or pulling a PVMFile. :param file_name: Name of a file to load. :param pvmfile: An existing PVMFile instance. This PVMResp will use its attributes. If both file_name and pvmfile are specified, the file will be reloaded into the passed-in PVMFile. This is probably not what you intended. :param adapter: A pypowervm.adapter.Adapter, used for traits, etc. 
""" super(PVMResp, self).__init__() # Legacy no-arg constructor - allow caller to set fields manually if pvmfile is None and file_name is None: return if pvmfile is None: self.load_file(file_name) else: # Use pvmfile if file_name is not None: pvmfile.load_file(file_name) # Copy in attrs from pvmfile self.comment = pvmfile.comment self.path = pvmfile.path self.reason = pvmfile.reason self.status = pvmfile.status self.headers = pvmfile.headers self.body = pvmfile.body self.response = adp.Response(reqmethod=None, reqpath=None, status=self.status, reason=self.reason, headers=self.headers, body=self.body) self.response.adapter = adapter self.response._unmarshal_atom() def get_response(self): return self.response def refresh(self): """Do the query and get the response.""" print("Connecting.") adap = adp.Adapter() print("Reading path: " + self.path) self.response = adap.read(self.path) print("Received " + str(self.response)) def save(self, file_name): everything = { 'comment': self.comment, 'path': self.path, 'reason': self.response.reason, 'status': self.response.status, } with open(file_name, 'wb') as df: df.write("####################################################") df.write(EOL) df.write("# THIS IS AN AUTOMATICALLY GENERATED FILE") df.write(EOL) df.write("# DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE") df.write(EOL) df.write("#") df.write(EOL) df.write("# To update file, run: create_httpresp.py -refresh ") df.write(os.path.basename(file_name)) df.write(EOL) df.write("#") df.write(EOL) df.write("####################################################") df.write(EOL) df.write(INFO + EOL) df.write(str(everything)) df.write(EOL) df.write(END_OF_SECTION) df.write(EOL) df.write(HEADERS + EOL) df.write(str(self.response.headers)) df.write(EOL) df.write(END_OF_SECTION) df.write(EOL) df.write(BODY + EOL) df.write(self.response.body) df.write(EOL) df.write(END_OF_SECTION) df.write(EOL) def load_pvm_resp(file_name, adapter=None): return PVMResp(file_name, adapter=adapter) def _read_section(section, file_name, resp_file): buf = "" while True: line = resp_file.readline() if line is None or len(line) == 0: raise Exception("Could not find end of section %s of file %s" % (section, file_name)) if line.startswith(END_OF_SECTION): return buf buf += EOL + line pypowervm-1.1.24/pypowervm/tests/test_utils/create_httpresp.py0000664000175000017500000000645013571367171024434 0ustar neoneo00000000000000# Copyright 2014, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import sys import pypowervm.adapter as adp from pypowervm.tests.test_utils import pvmhttp def refresh_response(file_to_refresh): """Reload the file and redo the query.""" print("Loading original file: ", file_to_refresh) new_http = pvmhttp.load_pvm_resp(file_to_refresh) if new_http is None or new_http.refresh() is False: print("Unable to refresh ", file_to_refresh) return 1 print("Saving refreshed file: ", file_to_refresh) new_http.save(file_to_refresh) return 0 def usage(): print("create_httpresp -path path -output out_file [-comment comment]") print(" Note: out_file can be a full path or a file in the same " "location as create_httpresp.py") print('Ex: create_httpresp -path ManagedSystem//LogicalPartition ' ' -output fakelpar.txt -comment "Created by jsmith"') print() print('create_httpresp -refresh response_file') print('Update a previously created response file by ' 'redoing the same request') exit(-1) def main(argv): new_response = pvmhttp.PVMResp() output_file = None file_to_refresh = None aindex = 0 while aindex < len(argv): if argv[aindex] == '-path': aindex += 1 new_response.path = argv[aindex] elif argv[aindex] == '-comment': aindex += 1 new_response.comment = argv[aindex] elif argv[aindex] == '-output': aindex += 1 output_file = argv[aindex] elif argv[aindex] == '-refresh': aindex += 1 file_to_refresh = argv[aindex] else: print("Unknown argument ", argv[aindex]) usage() aindex += 1 if file_to_refresh: rc = refresh_response(file_to_refresh) exit(rc) if new_response.path is None or output_file is None: usage() print("Connecting.") adap = adp.Adapter() print("Reading path: ", new_response.path) new_response.response = adap.read(new_response.path) print("Received ", new_response.response) orig_file_name = output_file dirname = os.path.dirname(output_file) if dirname is None or dirname == '': dirname = os.path.dirname(__file__) output_file = os.path.join(dirname, output_file) new_response.save(output_file) print("Response has been saved in ", 
output_file) print("Use the pvmhttp.load_pvm_resp('%s') method " "to load it in your testcase " % orig_file_name) print("You can have the %s file rebuilt by running: " "create_httpresp -refresh %s" % (orig_file_name, orig_file_name)) if __name__ == '__main__': main(sys.argv[1:]) pypowervm-1.1.24/pypowervm/tests/test_utils/xml_sections.py0000664000175000017500000000472513571367171023752 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os COMMENT = "#" SECTION = "SECTION:" END_OF_SECTION = "END OF SECTION" def load_xml_sections(file_name): """Loads a file that contains xml sections This method takes a file that contains xml sections and returns a dict of them. It's useful for testing the generation of the sections. See ../data/lpar_sections.txt for an example of the file contents. :param file_name: The name of the file to load. """ def _read_section(nm): buf = "" while True: ln = sect_file.readline() if ln is None or len(ln) == 0: raise Exception("Could not find end of section %s of file %s" % (nm, file_name)) if ln.startswith(END_OF_SECTION): return buf buf += ln.strip('\n') return buf sections = {} # First try to load the name as passed in. 
dirname = os.path.dirname(file_name) if dirname is None or dirname == '': dirname = os.path.dirname(os.path.dirname(__file__)) file_name = os.path.join(dirname, "data", file_name) sect_file = open(file_name, "r") if sect_file is None: raise Exception("Could not load %s" % file_name) while True: line = sect_file.readline() if line is None or len(line) == 0: break if len(line.strip()) == 0: continue if line.startswith(COMMENT): continue if line.startswith(SECTION): # Get the name of the section name = line[len(SECTION):].strip() # Get the data data = _read_section(name) sections[name] = data else: sect_file.close() raise Exception("Unknown line in file %s: %s" % (file_name, line)) sect_file.close() return sections pypowervm-1.1.24/pypowervm/tests/data/0000775000175000017500000000000013571367172017374 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/data/phyp_pcm_data2.txt0000664000175000017500000002344513571367171023036 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_phyp_20150527T074430+0000.json'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ { "systemUtil": { "utilInfo": { "version": "1.3.0", "metricType": "Raw", "monitoringType": "LTM", "mtms": "8247-22L*2125D4A", "name": "dev-4" }, "utilSample": { "timeStamp": "2015-05-27T08:17:45+0000", "status": 0, "errorInfo": [], "timeBasedCycles": 8.0629725893315e+14, "systemFirmware": { "utilizedProcCycles": 58599310268, "assignedMem": 4096 }, "processor": { "totalProcUnits": 20, "configurableProcUnits": 20, "availableProcUnits": 18.9, "procCyclesPerSecond": 512000000 }, "memory": { "totalMem": 65536, "availableMem": 32512, "configurableMem": 65536 }, "sharedProcessorPool": [ { "id": 0, "name": "DefaultPool", "assignedProcCycles": 1.6125945162342e+16, "utilizedPoolCycles": 683011326288, "maxProcUnits": 20, "borrowedPoolProcUnits": 18 } ], "lparsUtil": [ { "id": 7, "uuid": "2545BCC5-BAE8-4414-AD49-EAFC2DEE2546", "type": "aixlinux", "name": "asdfasdfadsf", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 23502, "utilizedCappedProcCycles": 100, "utilizedUnCappedProcCycles": 100, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 6, "uuid": 
"2545BCC5-BAE8-4414-AD49-EAFC2DEE2546", "type": "aixlinux", "name": "diff-lpar-6", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 1000000, "utilizedCappedProcCycles": 10000, "utilizedUnCappedProcCycles": 5000, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 5, "uuid": "3B0237F9-26F1-41C7-BE57-A08C9452AD9D", "type": "aixlinux", "name": "fake_npiv", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 256, "backedPhysicalMem": 256 }, "processor": { "poolId": 0, "mode": "cap", "maxVirtualProcessors": 1, "maxProcUnits": 0.1, "weight": 0, "entitledProcCycles": 0, "utilizedCappedProcCycles": 0, "utilizedUnCappedProcCycles": 0, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 4, "uuid": "66A2E886-D05D-42F4-87E0-C3BA02CF7C7E", "type": "aixlinux", "name": "kh4-9fdaa1ba-kyleh", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 2000000, "utilizedCappedProcCycles": 50000, "utilizedUnCappedProcCycles": 10000, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 3, "uuid": "3B7A3E07-E0B0-4F35-8997-6019D0D1CFC8", "type": "aixlinux", "name": "placeholder", "state": "Not Activated", "affinityScore": 0, "memory": { "logicalMem": 0, "backedPhysicalMem": 0 }, "processor": { "mode": "share_idle_procs", "maxVirtualProcessors": 0, "maxProcUnits": 0, "entitledProcCycles": 0, "entitledProcCycles": 
2000000, "utilizedCappedProcCycles": 50000, "utilizedUnCappedProcCycles": 10000, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 2, "uuid": "42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC", "type": "aixlinux", "name": "Ubuntu1410", "state": "Open Firmware", "affinityScore": 100, "memory": { "logicalMem": 20480, "backedPhysicalMem": 20480 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 4, "maxProcUnits": 0.4, "weight": 128, "entitledProcCycles": 1765629232513, "utilizedCappedProcCycles": 264619289721, "utilizedUnCappedProcCycles": 641419282, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 160866895489, "totalInstructionsExecutionTime": 193139925064 }, "network": { "virtualEthernetAdapters": [ { "vlanId": 2227, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V2-C2", "isPortVLANID": true, "receivedPackets": 10, "sentPackets": 100, "droppedPackets": 5, "sentBytes": 100, "receivedBytes": 10000, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 } ] }, "storage": { "genericVirtualAdapters": [ { "physicalLocation": "U8247.22L.2125D4A-V2-C3", "viosId": 1, "viosAdapterSlotId": 1000 } ] } } ], "viosUtil": [ { "id": 1, "uuid": "3443DB77-AED1-47ED-9AA5-3DB9C6CF7089", "name": "IOServer - SN2125D4A", "state": "Running", "affinityScore": 100, "memory": { "assignedMem": 4096 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 4, "maxProcUnits": 0.4, "weight": 255, "entitledProcCycles": 3603069246332, "utilizedCappedProcCycles": 334805782979, "utilizedUnCappedProcCycles": 219847016046, "idleProcCycles": 260430293020, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 1289474458948, "totalInstructionsExecutionTime": 520104519750 }, "network": { "virtualEthernetAdapters": [ { "vlanId": 2227, 
"vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V1-C2", "isPortVLANID": true, "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "sentBytes": 0, "receivedBytes": 0, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 }, { "vlanId": 123, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V1-C12", "isPortVLANID": true, "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "sentBytes": 0, "receivedBytes": 0, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 } ] } } ] } } } END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_feed_no_vg.txt0000664000175000017500000020671113571367171024264 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_vios_feed_no_vg.txt # #################################################### INFO{ {'comment': 'Use for vios feed testing. 
No volume groups.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/VirtualIOServer'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=0000N4dQTtVs6iVW1jRpJ54Q6F7:87025216-ad22-4a32-8e2c-3194816a5355; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 08 Jan 2014 17:05:32 GMT', 'etag': '1775366259', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 08 Jan 2014 17:05:31 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ c7da8bab-8703-317d-91c8-cfd57cac2edb 2015-02-27T05:33:32.295Z IBM Power Systems Management Console 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 VirtualIOServer 2015-02-27T05:33:33.784Z IBM Power Systems Management Console 881130005 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 1425015213565 false 191 POWER7 Disabled false false false false false false normal 2125D4A1 VIOS 2.2.4.0 true true true true true 1 2000 false RAID Controller U78CB.001.WZS007Y 842 260 842 1023 4116 2 4116 4116 false false false false false false false false false false 553844757 RAID Controller U78CB.001.WZS007Y-P1-C14 U78CB.001.WZS007Y-P1-C14 C14 842 true 553844757 U78CB.001.WZS007Y-P1-C14 C14 false Universal Serial Bus UHC Spec U78CB.001.WZS007Y 33345 3075 33345 1202 4172 2 4172 4116 false false false false false false false false false false 553713691 Universal Serial Bus UHC Spec U78CB.001.WZS007Y-P1-T2 U78CB.001.WZS007Y-P1-T2 T2 33345 true 553713691 U78CB.001.WZS007Y-P1-T2 T2 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78CB.001.WZS007Y 5719 512 5719 1056 5348 1 5348 4116 false false false false false false false false false false 553910302 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78CB.001.WZS007Y-P1-C10 U78CB.001.WZS007Y-P1-C10 C10 5719 true 553910302 
U78CB.001.WZS007Y-P1-C10 C10 false Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y 9522 4 9522 1054 4215 2 4215 4116 false false false false false false false false false false 553713705 Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y-P1-C3 U78CB.001.WZS007Y-P1-C3 C3 U78CB.001.WZS007Y-P1-C3-T1 fcs0 1aU78CB.001.WZS007Y-P1-C3-T1 21000024FF649104 64 64 U78CB.001.WZS007Y-P1-C3-T4 fcs3 1aU78CB.001.WZS007Y-P1-C3-T4 21000024FF649107 U78CB.001.WZS007Y-P1-C3-T3 fcs2 1aU78CB.001.WZS007Y-P1-C3-T3 21000024FF649106 U78CB.001.WZS007Y-P1-C3-T2 fcs1 1aU78CB.001.WZS007Y-P1-C3-T2 21000024FF649105 553713705 U78CB.001.WZS007Y-P1-C3 C3 2000 false false 4096 0.0 7 4096 4096 0.0 7 0 0 4096 4096 0 4096 false true false false 0 4096 4096 false IOServer - SN2125D4A false 0.4 4 0.4 4 0.4 4 0 255 uncapped false uncapped false 4 0.4 0.4 0.4 0 255 4 4 0.4 255 running Virtual IO Server 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 default 0 0 active 9.1.2.4 25006020324608 false false true true c_8b2bda0c_userID_config.iso 0ec_8b2bda0c_userID_config.iso rw 0.000000 d_fd4626ae_userID_config.iso 0ed_fd4626ae_userID_config.iso rw 0.000000 inst1_140e4f56_kyleh_config.iso 0einst1_140e4f56_kyleh_config.iso rw 0.000000 ubuntu1410 0eubuntu1410 rw 0.5449 VMLibrary 1 true SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L405DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDQw false 270648 hdisk1 active 391BIBMIPR-0 5DB603000000004010IPR-0 5DB6030003IBMsas false SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L205DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDIw false 270648 hdisk0 active 391BIBMIPR-0 5DB603000000002010IPR-0 5DB6030003IBMsas false 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.248.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 
U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 true Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 60 None Ubuntu1410 0300025d4a00007a000000014b36d9deaf.1 0x8200000000000000 vtscsi0 09bddd1603b373bbf Server U8247.22L.2125D4A-V1-C4 U8247.22L.2125D4A-V1-C4 1 false true 4 3 2 Server U8247.22L.2125D4A-V1-C5 U8247.22L.2125D4A-V1-C5 1 false true 5 3 2 Server U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 1 false true 3 3 3 Server U8247.22L.2125D4A-V1-C7 U8247.22L.2125D4A-V1-C7 1 false true 7 3 2 Server U8247.22L.2125D4A-V1-C6 U8247.22L.2125D4A-V1-C6 1 false true 6 3 3 Client U8247.22L.2125D4A-V4-C2 U8247.22L.2125D4A-V4-C2 4 false true 2 1 9 U8247.22L.2125D4A-V1-C9 Server U8247.22L.2125D4A-V1-C9 U8247.22L.2125D4A-V1-C9 1 false true 9 vhost1 boot_140e4f56 4 2 U8247.22L.2125D4A-V4-C2 1eU8247.22L.2125D4A-V1-C9 1 None boot_140e4f56 0300025d4a00007a000000014b36d9deaf.2 0x8100000000000000 vtscsi1 098f611b2840d229cf Client U8247.22L.2125D4A-V4-C3 U8247.22L.2125D4A-V4-C3 4 false true 3 1 10 U8247.22L.2125D4A-V1-C10 Server U8247.22L.2125D4A-V1-C10 U8247.22L.2125D4A-V1-C10 1 false true 10 vhost2 inst1_140e4f56_kyleh_config.iso 4 3 U8247.22L.2125D4A-V4-C3 1eU8247.22L.2125D4A-V1-C10 inst1_140e4f56_kyleh_config.iso 0einst1_140e4f56_kyleh_config.iso rw 0.000000 0x8100000000000000 vtopt1 195e4efef939d84e90 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 ubuntu1410 0eubuntu1410 rw 0.5449 
0x8100000000000000 vtopt0 197993aace0a82198c 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T4 U78CB.001.WZS007Y-P1-C10-T4 13U78CB.001.WZS007Y-P1-C10-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T3 U78CB.001.WZS007Y-P1-C10-T3 13U78CB.001.WZS007Y-P1-C10-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T2 U78CB.001.WZS007Y-P1-C10-T2 13U78CB.001.WZS007Y-P1-C10-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78CB.001.WZS007Y-P1-C10-T4 en3 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78CB.001.WZS007Y-P1-C10-T3 en2 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 U78CB.001.WZS007Y-P1-C10-T2 en1 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/tier.txt0000664000175000017500000002161413571367171021103 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh tier.txt # #################################################### INFO{ {'comment': None, 'path': 'Tier', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'Content-Length': '8090', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000l3vwKPVRqZanwEG7adGoK1E:c83b267d-dc99-4d51-b4fd-a9b2013da3f6; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Fri, 01 Apr 2016 17:52:51 GMT', 'X-Transaction-ID': 'XT10054584', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Fri, 01 Apr 2016 17:52:53 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': '9612a8fd-7733-4da3-89ea-461ab72288fe', 'ETag': '2124537305'} END OF SECTION} BODY{ 11ab724b-f199-35fd-8f0f-035794b4c912 2016-04-01T13:52:51.986-04:00 IBM Power Systems Management Console c390432c-f139-3e7f-a168-e76f8c1cbc98 Tier 2016-04-01T13:52:53.218-04:00 IBM Power Systems Management Console 2124537274 c390432c-f139-3e7f-a168-e76f8c1cbc98 0 SYSTEM 256c097502d44311e58004000040f2e95d7d95846d854f9f38 UnrestrictedSystemTier true 3071.25 NotMirrored Default 266c097502d44311e58004000040f2e95db5acdeee517e4afe 3071.63 Online MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyN0YwNjFEODgwMDAwMDAwMDAwMTMwQg== 524288 hdisk3 active true Tm92YUxpbmtfT1NfQ0lfU1NQX3Yy MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyN0YwNjFEODgwMDAwMDAwMDAwMTMwQQ== 524288 hdisk2 active true Tm92YUxpbmtfT1NfQ0lfU1NQX3Yx MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyN0YwNjFEODgwMDAwMDAwMDAwMTMwQw== 524288 hdisk6 active true Tm92YUxpbmtfT1NfQ0lfU1NQX3Yz MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyN0YwNjFEODgwMDAwMDAwMDAwMTMwRA== 524288 hdisk7 active true Tm92YUxpbmtfT1NfQ0lfU1NQX3Y0 MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyN0YwNjFEODgwMDAwMDAwMDAwMTMwRg== 524288 hdisk9 
active true Tm92YUxpbmtfT1NfQ0lfU1NQX3Y2 MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyN0YwNjFEODgwMDAwMDAwMDAwMTMwRQ== 524288 hdisk8 active true Tm92YUxpbmtfT1NfQ0lfU1NQX3Y1 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/lpar_builder.txt0000664000175000017500000004306713571367171022612 0ustar neoneo00000000000000 # Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # SECTION:dedicated_lpar false 127 64 102410241024 TheName 111 truesre idle proces AIX/Linux default false END OF SECTION # SECTION:shared_lpar false 127 2000 102410241024 TheName false 0.5010.5010.501064 uncapped AIX/Linux default false END OF SECTION # SECTION:capped_lpar true 127 64 102410241024 TheName false0.5010.5010.5010 capped AIX/Linux default true END OF SECTION # SECTION:uncapped_lpar false 127 64 102410241024 TheName false0.5010.5010.5010100 uncapped AIX/Linux POWER6 END OF SECTION # SECTION:ded_lpar_sre_idle_procs_always false 127 64 102410241024 TheName 111 truesre idle procs always AIX/Linux POWER7 false END OF SECTION # SECTION:vios false 127 64true12345falsefalse54321false 102410241024 TheName 111 truesre idle proces Virtual IO Server default END OF SECTION # SECTION:ppt_lpar false 127 64 1024310241024 TheName false 0.5010.5010.501064 uncapped AIX/Linux default false END OF SECTION SECTION:secure_boot_lpar false 127 64 102410241024 SecureBoot false 0.5010.5010.501064 uncapped AIX/Linux default 2 false END OF SECTION 
SECTION:secure_boot_ibmi_lpar false 127 64NONEHMC0 102410241024 SecureBoot false 0.5010.5010.501064 uncapped OS400 default false true END OF SECTION # pypowervm-1.1.24/pypowervm/tests/data/vnic_feed.txt0000664000175000017500000002623213571367171022063 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh vios_with_sriov_lp.txt # #################################################### INFO{ {'comment': None, 'path': 'LogicalPartition/020C9406-2B1A-461D-A8D3-0C9A7EB6746A/VirtualNICDedicated', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'Content-Length': '7644', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000ZnxUV-fSBOQFueaGXWRFjja:54518243-2afe-3834-a1f2-f7ada1d16030; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Fri, 03 Jun 2016 20:30:39 GMT', 'X-Transaction-ID': 'XT10129661', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Fri, 03 Jun 2016 20:30:39 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': '54518243-2afe-3834-a1f2-f7ada1d16030', 'ETag': '7743738979'} END OF SECTION} BODY{ 54518243-2afe-3834-a1f2-f7ada1d16030 2016-06-17T03:48:04.197-05:00 IBM Power Systems Management Console c4cd2502-caf5-399d-97f8-b9644ac8c9f9 VirtualNICDedicated 2016-06-17T03:48:06.283-05:00 IBM Power Systems Management Console -1370264134 c4cd2502-caf5-399d-97f8-b9644ac8c9f9 1466075583618 U8286.42A.21C1B6V-V10-C3 10 7
0 0 ALL AE7A25E59A07 192.168.2.7 255.255.255.0 192.168.2.0 ALL unavailable DEDICATED 2.0%
SRIOV false LINK_DOWN 1 2.0% 100.0% 0 2.0% 100.0% SRIOV true OPERATIONAL 2 42.42% 17.0% 0 2.0% 100.0%
9655fc81-3df4-3f58-883c-6b732b377bdd VirtualNICDedicated 2016-06-17T03:48:06.285-05:00 IBM Power Systems Management Console 545222413 9655fc81-3df4-3f58-883c-6b732b377bdd 1466075584690 U8286.42A.21C1B6V-V10-C4 10 8
0 0 ALL AE7A25E59A08 ALL unavailable DEDICATED 2.0%
SRIOV true OPERATIONAL 2 2.0% 70.0% 0 2.0% 100.0%
END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/sys_with_sriov.txt0000664000175000017500000037603413571367171023244 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh sys_with_sriov.txt # #################################################### INFO{ {'comment': None, 'path': 'ManagedSystem', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'X-Powered-By': 'Servlet/3.1', 'Transfer-Encoding': 'chunked', 'X-TransactionRecord-Uuid': '3e5cc147-d7f5-46eb-8c0a-578029ad9065', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Fri, 03 Jun 2016 15:25:07 GMT', 'X-Transaction-ID': 'XT10000043', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Set-Cookie': 'JSESSIONID=0000JuyF9JDEx0jb5uGVjKVD7Sk:33812f47-0916-4faf-9225-40f81341e124; Path=/; Secure; HttpOnly', 'Date': 'Fri, 03 Jun 2016 15:25:08 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'ETag': '2102051559'} END OF SECTION} BODY{ 70459846-6c0f-32d2-ac61-fb26a8ed810b 2016-06-03T10:25:07.960-05:00 IBM Power Systems Management Console 95329c84-9a01-330f-809e-2e5577110d1e ManagedSystem 2016-06-03T10:25:08.090-05:00 IBM Power Systems Management Console 2102051528 95329c84-9a01-330f-809e-2e5577110d1e 0 false true true false true true false true true true true true true true true true true true true 0 false SAS RAID Controller, PCIe2, Dual-port 6Gb 260 4660 1224 2 842 4116 553713697 U78C7.001.RCH0004-P3-R1 false PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter "EN0H,EN0J" 512 4660 1039 16 57888 4116 553713696 U78C7.001.RCH0004-P1-C8 false PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter "EN0H,EN0J" 512 4660 1039 16 57888 4116 553713699 U78C7.001.RCH0004-P1-C9 false Empty slot 0 65535 4660 65535 255 65535 65535 553713728 U78C7.001.RCH0004-P1-C1 
false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 4660 1056 1 5719 4116 553713704 U78C7.001.RCH0004-P1-C9 false Empty slot 0 65535 4660 65535 255 65535 65535 553713736 U78C7.001.RCH0004-P1-C2 false Universal Serial Bus UHC Spec 3075 4660 1202 2 33345 4116 553713707 U78C7.001.RCH0004-P1-T1 false Empty slot 0 65535 4661 65535 255 65535 65535 553779244 U78C7.001.RCH0004-P1-C6 false 2-Port 10GbE RoCE SFP+ Adapter 512 4661 1046 176 26448 4116 553844781 U78C7.001.RCH0004-P1-C7 false SAS RAID Controller, PCIe2, Dual-port 6Gb 260 4661 1224 2 842 4116 553713681 U78C7.001.RCH0004-P3-R2 false Empty slot 0 65535 4661 65535 255 65535 65535 553713680 U78C7.001.RCH0004-P1-C10 false 2-Port 10GbE RoCE SFP+ Adapter 512 4661 1046 176 26448 4116 553713712 U78C7.001.RCH0004-P1-C3 false Empty slot 0 65535 4661 65535 255 65535 65535 553713689 U78C7.001.RCH0004-P1-C12 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 4661 1056 1 5719 4116 553713688 U78C7.001.RCH0004-P1-C11 false PCIe2 2-Port 10GbE Base-T Adapter 512 4662 1170 16 5774 4116 553713720 U78C7.001.RCH0004-P1-C4 553713696 PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter U78C7.001.RCH0004-P1-C8 1 Running Sriov MIN_CAPACITY Auto false E_1500 autoDuplex Veb Veb E10Mbps fullDuplex linkUp Veb Auto false E_1500 autoDuplex Veb Veb Unknown Veb Auto false E_1500 autoDuplex Veb Veb E1Gbps fullDuplex linkUp Veb Auto false E_1500 autoDuplex Veb Veb Unknown Veb 0 48 654327812 PHB 4100 U78C7.001.RCH0004-P1-C8-T1-S4 654327813 PHB 4101 U78C7.001.RCH0004-P1-C8-T1-S5 654327814 PHB 4102 U78C7.001.RCH0004-P1-C8-T1-S6 654327815 PHB 4103 U78C7.001.RCH0004-P1-C8-T1-S7 654327816 PHB 4104 U78C7.001.RCH0004-P1-C8-T1-S8 654327817 PHB 4105 U78C7.001.RCH0004-P1-C8-T1-S9 654327818 PHB 4106 U78C7.001.RCH0004-P1-C8-T1-S10 654327819 PHB 4107 U78C7.001.RCH0004-P1-C8-T1-S11 654327820 PHB 4108 U78C7.001.RCH0004-P1-C8-T1-S12 654327821 PHB 4109 U78C7.001.RCH0004-P1-C8-T1-S13 654327822 PHB 4110 U78C7.001.RCH0004-P1-C8-T1-S14 
654327823 PHB 4111 U78C7.001.RCH0004-P1-C8-T1-S15 654327824 PHB 4112 U78C7.001.RCH0004-P1-C8-T1-S16 654327825 PHB 4113 U78C7.001.RCH0004-P1-C8-T1-S17 654327826 PHB 4114 U78C7.001.RCH0004-P1-C8-T1-S18 654327827 PHB 4115 U78C7.001.RCH0004-P1-C8-T1-S19 654327828 PHB 4116 U78C7.001.RCH0004-P1-C8-T1-S20 654327829 PHB 4117 U78C7.001.RCH0004-P1-C8-T1-S21 654327830 PHB 4118 U78C7.001.RCH0004-P1-C8-T1-S22 654327831 PHB 4119 U78C7.001.RCH0004-P1-C8-T1-S23 654327832 PHB 4120 U78C7.001.RCH0004-P1-C8-T1-S24 654327833 PHB 4121 U78C7.001.RCH0004-P1-C8-T1-S25 654327834 PHB 4122 U78C7.001.RCH0004-P1-C8-T1-S26 654327835 PHB 4123 U78C7.001.RCH0004-P1-C8-T1-S27 654327836 PHB 4124 U78C7.001.RCH0004-P1-C8-T1-S28 654327837 PHB 4125 U78C7.001.RCH0004-P1-C8-T1-S29 654327838 PHB 4126 U78C7.001.RCH0004-P1-C8-T1-S30 654327839 PHB 4127 U78C7.001.RCH0004-P1-C8-T1-S31 654327840 PHB 4128 U78C7.001.RCH0004-P1-C8-T1-S32 654327841 PHB 4129 U78C7.001.RCH0004-P1-C8-T1-S33 654327842 PHB 4130 U78C7.001.RCH0004-P1-C8-T1-S34 654327843 PHB 4131 U78C7.001.RCH0004-P1-C8-T1-S35 654327844 PHB 4132 U78C7.001.RCH0004-P1-C8-T1-S36 654327845 PHB 4133 U78C7.001.RCH0004-P1-C8-T1-S37 654327846 PHB 4134 U78C7.001.RCH0004-P1-C8-T1-S38 654327847 PHB 4135 U78C7.001.RCH0004-P1-C8-T1-S39 654327848 PHB 4136 U78C7.001.RCH0004-P1-C8-T1-S40 654327849 PHB 4137 U78C7.001.RCH0004-P1-C8-T1-S41 654327850 PHB 4138 U78C7.001.RCH0004-P1-C8-T1-S42 654327851 PHB 4139 U78C7.001.RCH0004-P1-C8-T1-S43 654327852 PHB 4140 U78C7.001.RCH0004-P1-C8-T1-S44 654327853 PHB 4141 U78C7.001.RCH0004-P1-C8-T1-S45 654327854 PHB 4142 U78C7.001.RCH0004-P1-C8-T1-S46 654327855 PHB 4143 U78C7.001.RCH0004-P1-C8-T1-S47 654327856 PHB 4144 U78C7.001.RCH0004-P1-C8-T1-S48 553713699 PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter U78C7.001.RCH0004-P1-C9 2 Running Sriov MAX_MIN_CAPACITY Auto false E_1500 autoDuplex Veb Veb E10Mbps fullDuplex linkUp Veb Auto false E_1500 autoDuplex Veb Veb Unknown Veb Auto false E_1500 autoDuplex Veb Veb E1Gbps fullDuplex linkUp Veb 
Auto false E_1500 autoDuplex Veb Veb Unknown Veb 0 48 654327812 PHB 4100 U78C7.001.RCH0004-P1-C9-T1-S4 654327813 PHB 4101 U78C7.001.RCH0004-P1-C9-T1-S5 654327814 PHB 4102 U78C7.001.RCH0004-P1-C9-T1-S6 654327815 PHB 4103 U78C7.001.RCH0004-P1-C9-T1-S7 654327816 PHB 4104 U78C7.001.RCH0004-P1-C9-T1-S8 654327817 PHB 4105 U78C7.001.RCH0004-P1-C9-T1-S9 654327818 PHB 4106 U78C7.001.RCH0004-P1-C9-T1-S10 654327819 PHB 4107 U78C7.001.RCH0004-P1-C9-T1-S11 654327820 PHB 4108 U78C7.001.RCH0004-P1-C9-T1-S12 654327821 PHB 4109 U78C7.001.RCH0004-P1-C9-T1-S13 654327822 PHB 4110 U78C7.001.RCH0004-P1-C9-T1-S14 654327823 PHB 4111 U78C7.001.RCH0004-P1-C9-T1-S15 654327824 PHB 4112 U78C7.001.RCH0004-P1-C9-T1-S16 654327825 PHB 4113 U78C7.001.RCH0004-P1-C9-T1-S17 654327826 PHB 4114 U78C7.001.RCH0004-P1-C9-T1-S18 654327827 PHB 4115 U78C7.001.RCH0004-P1-C9-T1-S19 654327828 PHB 4116 U78C7.001.RCH0004-P1-C9-T1-S20 654327829 PHB 4117 U78C7.001.RCH0004-P1-C9-T1-S21 654327830 PHB 4118 U78C7.001.RCH0004-P1-C9-T1-S22 654327831 PHB 4119 U78C7.001.RCH0004-P1-C9-T1-S23 654327832 PHB 4120 U78C7.001.RCH0004-P1-C9-T1-S24 654327833 PHB 4121 U78C7.001.RCH0004-P1-C9-T1-S25 654327834 PHB 4122 U78C7.001.RCH0004-P1-C9-T1-S26 654327835 PHB 4123 U78C7.001.RCH0004-P1-C9-T1-S27 654327836 PHB 4124 U78C7.001.RCH0004-P1-C9-T1-S28 654327837 PHB 4125 U78C7.001.RCH0004-P1-C9-T1-S29 654327838 PHB 4126 U78C7.001.RCH0004-P1-C9-T1-S30 654327839 PHB 4127 U78C7.001.RCH0004-P1-C9-T1-S31 654327840 PHB 4128 U78C7.001.RCH0004-P1-C9-T1-S32 654327841 PHB 4129 U78C7.001.RCH0004-P1-C9-T1-S33 654327842 PHB 4130 U78C7.001.RCH0004-P1-C9-T1-S34 654327843 PHB 4131 U78C7.001.RCH0004-P1-C9-T1-S35 654327844 PHB 4132 U78C7.001.RCH0004-P1-C9-T1-S36 654327845 PHB 4133 U78C7.001.RCH0004-P1-C9-T1-S37 654327846 PHB 4134 U78C7.001.RCH0004-P1-C9-T1-S38 654327847 PHB 4135 U78C7.001.RCH0004-P1-C9-T1-S39 654327848 PHB 4136 U78C7.001.RCH0004-P1-C9-T1-S40 654327849 PHB 4137 U78C7.001.RCH0004-P1-C9-T1-S41 654327850 PHB 4138 U78C7.001.RCH0004-P1-C9-T1-S42 
654327851 PHB 4139 U78C7.001.RCH0004-P1-C9-T1-S43 654327852 PHB 4140 U78C7.001.RCH0004-P1-C9-T1-S44 654327853 PHB 4141 U78C7.001.RCH0004-P1-C9-T1-S45 654327854 PHB 4142 U78C7.001.RCH0004-P1-C9-T1-S46 654327855 PHB 4143 U78C7.001.RCH0004-P1-C9-T1-S47 654327856 PHB 4144 U78C7.001.RCH0004-P1-C9-T1-S48 553713680 PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter U78CB.001.WZS06RG-P1-C7 NotConfigured Dedicated 553713697 PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter U78CB.001.WZS06RG-P1-C5 NotConfigured Dedicated 13857705831041335296 524288 491008 256 6 524288 1 256 8192 491008 256 256 40 36 64 64 256 64 64 64 64 40 256 0.05 36 default POWER6 POWER6_Plus POWER7 POWER8 8408 E8E 10678CV false 800 9.5.249.35 169.254.3.147 operating z1403ce-8408-E8E-SN10678CV 16 16 0 0 00000000 true true false END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/lufeed.txt0000664000175000017500000011533213571367171021405 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh lufeed.txt # #################################################### INFO{ {'comment': None, 'path': 'Tier/c390432c-f139-3e7f-a168-e76f8c1cbc98/LogicalUnit', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'X-Powered-By': 'Servlet/3.1', 'Transfer-Encoding': 'chunked', 'Set-Cookie': 'JSESSIONID=0000f0UTE-8egUwjXJBFXhtlLqp:c83b267d-dc99-4d51-b4fd-a9b2013da3f6; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Fri, 01 Apr 2016 20:27:06 GMT', 'X-Transaction-ID': 'XT10081487', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Fri, 01 Apr 2016 20:27:08 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': '410d8190-8602-44c2-862b-fa98e9862170', 'ETag': '-168932186'} END OF SECTION} BODY{ 5856c96c-ea00-3e9e-b93c-438d157d7e90 2016-04-01T16:27:06.554-04:00 IBM Power Systems Management Console a2b11d20-322d-3dcc-84ee-74fce7e90310 LogicalUnit 2016-04-01T16:27:08.245-04:00 IBM Power Systems Management Console 1056750293 a2b11d20-322d-3dcc-84ee-74fce7e90310 0 true 276c097502d44311e58004000040f2e95dc4a789dfd63464654319f89c04aa5382 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest_ser_a73d7083 1bf136f4-5e8e-30e4-8f4c-3926ddee169e LogicalUnit 2016-04-01T16:27:08.245-04:00 IBM Power Systems Management Console 1878760355 1bf136f4-5e8e-30e4-8f4c-3926ddee169e 0 true 276c097502d44311e58004000040f2e95df9ca942cb7818d357aa8fcf3df691919 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest.com_2d9a59b7 116c2146-db2c-324a-9702-b422c4c90153 LogicalUnit 2016-04-01T16:27:08.246-04:00 IBM Power Systems Management Console 2113241389 116c2146-db2c-324a-9702-b422c4c90153 0 true 
276c097502d44311e58004000040f2e95d2d9e58cbe630d0b1128bc20c9f03ac91 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false rescue_pvm3_tempest.com_e8220bdc 1af3de01-1a54-32a7-aee1-e6c879c01e5c LogicalUnit 2016-04-01T16:27:08.246-04:00 IBM Power Systems Management Console -1197303150 1af3de01-1a54-32a7-aee1-e6c879c01e5c 0 true 296c097502d44311e58004000040f2e95d0ec5f748940367e2947b871234b786ba 1 VirtualIO_Image false image_tempest.common.compute_insta_157330555537896c89120ac206347be2 5196af4a-24c8-3bcc-aa77-270f0a294564 LogicalUnit 2016-04-01T16:27:08.247-04:00 IBM Power Systems Management Console 1750783433 5196af4a-24c8-3bcc-aa77-270f0a294564 0 true 296c097502d44311e58004000040f2e95d38242afc010e4ed813c655a4f64baf6c 20 VirtualIO_Image false image_template_PowerVM_Ubuntu_Base_15d1cc7daa4a07853b511bf5c2a15773 8df99b8d-676c-32a4-92c9-496ac4665097 LogicalUnit 2016-04-01T16:27:08.248-04:00 IBM Power Systems Management Console -1855452777 8df99b8d-676c-32a4-92c9-496ac4665097 0 true 276c097502d44311e58004000040f2e95da60dffe1d10cdb5e2dc1ea85b377bd63 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 true boot_PowerVM_CI_Po_7acf8fd5_pvm 11a5cf67-2175-3f64-831c-973418ca6f4e LogicalUnit 2016-04-01T16:27:08.248-04:00 IBM Power Systems Management Console 1442734507 11a5cf67-2175-3f64-831c-973418ca6f4e 0 true 276c097502d44311e58004000040f2e95d96fcd91c17448f7337d76cdf4a55df54 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 true boot_PowerVM_CI_Po_1faabc71_pvm 7f309915-e19a-3d2e-884c-16a405b9a211 LogicalUnit 2016-04-01T16:27:08.249-04:00 IBM Power Systems Management Console -942402102 7f309915-e19a-3d2e-884c-16a405b9a211 0 true 276c097502d44311e58004000040f2e95d7e751b2f0bb4bbfeb5122aa991aebaef 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab true boot_pvm3_ssp_primer_8786c8bd a94ae849-d55c-3cb0-b850-cb23060de650 LogicalUnit 
2016-04-01T16:27:08.249-04:00 IBM Power Systems Management Console -979315907 a94ae849-d55c-3cb0-b850-cb23060de650 0 true 276c097502d44311e58004000040f2e95dc8abd602a5cb3f32cff25dd6628f2cf4 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 true boot_PowerVM_CI_Po_9b2a2cef_pvm 15e16d9b-bd08-31e8-afc2-6aae91a995ca LogicalUnit 2016-04-01T16:27:08.250-04:00 IBM Power Systems Management Console -879618324 15e16d9b-bd08-31e8-afc2-6aae91a995ca 0 true 276c097502d44311e58004000040f2e95d50df766402f40d2942104b846d9dd947 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d38242afc010e4ed813c655a4f64baf6c true boot_PowerVM_CI_Po_ed7d0f14_pvm 7c438dd8-6d97-34cb-af74-2cf4bea97794 LogicalUnit 2016-04-01T16:27:08.251-04:00 IBM Power Systems Management Console -1455910120 7c438dd8-6d97-34cb-af74-2cf4bea97794 0 true 276c097502d44311e58004000040f2e95df687a18124cfb5d0a23628cd700c4a53 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest.com_c0a14a57 5391ee78-60a9-3a14-82ea-9063360c7a37 LogicalUnit 2016-04-01T16:27:08.251-04:00 IBM Power Systems Management Console 37164908 5391ee78-60a9-3a14-82ea-9063360c7a37 0 true 276c097502d44311e58004000040f2e95d463909128b565b9b2dba38b300911de5 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d38242afc010e4ed813c655a4f64baf6c true boot_efried_60166e2e_pvm ced6279b-db23-3741-a0a8-77281d71234d LogicalUnit 2016-04-01T16:27:08.252-04:00 IBM Power Systems Management Console -1696135952 ced6279b-db23-3741-a0a8-77281d71234d 0 true 276c097502d44311e58004000040f2e95d634671e06877829710625a5e2faa2081 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 true boot_PowerVM_CI_Po_87d2fdd2_pvm a671df5f-c5ee-3396-be27-9bd35cf90789 LogicalUnit 2016-04-01T16:27:08.252-04:00 IBM Power Systems Management Console -598644111 a671df5f-c5ee-3396-be27-9bd35cf90789 0 true 276c097502d44311e58004000040f2e95db0c981cbc0499dfe6f4adcdadd1548d1 20 VirtualIO_Disk 
296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 true boot_PowerVM_CI_Po_b7f4117b_pvm d4b7d0ef-c1ef-30a9-9453-ea2cbb020849 LogicalUnit 2016-04-01T16:27:08.253-04:00 IBM Power Systems Management Console -1647099011 d4b7d0ef-c1ef-30a9-9453-ea2cbb020849 0 true 276c097502d44311e58004000040f2e95da1b9879e39d3b740100c7c2e7aeaa7f2 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest.com_846d9d11 084779fc-5358-363c-bc81-335de8df3c8c LogicalUnit 2016-04-01T16:27:08.253-04:00 IBM Power Systems Management Console -428708139 084779fc-5358-363c-bc81-335de8df3c8c 0 true 276c097502d44311e58004000040f2e95d0163c9b46a2d014558f28b47cf448c48 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest_mul_853cf019 6b396fc6-4c80-32d5-b15b-11b68e28f3bb LogicalUnit 2016-04-01T16:27:08.254-04:00 IBM Power Systems Management Console 1707249548 6b396fc6-4c80-32d5-b15b-11b68e28f3bb 0 true 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab 1 VirtualIO_Image false image_base_os_157330555537896c89120ac206347be2 aa88a05c-5e0e-3245-b3fc-cf9575cc70bf LogicalUnit 2016-04-01T16:27:08.255-04:00 IBM Power Systems Management Console -1602810180 aa88a05c-5e0e-3245-b3fc-cf9575cc70bf 0 true 276c097502d44311e58004000040f2e95d909f7fec0dbd0c0c91fcc2fa3c0f0d0d 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 true boot_PowerVM_CI_Po_7a6b11ba_pvm 026f1411-5c1f-3b46-9542-386040ad43a5 LogicalUnit 2016-04-01T16:27:08.255-04:00 IBM Power Systems Management Console 2071179378 026f1411-5c1f-3b46-9542-386040ad43a5 0 true 276c097502d44311e58004000040f2e95de37f293d6c706a3ddab827eea56fa7d2 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest_Lis_34aa187a 6d1fae20-487e-3fd0-ba39-fa0f7db933bc LogicalUnit 2016-04-01T16:27:08.256-04:00 IBM Power Systems Management Console 1450299766 
6d1fae20-487e-3fd0-ba39-fa0f7db933bc 0 true 296c097502d44311e58004000040f2e95d9437fb054f7c56b6821fd4857f1bffa8 20 VirtualIO_Image false image_template_PowerVM_Ubuntu_Base_97f6adc3b9144076a210393b9eea4da6 d381ed63-9dbb-31f3-beab-aef8d546cc03 LogicalUnit 2016-04-01T16:27:08.256-04:00 IBM Power Systems Management Console -117485050 d381ed63-9dbb-31f3-beab-aef8d546cc03 0 true 276c097502d44311e58004000040f2e95d3a9ae7ce486d290ccc07fb71b3560446 1 VirtualIO_Disk 296c097502d44311e58004000040f2e95dcf6063cd6bf7ed7b497780a1799236ab false boot_pvm3_tempest_ser_2edc910f 17f79393-cc9e-3c95-8cb0-61b7e0c14d8f LogicalUnit 2016-04-01T16:27:08.257-04:00 IBM Power Systems Management Console 1536917179 17f79393-cc9e-3c95-8cb0-61b7e0c14d8f 0 true 276c097502d44311e58004000040f2e95db15ba9d075e58aa7bb316d0f8c84d80b 20 VirtualIO_Disk 296c097502d44311e58004000040f2e95d38242afc010e4ed813c655a4f64baf6c true boot_PowerVM_CI_Po_5efd5f67_pvm END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/phyp_pcm_data.txt0000664000175000017500000002303413571367171022746 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_phyp_20150527T074430+0000.json'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ { "systemUtil": { "utilInfo": { "version": "1.3.0", "metricType": "Raw", "monitoringType": "LTM", "mtms": "8247-22L*2125D4A", "name": "dev-4" }, "utilSample": { "timeStamp": "2015-05-27T08:17:45+0000", "status": 0, "errorInfo": [], "timeBasedCycles": 8.0629725893315e+14, "systemFirmware": { "utilizedProcCycles": 58599310268, "assignedMem": 4096 }, "processor": { "totalProcUnits": 20, "configurableProcUnits": 20, "availableProcUnits": 18.9, "procCyclesPerSecond": 512000000 }, "memory": { "totalMem": 65536, "availableMem": 32512, "configurableMem": 65536 }, "sharedProcessorPool": [ { "id": 0, "name": "DefaultPool", "assignedProcCycles": 1.6125945162342e+16, "utilizedPoolCycles": 683011326288, "maxProcUnits": 20, "borrowedPoolProcUnits": 18 } ], "lparsUtil": [ { "id": 6, "uuid": "2545BCC5-BAE8-4414-AD49-EAFC2DEE2546", "type": "aixlinux", "name": "fkh4-99b8fdca-kyleh", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 0, "utilizedCappedProcCycles": 0, "utilizedUnCappedProcCycles": 0, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 5, "uuid": 
"3B0237F9-26F1-41C7-BE57-A08C9452AD9D", "type": "aixlinux", "name": "fake_npiv", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 256, "backedPhysicalMem": 256 }, "processor": { "poolId": 0, "mode": "cap", "maxVirtualProcessors": 1, "maxProcUnits": 0.1, "weight": 0, "entitledProcCycles": 0, "utilizedCappedProcCycles": 0, "utilizedUnCappedProcCycles": 0, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 4, "uuid": "66A2E886-D05D-42F4-87E0-C3BA02CF7C7E", "type": "aixlinux", "name": "kh4-9fdaa1ba-kyleh", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 0, "utilizedCappedProcCycles": 0, "utilizedUnCappedProcCycles": 0, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 3, "uuid": "3B7A3E07-E0B0-4F35-8997-6019D0D1CFC8", "type": "aixlinux", "name": "placeholder", "state": "Not Activated", "affinityScore": 0, "memory": { "logicalMem": 0, "backedPhysicalMem": 0 }, "processor": { "mode": "share_idle_procs", "maxVirtualProcessors": 0, "maxProcUnits": 0, "entitledProcCycles": 0, "utilizedCappedProcCycles": 0, "utilizedUnCappedProcCycles": 0, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 2, "uuid": "42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC", "type": "aixlinux", "name": "Ubuntu1410", "state": "Open Firmware", "affinityScore": 100, "memory": { "logicalMem": 20480, "backedPhysicalMem": 20480 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 4, "maxProcUnits": 0.4, "weight": 128, "entitledProcCycles": 1765629232513, "utilizedCappedProcCycles": 
264619289721, "utilizedUnCappedProcCycles": 641419282, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 160866895489, "totalInstructionsExecutionTime": 193139925064 }, "network": { "virtualEthernetAdapters": [ { "vlanId": 2227, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V2-C2", "isPortVLANID": true, "receivedPackets": 10, "sentPackets": 100, "droppedPackets": 5, "sentBytes": 100, "receivedBytes": 10000, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 } ] }, "storage": { "genericVirtualAdapters": [ { "physicalLocation": "U8247.22L.2125D4A-V2-C3", "viosId": 1, "viosAdapterSlotId": 1000 } ], "virtualFiberChannelAdapters": [ { "physicalLocation": "U8247.22L.2125D4A-V2-C2", "viosId": 2, "wwpnpair": [ 13857705835384867080, 13857705835384867081 ] }, { "physicalLocation": "U8247.22L.2125D4A-V2-C3", "viosId": 1, "wwpnPair": [ 13857705835384867082, 13857705835384867083 ] } ] } } ], "viosUtil": [ { "id": 1, "uuid": "3443DB77-AED1-47ED-9AA5-3DB9C6CF7089", "name": "IOServer - SN2125D4A", "state": "Running", "affinityScore": 100, "memory": { "assignedMem": 4096 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 4, "maxProcUnits": 0.4, "weight": 255, "entitledProcCycles": 3603069246332, "utilizedCappedProcCycles": 334805782979, "utilizedUnCappedProcCycles": 219847016046, "idleProcCycles": 260430293020, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 1289474458948, "totalInstructionsExecutionTime": 520104519750 }, "network": { "virtualEthernetAdapters": [ { "vlanId": 2227, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V1-C2", "isPortVLANID": true, "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "sentBytes": 0, "receivedBytes": 0, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, 
"receivedPhysicalBytes": 0 }, { "vlanId": 123, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V1-C12", "isPortVLANID": true, "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "sentBytes": 0, "receivedBytes": 0, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 } ] } } ] } } } END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/get_volume_group.txt0000664000175000017500000005120513571367171023521 0ustar neoneo00000000000000INFO{ {'comment': 'Use for media rep processor testing. Get of a volume group', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': '/; HttpOnly'} END OF SECTION} HEADERS{ {'cache-control': 'no-cache="set-cookie, set-cookie2"', 'content-length': '16539', 'x-powered-by': 'Servlet/3.0', 'date': 'Wed, 07 Aug 2013 11:42:46 GMT', 'set-cookie': 'JSESSIONID=0000NmgK-pjcupBNwMN5_d4RMRf:537630eb-a35f-4f87-b20c-6e02059b963e; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'content-type': 'application/xml', 'etag': '1782236641'} END OF SECTION} BODY{ 8731bf3a-1f11-30a1-9d8b-c26d4cbe7ccf VolumeGroup 2013-09-04T13:54:02.827Z IBM Power Systems Management Console 8731bf3a-1f11-30a1-9d8b-c26d4cbe7ccf 1378302838563 558.9111 558.9111 558.9111 rootvg 00004d4a00007a00000001405006c06b 0 vopt_089ee7c70e47480892bc20148a45d955 0evopt_089ee7c70e47480892bc20148a45d955 rw 0.000000 vopt_1cdb7958692945c58da79d761f7e8f6a 0evopt_1cdb7958692945c58da79d761f7e8f6a rw 0.000000 vopt_2731b9382a7a4f19849c977861be894e 0evopt_2731b9382a7a4f19849c977861be894e rw 0.000000 vopt_39fc7a67ec124d6c89f1eb064fd5e94b 0evopt_39fc7a67ec124d6c89f1eb064fd5e94b rw 0.000000 vopt_3a1b36dda62f4d85a0acb3d3577ccc75 0evopt_3a1b36dda62f4d85a0acb3d3577ccc75 rw 0.000000 vopt_41078229067949d1911af20d7bf2df0b 0evopt_41078229067949d1911af20d7bf2df0b rw 0.000000 vopt_444b0be71be640b1b51c971e5b4df1a2 0evopt_444b0be71be640b1b51c971e5b4df1a2 rw 0.000000 
vopt_4903b73ee95b443eb5d784fdbf3d8ea3 0evopt_4903b73ee95b443eb5d784fdbf3d8ea3 rw 0.000000 vopt_4fac13f3dceb470bb2d200688246f04e 0evopt_4fac13f3dceb470bb2d200688246f04e rw 0.000000 vopt_51eae35ab0ff47f7b90d25bff08737c5 0evopt_51eae35ab0ff47f7b90d25bff08737c5 rw 0.000000 vopt_53c9937e8ae647199eba895fb5ba9455 0evopt_53c9937e8ae647199eba895fb5ba9455 rw 0.000000 vopt_5e091301829a45228a8b0bb26e705906 0evopt_5e091301829a45228a8b0bb26e705906 rw 0.000000 vopt_69d22f6b548d42b7bb2747cd99f5c302 0evopt_69d22f6b548d42b7bb2747cd99f5c302 rw 0.000000 vopt_6c0762b2abbd458aba2c71d3185d158b 0evopt_6c0762b2abbd458aba2c71d3185d158b rw 0.000000 vopt_6d56f428db8f42dbbbd4d16b24a71c28 0evopt_6d56f428db8f42dbbbd4d16b24a71c28 rw 0.000000 vopt_70ae8e68ab68449c95a41231ffaa39ec 0evopt_70ae8e68ab68449c95a41231ffaa39ec rw 0.000000 vopt_74539131037443eb9eb482e6c29d28cf 0evopt_74539131037443eb9eb482e6c29d28cf rw 0.000000 vopt_771d01d4f7114a5cb0fdd424329047f4 0evopt_771d01d4f7114a5cb0fdd424329047f4 rw 0.000000 vopt_7f3e5855ff50436b8907fc8cafec165a 0evopt_7f3e5855ff50436b8907fc8cafec165a rw 0.000000 vopt_998bfd2bfbf64fcbb48544cb2879a9bb 0evopt_998bfd2bfbf64fcbb48544cb2879a9bb rw 0.000000 vopt_a00dc2c9535047ff95a91ab5155a9c23 0evopt_a00dc2c9535047ff95a91ab5155a9c23 rw 0.000000 vopt_a75065604cfe4785ad2018f914975518 0evopt_a75065604cfe4785ad2018f914975518 rw 0.000000 vopt_a755d19d6c0b4ce2a0122abec9cde473 0evopt_a755d19d6c0b4ce2a0122abec9cde473 rw 0.000000 vopt_b17310c19c70495eac828f38fbac36d9 0evopt_b17310c19c70495eac828f38fbac36d9 rw 0.000000 vopt_c03ce43687ae450ca82d277c1d12b448 0evopt_c03ce43687ae450ca82d277c1d12b448 rw 0.000000 vopt_c1685c355165434db239972bb9ddc789 0evopt_c1685c355165434db239972bb9ddc789 rw 0.000000 vopt_c1e7cd84917c48929aa8ef260d812403 0evopt_c1e7cd84917c48929aa8ef260d812403 rw 0.000000 vopt_c3a5de26201045368a643ff33db18d4c 0evopt_c3a5de26201045368a643ff33db18d4c rw 0.000000 vopt_c850de703d314f829a876c4d52354e9a 0evopt_c850de703d314f829a876c4d52354e9a rw 0.000000 
vopt_ccfce05ce9e44b93a6ba7e6ee609e2f0 0evopt_ccfce05ce9e44b93a6ba7e6ee609e2f0 rw 0.000000 vopt_daa2573bba0f487abac96090677cd91c 0evopt_daa2573bba0f487abac96090677cd91c rw 0.000000 vopt_fe426d6d6ad34c78a66be771ed070149 0evopt_fe426d6d6ad34c78a66be771ed070149 rw 0.000000 VMLibrary 1 SAS Disk Drive U78AB.001.WZSHFLF-P3-D1 NoReserve Round_Robin 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0ODgwMDI5QkM= false 572325 hdisk0 active 2811350000394880029BC09MBF2600RC03IBMsas 0400004d4a00007a00000001405006c06b END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/fake_cna.txt0000664000175000017500000001213013571367171021660 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_cna.txt # #################################################### INFO{ {'comment': 'Used for testing cna_wrapper.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'LogicalPartition/0A68CFAB-F62B-46D4-A6A0-F4EBE0264AD5/ClientNetworkAdapter/6445b54b-b9dc-3bc2-b1d3-f8cc22ba95b8'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 6445b54b-b9dc-3bc2-b1d3-f8cc22ba95b8 ClientNetworkAdapter 2014-09-18T07:04:42.910Z IBM Power Systems Management Console 249747277 6445b54b-b9dc-3bc2-b1d3-f8cc22ba95b8 1411023882909 U8246.L2C.0604C7A-V24-C32 U8246.L2C.0604C7A-V24-C32 3 false true 32 true ALL 
FAD4433ED120 100 0 true 53 54 55 true VSITID 77.99 VSIMID 0 192.168.2.6 255.255.255.0 192.168.2.0 1 1500 br-int iface-id=ba9d8ec3-64b2-47fe-9f50-e12ba373814c,iface-status=active,attached-mac=fa:e6:c8:3f:80:20,vm-uuid=64443c49-920d-47d7-9b78-1216845c51f5 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vswitch_feed.txt0000664000175000017500000001002613571367171023573 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh cna_vswitches.txt # #################################################### INFO{ {'comment': 'Use for Client Network Adapter Testing.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/4abca7ff-3710-3160-b9e4-cb4456c33f43/VirtualSwitch'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ b9eb81ac-a98d-3b65-a055-431219689198 2015-01-20T00:11:02.465Z IBM Power Systems Management Console 4d9735ae-feaf-32c2-a1bc-102026df9168 VirtualSwitch 2015-01-20T00:11:02.740Z IBM Power Systems Management Console -465528264 4d9735ae-feaf-32c2-a1bc-102026df9168 1421712662473 0 Veb ETHERNET0(Default) END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_volume_group_no_vg.txt0000664000175000017500000000436113571367171025041 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_volume_group.txt # #################################################### INFO{ {'comment': 'Used for testing test_volume_group.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/14B854F7-42CE-4FF0-BD57-1D117054E701/VolumeGroup'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 8070198f-b358-3b26-a144-1b268cf6f8d4 2015-01-22T04:15:33.346Z IBM Power Systems Management Console END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/phyp_pcm_data3.txt0000664000175000017500000002171613571367171023036 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_phyp_20150527T074430+0000.json'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ { "systemUtil": { "utilInfo": { "version": "1.3.0", "metricType": "Raw", "monitoringType": "LTM", "mtms": "8247-22L*2125D4A", "name": "dev-4" }, "utilSample": { "timeStamp": "2015-05-27T08:17:45+0000", "status": 0, "errorInfo": [], "timeBasedCycles": 8.0629725893315e+14, "systemFirmware": { "utilizedProcCycles": 58599310268, "assignedMem": 4096 }, "processor": { "totalProcUnits": 20, "configurableProcUnits": 20, "availableProcUnits": 18.9, "procCyclesPerSecond": 512000000 }, "memory": { "totalMem": 65536, "availableMem": 32512, "configurableMem": 65536 }, "sharedProcessorPool": [ { "id": 0, "name": "DefaultPool", "assignedProcCycles": 1.6125945162342e+16, "utilizedPoolCycles": 683011326288, "maxProcUnits": 20, "borrowedPoolProcUnits": 18 } ], "lparsUtil": [ { "id": 6, "uuid": "2545BCC5-BAE8-4414-AD49-EAFC2DEE2546", "type": "aixlinux", "name": "fkh4-99b8fdca-kyleh", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 1000000, "utilizedCappedProcCycles": 10000, "utilizedUnCappedProcCycles": 5000, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 5, "uuid": 
"3B0237F9-26F1-41C7-BE57-A08C9452AD9D", "type": "aixlinux", "name": "fake_npiv", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 256, "backedPhysicalMem": 256 }, "processor": { "poolId": 0, "mode": "cap", "maxVirtualProcessors": 1, "maxProcUnits": 0.1, "weight": 0, "entitledProcCycles": 0, "utilizedCappedProcCycles": 0, "utilizedUnCappedProcCycles": 0, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 4, "uuid": "66A2E886-D05D-42F4-87E0-C3BA02CF7C7E", "type": "aixlinux", "name": "kh4-9fdaa1ba-kyleh", "state": "Not Activated", "affinityScore": 100, "memory": { "logicalMem": 2048, "backedPhysicalMem": 2048 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 2, "maxProcUnits": 0.2, "weight": 64, "entitledProcCycles": 500000, "utilizedCappedProcCycles": 10000, "utilizedUnCappedProcCycles": 5000, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 3, "uuid": "3B7A3E07-E0B0-4F35-8997-6019D0D1CFC8", "type": "aixlinux", "name": "placeholder", "state": "Not Activated", "affinityScore": 0, "memory": { "logicalMem": 0, "backedPhysicalMem": 0 }, "processor": { "mode": "share_idle_procs", "maxVirtualProcessors": 0, "maxProcUnits": 0, "entitledProcCycles": 1000000, "utilizedCappedProcCycles": 10000, "utilizedUnCappedProcCycles": 5000, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 0, "totalInstructionsExecutionTime": 0 } }, { "id": 2, "uuid": "42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC", "type": "aixlinux", "name": "Ubuntu1410", "state": "Open Firmware", "affinityScore": 100, "memory": { "logicalMem": 20480, "backedPhysicalMem": 20480 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 4, "maxProcUnits": 0.4, "weight": 128, "entitledProcCycles": 1665629232513, 
"utilizedCappedProcCycles": 254619289721, "utilizedUnCappedProcCycles": 631419282, "idleProcCycles": 0, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 150866895489, "totalInstructionsExecutionTime": 183139925064 }, "network": { "virtualEthernetAdapters": [ { "vlanId": 2227, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V2-C2", "isPortVLANID": true, "receivedPackets": 10, "sentPackets": 100, "droppedPackets": 5, "sentBytes": 100, "receivedBytes": 10000, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 } ] }, "storage": { "genericVirtualAdapters": [ { "physicalLocation": "U8247.22L.2125D4A-V2-C3", "viosId": 1, "viosAdapterSlotId": 1000 } ] } } ], "viosUtil": [ { "id": 1, "uuid": "3443DB77-AED1-47ED-9AA5-3DB9C6CF7089", "name": "IOServer - SN2125D4A", "state": "Running", "affinityScore": 100, "memory": { "assignedMem": 4096 }, "processor": { "poolId": 0, "mode": "uncap", "maxVirtualProcessors": 4, "maxProcUnits": 0.4, "weight": 255, "entitledProcCycles": 3503069246332, "utilizedCappedProcCycles": 324805782979, "utilizedUnCappedProcCycles": 209847016046, "idleProcCycles": 250430293020, "donatedProcCycles": 0, "timeSpentWaitingForDispatch": 0, "totalInstructions": 1189474458948, "totalInstructionsExecutionTime": 510104519750 }, "network": { "virtualEthernetAdapters": [ { "vlanId": 2227, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V1-C2", "isPortVLANID": true, "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "sentBytes": 0, "receivedBytes": 0, "receivedPhysicalPackets": 0, "sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 }, { "vlanId": 123, "vswitchId": 0, "physicalLocation": "U8247.22L.2125D4A-V1-C12", "isPortVLANID": true, "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "sentBytes": 0, "receivedBytes": 0, "receivedPhysicalPackets": 0, 
"sentPhysicalPackets": 0, "droppedPhysicalPackets": 0, "sentPhysicalBytes": 0, "receivedPhysicalBytes": 0 } ] } } ] } } } END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/nbbr_virtual_switch.txt0000664000175000017500000001141113571367171024204 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh nbbr_virtual_switch.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/VirtualSwitch'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 0b81f37e-06b3-39dc-8d10-35734aaddac5 2015-02-17T23:07:17.073Z IBM Power Systems Management Console e1a852cb-2be5-3a51-9147-43761bc3d720 VirtualSwitch 2015-02-17T23:07:17.128Z IBM Power Systems Management Console -1301754041 e1a852cb-2be5-3a51-9147-43761bc3d720 1424132682404 0 Veb ETHERNET0(Default) END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/event_feed.txt0000664000175000017500000015423113571367171022246 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh event_feed.txt # #################################################### INFO{ {'comment': None, 'path': 'Event', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'X-Powered-By': 'Servlet/3.1', 'Transfer-Encoding': 'chunked', 'X-TransactionRecord-Uuid': '6148f006-14dc-4c7f-9e15-204831b0f426', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Thu, 15 Sep 2016 20:18:21 GMT', 'X-Transaction-ID': 'XT10002170', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Set-Cookie': 'JSESSIONID=00001o3HkNTofVHzVLVuVhMCL6Y:6b9de794-8590-4eb9-b376-d2a5483686df; Path=/; Secure; HttpOnly', 'Date': 'Thu, 15 Sep 2016 20:18:21 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'ETag': '1241218321'} END OF SECTION} BODY{ 3d13f965-e38b-3c65-bfa4-e347046da56f 2016-09-15T16:18:21.326-04:00 IBM Power Systems Management Console 510ae1e6-3e86-34c6-bf4c-c638e76a5f68 Event 2016-09-15T16:18:21.335-04:00 IBM Power Systems Management Console 1427405456 510ae1e6-3e86-34c6-bf4c-c638e76a5f68 0 ADD_URI 1473962006548 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/1E6FC741-6253-4B69-B88B-8A44BED92145 4d3bc887-ba46-3ce7-a519-22716146b53f Event 2016-09-15T16:18:21.336-04:00 IBM Power Systems Management Console 265059762 4d3bc887-ba46-3ce7-a519-22716146b53f 0 MODIFY_URI 1473962006549 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 3ad0fbb6-8498-33be-862b-a1207cb7e702 Event 2016-09-15T16:18:21.337-04:00 IBM Power Systems Management Console 1496155289 3ad0fbb6-8498-33be-862b-a1207cb7e702 0 DELETE_URI 1473962006550 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/1E6FC741-6253-4B69-B88B-8A44BED92145 d3667d39-6a27-3e00-96ef-0dc6e3c28a13 Event 
2016-09-15T16:18:21.338-04:00 IBM Power Systems Management Console 1816704714 d3667d39-6a27-3e00-96ef-0dc6e3c28a13 0 ADD_URI 1473962006551 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 16e5340e-4cdd-398a-9f29-531e21f06829 Event 2016-09-15T16:18:21.338-04:00 IBM Power Systems Management Console -760365602 16e5340e-4cdd-398a-9f29-531e21f06829 0 MODIFY_URI 1473962006552 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 NVRAM 8dc75f20-c833-363e-b1b4-dceec1e8a4ce Event 2016-09-15T16:18:21.339-04:00 IBM Power Systems Management Console 1891405832 8dc75f20-c833-363e-b1b4-dceec1e8a4ce 0 MODIFY_URI 1473962006553 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 Other 1e079de1-e10f-3ff7-96d3-19e2167b188d Event 2016-09-15T16:18:21.340-04:00 IBM Power Systems Management Console 313787948 1e079de1-e10f-3ff7-96d3-19e2167b188d 0 MODIFY_URI 1473962006554 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 492f0aba-c888-3c30-9566-2665c7e5060f Event 2016-09-15T16:18:21.340-04:00 IBM Power Systems Management Console 1895154154 492f0aba-c888-3c30-9566-2665c7e5060f 0 MODIFY_URI 1473962006555 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 Other c0a7bbed-4a50-36a9-8421-fe147ed43e41 Event 2016-09-15T16:18:21.341-04:00 IBM Power Systems Management Console -1208499057 c0a7bbed-4a50-36a9-8421-fe147ed43e41 0 MODIFY_URI 1473962006556 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 PartitionState,Other 79aae91f-15ef-3c82-88b9-1807167b052b Event 2016-09-15T16:18:21.342-04:00 IBM Power Systems 
Management Console 1898902476 79aae91f-15ef-3c82-88b9-1807167b052b 0 MODIFY_URI 1473962006557 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 Other 900779d6-1404-3d28-9e2c-f30f10f452af Event 2016-09-15T16:18:21.343-04:00 IBM Power Systems Management Console -662619128 900779d6-1404-3d28-9e2c-f30f10f452af 0 MODIFY_URI 1473962006558 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/7E84C205-5961-4D78-85AB-D84A6EB83C04 Other 5d85d159-8dbf-3f2f-b52e-4b7f675a146d Event 2016-09-15T16:18:21.343-04:00 IBM Power Systems Management Console -1396030668 5d85d159-8dbf-3f2f-b52e-4b7f675a146d 0 ADD_URI 1473962006559 http://localhost:12080/rest/api/uom/Tier/665971d0-2ac6-3201-9ebd-e08a198973b2/LogicalUnit/e47ddc5f-5cd6-3134-a749-1fb73a0363e7 LogicalUnit c46d2dae-a439-311b-a076-7014ed218abe Event 2016-09-15T16:18:21.344-04:00 IBM Power Systems Management Console 603848151 c46d2dae-a439-311b-a076-7014ed218abe 0 MODIFY_URI 1473962006560 http://localhost:12080/rest/api/uom/Tier/665971d0-2ac6-3201-9ebd-e08a198973b2 Tier 39d8556a-be4d-35f6-9f5e-c664a2a7b6d1 Event 2016-09-15T16:18:21.345-04:00 IBM Power Systems Management Console 823670728 39d8556a-be4d-35f6-9f5e-c664a2a7b6d1 0 MODIFY_URI 1473962006561 http://localhost:12080/rest/api/uom/Tier/665971d0-2ac6-3201-9ebd-e08a198973b2/LogicalUnit/e47ddc5f-5cd6-3134-a749-1fb73a0363e7 derived 50eb341b-7e33-374a-b3f2-6c504b97abd2 Event 2016-09-15T16:18:21.345-04:00 IBM Power Systems Management Console 514771863 50eb341b-7e33-374a-b3f2-6c504b97abd2 0 MODIFY_URI 1473962006562 http://localhost:12080/rest/api/uom/Tier/665971d0-2ac6-3201-9ebd-e08a198973b2 other 0ebccbbd-db7a-347d-a9ab-7f008240cae2 Event 2016-09-15T16:18:21.346-04:00 IBM Power Systems Management Console 1918118943 0ebccbbd-db7a-347d-a9ab-7f008240cae2 0 MODIFY_URI 1473962006563 
http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/7E84C205-5961-4D78-85AB-D84A6EB83C04 freeIOAdaptersForLinkAggregation 613b00b4-ed42-35a0-ad61-6b2966f5b136 Event 2016-09-15T16:18:21.347-04:00 IBM Power Systems Management Console 188421573 613b00b4-ed42-35a0-ad61-6b2966f5b136 0 MODIFY_URI 1473962006564 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 freeIOAdaptersForLinkAggregation a20c3507-82ad-3781-8991-bf11ea52bd4f Event 2016-09-15T16:18:21.347-04:00 IBM Power Systems Management Console 373761100 a20c3507-82ad-3781-8991-bf11ea52bd4f 0 MODIFY_URI 1473962006565 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 41386b9b-8783-3bdc-8403-1e55aea566a4 Event 2016-09-15T16:18:21.348-04:00 IBM Power Systems Management Console 1955127306 41386b9b-8783-3bdc-8403-1e55aea566a4 0 MODIFY_URI 1473962006566 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 Other cd22556f-0548-3fb7-8315-27bd61e2b818 Event 2016-09-15T16:18:21.349-04:00 IBM Power Systems Management Console 377509422 cd22556f-0548-3fb7-8315-27bd61e2b818 0 MODIFY_URI 1473962006567 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 3ffd907d-b2e5-3e42-959b-1a11f4fc653e Event 2016-09-15T16:18:21.349-04:00 IBM Power Systems Management Console -604520137 3ffd907d-b2e5-3e42-959b-1a11f4fc653e 0 MODIFY_URI 1473962006568 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/7E84C205-5961-4D78-85AB-D84A6EB83C04 Other e2d5776f-a114-333d-9baf-159280cc4af5 Event 2016-09-15T16:18:21.350-04:00 IBM Power Systems Management Console 381257744 e2d5776f-a114-333d-9baf-159280cc4af5 0 MODIFY_URI 1473962006569 
http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 4489270c-70fe-39ab-8863-4ede34b6361b Event 2016-09-15T16:18:21.351-04:00 IBM Power Systems Management Console 1870422532 4489270c-70fe-39ab-8863-4ede34b6361b 0 MODIFY_URI 1473962006570 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 Other 6f28a17f-b0f7-35b3-9033-3a20de9e517b Event 2016-09-15T16:18:21.352-04:00 IBM Power Systems Management Console 424363447 6f28a17f-b0f7-35b3-9033-3a20de9e517b 0 MODIFY_URI 1473962006571 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 8e576ddd-aad4-31c8-a53f-c4d801d83a47 Event 2016-09-15T16:18:21.352-04:00 IBM Power Systems Management Console -1733881994 8e576ddd-aad4-31c8-a53f-c4d801d83a47 0 MODIFY_URI 1473962006572 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/7E84C205-5961-4D78-85AB-D84A6EB83C04 VirtualSCSIMappings 30311fe5-4943-3a2e-89ec-e2ea949ef68f Event 2016-09-15T16:18:21.353-04:00 IBM Power Systems Management Console -1732007833 30311fe5-4943-3a2e-89ec-e2ea949ef68f 0 MODIFY_URI 1473962006573 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/7E84C205-5961-4D78-85AB-D84A6EB83C04 VirtualSCSIMappings b2f2e69b-9fee-3141-9628-9c7abda9d35a Event 2016-09-15T16:18:21.354-04:00 IBM Power Systems Management Console 833262093 b2f2e69b-9fee-3141-9628-9c7abda9d35a 0 MODIFY_URI 1473962006574 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 VirtualSCSIMappings c2e9fc10-2a01-3d93-8dd5-bf5c38a6e088 Event 2016-09-15T16:18:21.354-04:00 IBM Power Systems Management Console 835136254 c2e9fc10-2a01-3d93-8dd5-bf5c38a6e088 0 MODIFY_URI 1473962006575 
http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 VirtualSCSIMappings 7cb330aa-6e91-33ae-a512-73e2be315307 Event 2016-09-15T16:18:21.355-04:00 IBM Power Systems Management Console -636670976 7cb330aa-6e91-33ae-a512-73e2be315307 0 MODIFY_URI 1473962006576 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 NVRAM c50ea120-be79-3100-9d8c-5b29c21e3a5a Event 2016-09-15T16:18:21.356-04:00 IBM Power Systems Management Console 5713998 c50ea120-be79-3100-9d8c-5b29c21e3a5a 0 MODIFY_URI 1473962006577 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/113ABFC5-FE3C-4321-A845-310C7A37F276 PartitionState,PartitionName,Other ef0ec4ab-6457-33e4-84f2-0671c611fb65 Event 2016-09-15T16:18:21.357-04:00 IBM Power Systems Management Console 1739159690 ef0ec4ab-6457-33e4-84f2-0671c611fb65 0 MODIFY_URI 1473962006578 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/VirtualIOServer/7E84C205-5961-4D78-85AB-D84A6EB83C04 PartitionState,PartitionName,Other df1c6322-4cda-3685-8614-6bed4aa679a4 Event 2016-09-15T16:18:21.357-04:00 IBM Power Systems Management Console -1086678592 df1c6322-4cda-3685-8614-6bed4aa679a4 0 MODIFY_URI 1473962006579 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 PartitionState,Other 056e9bf3-b6b7-3c0c-893f-706500774899 Event 2016-09-15T16:18:21.358-04:00 IBM Power Systems Management Console 480588277 056e9bf3-b6b7-3c0c-893f-706500774899 0 MODIFY_URI 1473962006580 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 798d85fc-ebe7-3685-92f6-b2f28e571e7f Event 2016-09-15T16:18:21.359-04:00 IBM Power Systems Management Console -1043572889 798d85fc-ebe7-3685-92f6-b2f28e571e7f 0 
MODIFY_URI 1473962006581 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 PartitionState,Other 99ab3339-675a-3c49-a888-c4029aca349a Event 2016-09-15T16:18:21.359-04:00 IBM Power Systems Management Console 484336599 99ab3339-675a-3c49-a888-c4029aca349a 0 MODIFY_URI 1473962006582 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3 Other 0235867e-67bc-328a-993c-e20a359bffdf Event 2016-09-15T16:18:21.360-04:00 IBM Power Systems Management Console -584194468 0235867e-67bc-328a-993c-e20a359bffdf 0 MODIFY_URI 1473962006583 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 NVRAM 9cf6946f-7e82-3183-86f7-9d0a975017ca Event 2016-09-15T16:18:21.361-04:00 IBM Power Systems Management Console -1600796637 9cf6946f-7e82-3183-86f7-9d0a975017ca 0 MODIFY_URI 1473962006584 http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/0AAD02EB-BDFE-49F3-9806-92EEBCE354C4 ReferenceCode,Other END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/ltm_feed2.txt0000664000175000017500000024426613571367171022013 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'RawMetrics/LongTermMonitor'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ c5d782c7-44e4-3086-ad15-b16fb039d63b 2015-05-27T08:17:30.000Z LongTermMetrics ManagedSystem c5d782c7-44e4-3086-ad15-b16fb039d63b 5362f2b2-2b33-4bdb-9ee4-67fb926e76a3 2015-05-27T08:13:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081300+0000.json 2015-05-27T08:13:00.000-05:00 IBM Power Systems Management Console 41fd67cf-3df8-4b21-95c6-34670e9d600a 2015-05-27T08:17:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081730+0000.json 2015-05-27T08:17:30.000Z IBM Power Systems Management Console 6f510966-41f7-46e6-a138-de55120f4c34 2015-05-27T08:06:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080630+0000.json 2015-05-27T08:06:30.000Z IBM Power Systems Management Console 82cf332d-f6fe-4fdc-9d36-53243b64eb77 2015-05-27T07:55:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075530+0000.json 2015-05-27T07:55:30.000Z IBM Power Systems Management Console 704d78d9-5e94-4666-8ab2-27c7bdb597e9 2015-05-27T07:56:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075600+0000.json 2015-05-27T07:56:00.000Z IBM Power Systems Management Console e30f3214-4332-4417-932f-b7ca8f6f924a 2015-05-27T07:56:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075630+0000.json 2015-05-27T07:56:30.000Z IBM Power Systems Management Console b6b8c0a0-24d4-41f8-b958-4d35465ddbfd 2015-05-27T07:58:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075830+0000.json 2015-05-27T07:58:30.000Z IBM Power Systems Management Console 10c00a3d-9246-492d-921a-52ac8adcffc3 2015-05-27T08:04:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080400+0000.json 
2015-05-27T08:04:00.000Z IBM Power Systems Management Console 3a4e7e06-3880-4c73-9a43-0a9b59741514 2015-05-27T07:47:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074730+0000.json 2015-05-27T07:47:30.000Z IBM Power Systems Management Console 2c1b4bda-9cf3-4cd9-b6d3-1f8d45fd76e1 2015-05-27T08:16:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081630+0000.json 2015-05-27T08:16:30.000Z IBM Power Systems Management Console f18d6387-e704-456f-8c13-2dc4af207c3d 2015-05-27T08:07:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080700+0000.json 2015-05-27T08:07:00.000Z IBM Power Systems Management Console 1187a32e-8198-431b-adf8-e90b95cbb79b 2015-05-27T07:54:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075400+0000.json 2015-05-27T07:54:00.000Z IBM Power Systems Management Console 8e612d8f-aac1-47cd-b0c7-2b40f3d8abd9 2015-05-27T07:46:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074600+0000.json 2015-05-27T07:46:00.000Z IBM Power Systems Management Console a1622072-6ab5-4bfd-bb34-2e65c1cd2fc5 2015-05-27T08:04:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080430+0000.json 2015-05-27T08:04:30.000Z IBM Power Systems Management Console 11fffb4e-a396-48ef-9a45-35500c3a83eb 2015-05-27T08:00:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080030+0000.json 2015-05-27T08:00:30.000Z IBM Power Systems Management Console 082c5f02-bdcb-42b4-8c55-98ee179ed1be 2015-05-27T07:55:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075500+0000.json 2015-05-27T07:55:00.000Z IBM Power Systems Management Console 9750a08e-356a-4b60-b6d4-e5703109e7b1 2015-05-27T07:51:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075100+0000.json 2015-05-27T07:51:00.000Z IBM Power Systems Management Console e3c4bcce-daf4-44c4-bcfd-cf015e62a22d 2015-05-27T07:54:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075430+0000.json 2015-05-27T07:54:30.000Z IBM Power Systems Management Console 1338723a-a726-4438-a735-8b2462747534 2015-05-27T07:53:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075330+0000.json 2015-05-27T07:53:30.000Z IBM Power 
Systems Management Console e2a6c413-7c96-4b8d-9b6c-726c549195e3 2015-05-27T08:02:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080200+0000.json 2015-05-27T08:02:00.000Z IBM Power Systems Management Console 70e940cc-4ac1-4723-8703-a2b594cd44de 2015-05-27T08:14:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081400+0000.json 2015-05-27T08:14:00.000Z IBM Power Systems Management Console 0557b2d0-4e7c-4c33-ac8e-7cae14c0fbcb 2015-05-27T07:59:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075900+0000.json 2015-05-27T07:59:00.000Z IBM Power Systems Management Console b4d89934-c54b-4d97-a807-3730c0f3fb4a 2015-05-27T07:50:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075000+0000.json 2015-05-27T07:50:00.000Z IBM Power Systems Management Console 63430211-246a-4333-8e48-bb2f9befe6a2 2015-05-27T08:13:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081330+0000.json 2015-05-27T08:13:30.000Z IBM Power Systems Management Console b37d4b57-fede-478a-bfcd-e62d42c92e5e 2015-05-27T08:08:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080830+0000.json 2015-05-27T08:08:30.000Z IBM Power Systems Management Console 630016a8-191a-4f01-b36b-459a674a8100 2015-05-27T07:51:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075130+0000.json 2015-05-27T07:51:30.000Z IBM Power Systems Management Console 6cf4c7bf-cc25-45d0-b8a4-408a0aeac932 2015-05-27T08:06:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080600+0000.json 2015-05-27T08:06:00.000Z IBM Power Systems Management Console 4f92a6a1-5e7e-427b-9f17-e4da94986ae0 2015-05-27T07:48:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074830+0000.json 2015-05-27T07:48:30.000Z IBM Power Systems Management Console 16dcf227-99bb-4124-a2e1-27b83f6d8268 2015-05-27T07:53:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075300+0000.json 2015-05-27T07:53:00.000Z IBM Power Systems Management Console f07ca4b9-ad43-495d-8dfb-a7bdad444170 2015-05-27T08:14:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081430+0000.json 2015-05-27T08:14:30.000Z IBM Power Systems Management Console 
062a7c70-be95-4382-827f-896b98400a74 2015-05-27T08:10:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081000+0000.json 2015-05-27T08:10:00.000Z IBM Power Systems Management Console 07260191-fe08-489f-819c-42f4ba07bb1c 2015-05-27T07:48:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074800+0000.json 2015-05-27T07:48:00.000Z IBM Power Systems Management Console 11e4235f-374e-42e6-be64-edd7b9338312 2015-05-27T08:15:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081500+0000.json 2015-05-27T08:15:00.000Z IBM Power Systems Management Console 9450f9f5-45b5-4fde-8293-f7083b58a832 2015-05-27T07:52:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075200+0000.json 2015-05-27T07:52:00.000Z IBM Power Systems Management Console 7083f9cb-99c9-413d-bfe6-1c26668ea31d 2015-05-27T07:49:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074900+0000.json 2015-05-27T07:49:00.000Z IBM Power Systems Management Console b84cb5b9-3fa2-4aeb-b8c0-17e94e80e40b 2015-05-27T07:52:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075230+0000.json 2015-05-27T07:52:30.000Z IBM Power Systems Management Console 8cc45f12-bc62-484c-8981-782e1d8c5f0e 2015-05-27T08:10:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081030+0000.json 2015-05-27T08:10:30.000Z IBM Power Systems Management Console d6d3f30f-3992-4053-a9a1-ce7d29097f2c 2015-05-27T07:58:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075800+0000.json 2015-05-27T07:58:00.000Z IBM Power Systems Management Console 6276d95a-bfb4-42f8-8aed-034088825632 2015-05-27T07:57:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075730+0000.json 2015-05-27T07:57:30.000Z IBM Power Systems Management Console 37943851-d7c1-4898-83c8-188979fd295a 2015-05-27T08:08:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080800+0000.json 2015-05-27T08:08:00.000Z IBM Power Systems Management Console 6440bca8-bf7f-408e-891e-2abbeb2fc82c 2015-05-27T08:05:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080500+0000.json 2015-05-27T08:05:00.000Z IBM Power Systems Management Console b027afda-2f3e-4abd-9d39-c4c14018530a 
2015-05-27T08:00:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080000+0000.json 2015-05-27T08:00:00.000Z IBM Power Systems Management Console 0ca31ff0-27e8-404c-9bbb-fa913a292e8a 2015-05-27T07:59:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075930+0000.json 2015-05-27T07:59:30.000Z IBM Power Systems Management Console 5c864ceb-0b6a-4588-bd1b-2890a90155e3 2015-05-27T08:17:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081700+0000.json 2015-05-27T08:17:00.000Z IBM Power Systems Management Console 6986aa1c-5b1d-4466-8107-8b2295b15d8c 2015-05-27T07:57:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T075700+0000.json 2015-05-27T07:57:00.000Z IBM Power Systems Management Console c539c758-547e-4cb9-8455-253510027164 2015-05-27T07:45:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074530+0000.json 2015-05-27T07:45:30.000Z IBM Power Systems Management Console 1df71ccc-da64-41a6-a1e4-9eae0a19ea04 2015-05-27T08:05:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080530+0000.json 2015-05-27T08:05:30.000Z IBM Power Systems Management Console c6394bf8-228b-4e9c-a0b6-550ca6222450 2015-05-27T08:01:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080100+0000.json 2015-05-27T08:01:00.000Z IBM Power Systems Management Console 1429cd27-82ff-43aa-bd1e-af4edb8ca845 2015-05-27T08:11:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081130+0000.json 2015-05-27T08:11:30.000Z IBM Power Systems Management Console 3b984cdf-f176-49c6-a558-7615b65fb06b 2015-05-27T08:15:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081530+0000.json 2015-05-27T08:15:30.000Z IBM Power Systems Management Console e6265207-baa8-49c6-9bfd-3d69ec44e92a 2015-05-27T08:09:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080900+0000.json 2015-05-27T08:09:00.000Z IBM Power Systems Management Console 79af7b93-15ce-4f47-b2b9-faae37a20a79 2015-05-27T07:46:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074630+0000.json 2015-05-27T07:46:30.000Z IBM Power Systems Management Console b3aa92d9-462e-4236-b616-cda351e12575 2015-05-27T07:50:30.000Z 
LTM_8247-22L*2125D4A_vios_1_20150527T075030+0000.json 2015-05-27T07:50:30.000Z IBM Power Systems Management Console 9af55902-48cc-4bff-8158-16e417d235bd 2015-05-27T08:12:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081200+0000.json 2015-05-27T08:12:00.000Z IBM Power Systems Management Console 7d9cd157-2839-4c06-9a2f-c5c185686eb3 2015-05-27T08:12:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081230+0000.json 2015-05-27T08:12:30.000Z IBM Power Systems Management Console 67318ca0-bcac-4765-9134-3db3561f6bab 2015-05-27T08:07:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080730+0000.json 2015-05-27T08:07:30.000Z IBM Power Systems Management Console 4660e4ca-b69b-490e-ba1d-1fd4e0e566e2 2015-05-27T08:01:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080130+0000.json 2015-05-27T08:01:30.000Z IBM Power Systems Management Console 1a67418d-eba7-4e42-952c-b8d25f4ae2ce 2015-05-27T08:03:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080330+0000.json 2015-05-27T08:03:30.000Z IBM Power Systems Management Console 0f7bf9fe-c855-451a-bb51-9495bd1af0ce 2015-05-27T08:02:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080230+0000.json 2015-05-27T08:02:30.000Z IBM Power Systems Management Console 23883fcd-607e-42ee-b399-446c2c5620eb 2015-05-27T07:49:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074930+0000.json 2015-05-27T07:49:30.000Z IBM Power Systems Management Console 5bec7742-238a-41d4-b6fe-82d5f785e7c5 2015-05-27T08:03:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080300+0000.json 2015-05-27T08:03:00.000Z IBM Power Systems Management Console cb87008b-b9c1-41e4-bc19-57d55bed2c29 2015-05-27T08:09:30.000Z LTM_8247-22L*2125D4A_vios_1_20150527T080930+0000.json 2015-05-27T08:09:30.000Z IBM Power Systems Management Console 39b40b93-af2f-4a95-88b6-eff09104a612 2015-05-27T07:47:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T074700+0000.json 2015-05-27T07:47:00.000Z IBM Power Systems Management Console 32233353-e1d8-4c65-9cca-21a4a9a3b75e 2015-05-27T08:16:00.000Z 
LTM_8247-22L*2125D4A_vios_1_20150527T081600+0000.json 2015-05-27T08:16:00.000Z IBM Power Systems Management Console a56df013-316f-4a4d-89e7-01e9fdfc8b40 2015-05-27T08:11:00.000Z LTM_8247-22L*2125D4A_vios_1_20150527T081100+0000.json 2015-05-27T08:11:00.000Z IBM Power Systems Management Console 2d4072f8-bfc3-46b0-8e54-e9f3c44b2088 2015-05-27T08:07:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080730+0000.json 2015-05-27T08:07:30.000-05:00 IBM Power Systems Management Console cc253e36-5825-4ef6-8835-579b3903be22 2015-05-27T07:55:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075500+0000.json 2015-05-27T07:55:00.000Z IBM Power Systems Management Console c43accaf-6520-49f4-a523-f70373fad3f6 2015-05-27T08:00:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080030+0000.json 2015-05-27T08:00:30.000Z IBM Power Systems Management Console 6e44ba1a-9c7e-49d8-a1de-4d2d3187e19c 2015-05-27T07:52:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075200+0000.json 2015-05-27T07:52:00.000Z IBM Power Systems Management Console 3c8a65fb-2a21-40f0-ba43-d2e72a19ab2c 2015-05-27T08:11:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081130+0000.json 2015-05-27T08:11:30.000Z IBM Power Systems Management Console 55985117-0484-419f-af90-1fa057105618 2015-05-27T07:55:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075530+0000.json 2015-05-27T07:55:30.000Z IBM Power Systems Management Console 67078ea3-8edd-4544-8824-961cad789b90 2015-05-27T07:59:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075900+0000.json 2015-05-27T07:59:00.000Z IBM Power Systems Management Console 2fd41e51-aee3-4233-9ae9-3a05b5885365 2015-05-27T08:01:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080100+0000.json 2015-05-27T08:01:00.000Z IBM Power Systems Management Console 469f912c-1f3b-46be-a40b-fefdca94fd34 2015-05-27T08:08:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080830+0000.json 2015-05-27T08:08:30.000Z IBM Power Systems Management Console dc785f67-065c-48f0-981f-5f5df7f55363 2015-05-27T07:57:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075730+0000.json 
2015-05-27T07:57:30.000Z IBM Power Systems Management Console 685ab968-94f6-466b-954f-ac01d32e73ef 2015-05-27T08:16:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081600+0000.json 2015-05-27T08:16:00.000Z IBM Power Systems Management Console 39ebdab6-c455-404c-afa4-06fa080c885f 2015-05-27T07:51:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075100+0000.json 2015-05-27T07:51:00.000Z IBM Power Systems Management Console 8ea9e33e-b08a-43b4-ac4b-ef35bd9ffcae 2015-05-27T08:03:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080330+0000.json 2015-05-27T08:03:30.000Z IBM Power Systems Management Console 86e79382-d4dd-48f1-ae83-1984e92f344b 2015-05-27T08:10:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081000+0000.json 2015-05-27T08:10:00.000Z IBM Power Systems Management Console dd6ecf8e-b714-4c08-82df-552d42957c43 2015-05-27T07:48:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T074800+0000.json 2015-05-27T07:48:00.000Z IBM Power Systems Management Console 9988f9da-4399-4577-a188-4bf6e90bd0dd 2015-05-27T07:56:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075600+0000.json 2015-05-27T07:56:00.000Z IBM Power Systems Management Console 442934d1-8e02-4321-a1db-f7f3b125ceef 2015-05-27T07:56:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075630+0000.json 2015-05-27T07:56:30.000Z IBM Power Systems Management Console bfe3217d-ae18-491f-9541-d4089353c0cc 2015-05-27T08:00:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080000+0000.json 2015-05-27T08:00:00.000Z IBM Power Systems Management Console 1d34a90b-8f19-4165-b47a-2c4877f59feb 2015-05-27T08:17:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081700+0000.json 2015-05-27T08:17:00.000Z IBM Power Systems Management Console 55233fa8-c44a-40f7-aede-4ccbc15f7898 2015-05-27T08:04:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080430+0000.json 2015-05-27T08:04:30.000Z IBM Power Systems Management Console 1992f9ea-7745-41bf-bffc-7903d5624ca5 2015-05-27T07:45:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T074530+0000.json 2015-05-27T07:45:30.000Z IBM Power Systems Management Console 
d699acf8-cfc6-4491-99c8-d984c0c33354 2015-05-27T07:47:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T074700+0000.json 2015-05-27T07:47:00.000Z IBM Power Systems Management Console 29d3068a-3f7b-4527-99c0-ec60c21c16e6 2015-05-27T07:46:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T074630+0000.json 2015-05-27T07:46:30.000Z IBM Power Systems Management Console deb97e2d-e09d-4c33-bb02-2eb75c77458c 2015-05-27T08:04:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080400+0000.json 2015-05-27T08:04:00.000Z IBM Power Systems Management Console e32931ea-14c1-4d9d-85d5-91ddad91b374 2015-05-27T07:49:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T074930+0000.json 2015-05-27T07:49:30.000Z IBM Power Systems Management Console b42981ce-5664-49ca-8fe2-bc718f7296d0 2015-05-27T07:53:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075330+0000.json 2015-05-27T07:53:30.000Z IBM Power Systems Management Console 36817959-e560-445d-a0c7-fde977f30c4e 2015-05-27T07:46:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T074600+0000.json 2015-05-27T07:46:00.000Z IBM Power Systems Management Console ed4e118d-1456-4d42-8aa8-8c63228e3ff3 2015-05-27T08:02:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080200+0000.json 2015-05-27T08:02:00.000Z IBM Power Systems Management Console e2194be7-a3b1-48af-8c10-00ca92415269 2015-05-27T07:50:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075000+0000.json 2015-05-27T07:50:00.000Z IBM Power Systems Management Console 2ccdf36e-5126-41cd-adb6-2e54ede54cb4 2015-05-27T08:05:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080530+0000.json 2015-05-27T08:05:30.000Z IBM Power Systems Management Console b9f82418-3542-4f4c-add0-b5ccc96a1bcd 2015-05-27T08:13:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081300+0000.json 2015-05-27T08:13:00.000Z IBM Power Systems Management Console 00afbbb3-4303-4678-81f6-1530c953251a 2015-05-27T08:07:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080700+0000.json 2015-05-27T08:07:00.000Z IBM Power Systems Management Console aa28f179-a6e2-48ef-8fc4-d1141b6230ee 2015-05-27T08:06:30.000Z 
LTM_8247-22L*2125D4A_phyp_20150527T080630+0000.json 2015-05-27T08:06:30.000Z IBM Power Systems Management Console 2ceae18f-e7a4-40b8-a7f8-630f4c592a57 2015-05-27T07:58:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075800+0000.json 2015-05-27T07:58:00.000Z IBM Power Systems Management Console 0a437e05-8576-4f1a-a5de-eab1a4456949 2015-05-27T08:03:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080300+0000.json 2015-05-27T08:03:00.000Z IBM Power Systems Management Console d52b0f39-f457-4200-ac18-864a50d67124 2015-05-27T07:59:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075930+0000.json 2015-05-27T07:59:30.000Z IBM Power Systems Management Console 12ab23e9-ab81-4490-bf70-72e1899ed342 2015-05-27T08:02:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080230+0000.json 2015-05-27T08:02:30.000Z IBM Power Systems Management Console 2213c10c-b9f4-4b81-83c0-8f99bae88823 2015-05-27T08:08:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080800+0000.json 2015-05-27T08:08:00.000Z IBM Power Systems Management Console 6654dc7f-889d-4d8f-af5c-521732d73699 2015-05-27T08:10:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081030+0000.json 2015-05-27T08:10:30.000Z IBM Power Systems Management Console d4422415-de52-4e72-8a41-558f20835884 2015-05-27T08:05:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080500+0000.json 2015-05-27T08:05:00.000Z IBM Power Systems Management Console 7cf10c30-1411-44f0-9002-7110206a6475 2015-05-27T08:12:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081230+0000.json 2015-05-27T08:12:30.000Z IBM Power Systems Management Console 2a208e11-9348-48f5-ba87-e45899111ff3 2015-05-27T07:47:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T074730+0000.json 2015-05-27T07:47:30.000Z IBM Power Systems Management Console aea959e5-b4e6-4a1c-bb4d-7145c3730d18 2015-05-27T07:52:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075230+0000.json 2015-05-27T07:52:30.000Z IBM Power Systems Management Console 8f355c55-ffb1-4b06-a550-c9f4097f4781 2015-05-27T08:14:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081400+0000.json 
2015-05-27T08:14:00.000Z IBM Power Systems Management Console 2e86b2d0-d1f9-4d6f-bb48-9d17f8532bbd 2015-05-27T07:54:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075400+0000.json 2015-05-27T07:54:00.000Z IBM Power Systems Management Console b7fba167-1ebc-4997-81a5-02eef7c3b835 2015-05-27T07:50:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075030+0000.json 2015-05-27T07:50:30.000Z IBM Power Systems Management Console 078c8068-c0f5-45c8-a6c9-abc62e02ae22 2015-05-27T08:16:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081630+0000.json 2015-05-27T08:16:30.000Z IBM Power Systems Management Console 5372a641-5b0c-4608-bac7-c110772fb46b 2015-05-27T08:09:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080930+0000.json 2015-05-27T08:09:30.000Z IBM Power Systems Management Console 240c55ad-d017-444c-8698-bbdfed61ab18 2015-05-27T08:13:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081330+0000.json 2015-05-27T08:13:30.000Z IBM Power Systems Management Console 4536df57-8a26-45f1-9a96-032529ee7c24 2015-05-27T08:06:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080600+0000.json 2015-05-27T08:06:00.000Z IBM Power Systems Management Console 1df1751d-1d22-4311-b479-e52457157ac5 2015-05-27T07:49:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T074900+0000.json 2015-05-27T07:49:00.000Z IBM Power Systems Management Console 51c76359-1248-48cd-9d52-cda28015ed18 2015-05-27T07:48:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T074830+0000.json 2015-05-27T07:48:30.000Z IBM Power Systems Management Console fa41a0ac-0202-4ee4-b030-1d9e8548f5f1 2015-05-27T08:17:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081730+0000.json 2015-05-27T08:17:30.000Z IBM Power Systems Management Console 601a4031-3ecd-4378-8dc5-63e12c131f89 2015-05-27T08:15:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081530+0000.json 2015-05-27T08:15:30.000Z IBM Power Systems Management Console e2a339cb-d739-49cc-a1f2-a83096f4f67a 2015-05-27T08:01:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T080130+0000.json 2015-05-27T08:01:30.000Z IBM Power Systems Management Console 
eab46235-d348-460e-be5f-15eb40be68ec 2015-05-27T08:14:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T081430+0000.json 2015-05-27T08:14:30.000Z IBM Power Systems Management Console a6f5d48d-508a-459d-87a3-30773de4af06 2015-05-27T07:51:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075130+0000.json 2015-05-27T07:51:30.000Z IBM Power Systems Management Console da2f6e66-f9ad-47b1-8dc8-d3bf079b5ba6 2015-05-27T07:57:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075700+0000.json 2015-05-27T07:57:00.000Z IBM Power Systems Management Console 8edbd596-53a3-4b60-9aeb-da40aa8544ac 2015-05-27T07:54:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075430+0000.json 2015-05-27T07:54:30.000Z IBM Power Systems Management Console 53c08030-d105-4cc8-86bc-9dca71cb6ed4 2015-05-27T07:53:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T075300+0000.json 2015-05-27T07:53:00.000Z IBM Power Systems Management Console 90cb7ada-b4c4-45b1-99ea-f94d01610da4 2015-05-27T08:11:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081100+0000.json 2015-05-27T08:11:00.000Z IBM Power Systems Management Console 5c0ae3f3-6436-45c7-8b18-c473a8b51926 2015-05-27T07:58:30.000Z LTM_8247-22L*2125D4A_phyp_20150527T075830+0000.json 2015-05-27T07:58:30.000Z IBM Power Systems Management Console abaf6eb3-ace4-4f1c-87dc-cf6359216e60 2015-05-27T08:09:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T080900+0000.json 2015-05-27T08:09:00.000Z IBM Power Systems Management Console b74cc46c-0665-4c96-98d7-15274ee0d046 2015-05-27T08:12:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081200+0000.json 2015-05-27T08:12:00.000Z IBM Power Systems Management Console ef484863-51d7-4efe-aad1-a056e4de45f7 2015-05-27T08:15:00.000Z LTM_8247-22L*2125D4A_phyp_20150527T081500+0000.json 2015-05-27T08:15:00.000Z IBM Power Systems Management Console END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/enterprise_pool_member_feed.txt0000664000175000017500000004711013571367171025662 0ustar neoneo00000000000000#################################################### # THIS IS AN 
AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh enterprise_pool_member_feed.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'passw0rd', 'reason': 'OK', 'host': 'localhost', 'user': 'neo', 'path': 'PowerEnterprisePool/e0bc50d4-196a-3d8b-87b9-5ca694c2eb66/PowerEnterprisePoolMember?group=None'} END OF SECTION} HEADERS{ {'Content-Length': '12851', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000J8O5MRIOqRltpYnoJYFsSxc:44ae1077-95b8-46ac-9bab-6ba92c1af191; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Wed, 10 Feb 2016 17:54:12 GMT', 'X-Transaction-ID': 'XT10001951', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Wed, 10 Feb 2016 17:54:12 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': '20af9cfd-a374-4384-964b-f4759d50aaf3', 'ETag': '11828871'} END OF SECTION} BODY{ 4f701a62-8d2f-30a5-8ced-9a09ad5f2b99 2016-02-10T12:54:12.506-05:00 IBM Power Systems Management Console 7e3ddc7f-6426-33d9-9f10-ef925d0386c2 PowerEnterprisePoolMember 2016-02-10T12:54:12.876-05:00 IBM Power Systems Management Console 1063371592 7e3ddc7f-6426-33d9-9f10-ef925d0386c2 0 4 0 2 0 0 0 Server-8284-22A-SN21B63CV 20 524288 8284 22A 21B63CV operating ip9-1-2-3 7042 CR7 10B6EDC true false 9.1.2.3 d8a95d37-1dbd-37bf-9caa-756f60056000 PowerEnterprisePoolMember 2016-02-10T12:54:12.881-05:00 IBM Power Systems Management Console -194335763 d8a95d37-1dbd-37bf-9caa-756f60056000 0 0 0 6 0 0 0 V7R2_14 16 262144 8246 L2D 100854A operating ip9-1-2-3 7042 CR7 10B6EDC true false 9.1.2.3 c40a7d74-6dd1-3fdf-897a-3c627891ebfc PowerEnterprisePoolMember 2016-02-10T12:54:12.885-05:00 IBM Power Systems Management Console -1851824255 c40a7d74-6dd1-3fdf-897a-3c627891ebfc 0 0 0 6 0 0 0 
Server-8284-22A-SN21B63DV 20 524288 8284 22A 21B63DV operating ip9-1-2-3 7042 CR7 10B6EDC true false 9.1.2.3 c2278015-5887-339d-8b0a-6397e4e1c1bd PowerEnterprisePoolMember 2016-02-10T12:54:12.890-05:00 IBM Power Systems Management Console 527358447 c2278015-5887-339d-8b0a-6397e4e1c1bd 0 0 0 6 0 0 0 V7R2_15 16 262144 8246 L2D 10085AA operating ip9-1-2-3 7042 CR7 10B6EDC true false 9.1.2.3 0de35e7b-278e-3e44-a44d-8a36779757ac PowerEnterprisePoolMember 2016-02-10T12:54:12.895-05:00 IBM Power Systems Management Console 418621051 0de35e7b-278e-3e44-a44d-8a36779757ac 0 0 0 6 0 0 0 V7R2_13 16 262144 8246 L2D 100857A operating ip9-1-2-3 7042 CR7 10B6EDC true false 9.1.2.3 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_feed3.txt0000664000175000017500000015350313571367171023157 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: k2http.py -refresh fake_virtual_switch.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/726e9cb3-6576-3df5-ab60-40893d51d074/VirtualIOServer'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 95b05dcf-1ff1-3243-827d-d8d3854696c7 2015-04-29T08:21:47.599-05:00 IBM Power Systems Management Console 
691019AF-506A-4896-AADE-607E21FA93EE VirtualIOServer 2015-04-29T08:21:49.674-05:00 IBM Power Systems Management Console 1088276233 691019AF-506A-4896-AADE-607E21FA93EE 0 false 1 200 true Empty slot 0 65535 65535 255 65535 65535 553713680 U78CB.001.WZS00A8-P1-C7 553713680 U78CB.001.WZS00A8-P1-C7 false true Empty slot 0 65535 65535 255 65535 65535 553713683 U78CB.001.WZS00A8-P1-C11 553713683 U78CB.001.WZS00A8-P1-C11 false true Quad 8 Gigabit Fibre Channel LP Adapter 4 1054 2 9522 4116 553713688 U78CB.001.WZS00A8-P1-C6 553713688 U78CB.001.WZS00A8-P1-C6 false true Universal Serial Bus UHC Spec 3075 1202 2 33345 4116 553713691 U78CB.001.WZS00A8-P1-T2 553713691 U78CB.001.WZS00A8-P1-T2 false true Empty slot 0 65535 65535 255 65535 65535 553713697 U78CB.001.WZS00A8-P1-C5 553713697 U78CB.001.WZS00A8-P1-C5 false true SAS RAID Controller, PCIe2, Dual-port 6Gb 260 1023 2 842 4116 553844757 U78CB.001.WZS00A8-P1-C14 553844757 U78CB.001.WZS00A8-P1-C14 false true Empty slot 0 65535 65535 255 65535 65535 553844765 U78CB.001.WZS00A8-P1-C9 553844765 U78CB.001.WZS00A8-P1-C9 false true 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 1056 1 5719 4116 553910302 U78CB.001.WZS00A8-P1-C10 553910302 U78CB.001.WZS00A8-P1-C10 false true Empty slot 0 65535 65535 255 65535 65535 553975839 U78CB.001.WZS00A8-P1-C15 553975839 U78CB.001.WZS00A8-P1-C15 false 4 Server 1 2E223A41C3D6 2 1 200 24576 6 24576 1024 AUTO 6 24576 24576 0 1024 0 false 21-25D0A false keep idle procs false keep idle procs false keep idle procs 2 2 0.00 1 0 1 2 running Virtual IO Server 691019AF-506A-4896-AADE-607E21FA93EE inactive true true false U8247.22L.2125D0A-V1-C4 U8247.22L.2125D0A-V1-C4 1 4 2E223A41C3D6 2 true 0 1 54F98B4D-92F5-45BC-8CB2-28DF1FC6D6DC VirtualIOServer 2015-04-29T08:21:49.684-05:00 IBM Power Systems Management Console 1019359186 54F98B4D-92F5-45BC-8CB2-28DF1FC6D6DC 0 false 6.1, 6100-09-04-1441 2 500 true Quad 8 Gigabit Fibre Channel LP Adapter 4 1054 2 9522 4116 553713705 
U78CB.001.WZS00A8-P1-C3 553713705 U78CB.001.WZS00A8-P1-C3 false true 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 1056 1 5719 4116 553779220 U78CB.001.WZS00A8-P1-C12 553779220 U78CB.001.WZS00A8-P1-C12 false 3 Server 2 3 4 4 Server 2 3 3 6 Server 2 3 5 7 Server 2 4 5 8 Client 2 4697E5C2A811 2 10 Server 2 5EF2D77E0AF1 50 1 500 2048 6 4096 1024 AUTO 6 4096 2048 0 1024 0 false vios1 false keep idle procs false keep idle procs false keep idle procs 4 8 0.00 4 0 1 8 running Virtual IO Server 54F98B4D-92F5-45BC-8CB2-28DF1FC6D6DC active 9.1.2.4 true false false 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent0 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C4-T1 U78AB.001.WZSH5ZY-P1-C4-T1 13U78AB.001.WZSH5ZY-P1-C4-T1 disabled ent8 false 1 disabled 8192 true U8246.L2C.0604C7A-V4-C2 U8246.L2C.0604C7A-V4-C2 false true 2 ALL 1683B6A32202 1 false false 2 ent4 1 U8246.L2C.0604C7A-V4-C3 U8246.L2C.0604C7A-V4-C3 false true 3 ALL 1683B6A32203 4094 false 100 150 175 200 250 300 333 350 900 1001 2227 2228 true 2 ent5 1 true en8 9.1.2.3 255.255.255.0 Active 10298abf2b234f52cb true U8246.L2C.0604C7A-V4-C2 U8246.L2C.0604C7A-V4-C2 false true 2 ALL 1683B6A32202 1 false false 2 ent4 1 U8246.L2C.0604C7A-V4-C3 U8246.L2C.0604C7A-V4-C3 false true 3 ALL 1683B6A32203 4094 false 100 150 175 200 250 300 333 350 900 1001 2227 2228 true 2 ent5 1 true 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78CB.001.WZS00A8-P1-C12-T4 U78CB.001.WZS00A8-P1-C12-T4 13U78CB.001.WZS00A8-P1-C12-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78CB.001.WZS00A8-P1-C12-T3 U78CB.001.WZS00A8-P1-C12-T3 13U78CB.001.WZS00A8-P1-C12-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78CB.001.WZS00A8-P1-C12-T2 U78CB.001.WZS00A8-P1-C12-T2 13U78CB.001.WZS00A8-P1-C12-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 
U78CB.001.WZS00A8-P1-C12-T4 en3 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78CB.001.WZS00A8-P1-C12-T3 en2 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 U78CB.001.WZS00A8-P1-C12-T2 en1 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_volume_group.txt0000664000175000017500000001776413571367171023664 0ustar neoneo00000000000000# Copyright 2014 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_volume_group.txt # #################################################### INFO{ {'comment': 'Used for testing test_volume_group.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/14B854F7-42CE-4FF0-BD57-1D117054E701/VolumeGroup/b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ b6bdbf1f-eddf-3c81-8801-9859eb6fedcb VolumeGroup 2014-12-17T03:38:34.635Z IBM Power Systems Management Console 1507843581 b6bdbf1f-eddf-3c81-8801-9859eb6fedcb 1418787514631 1051.1 1051.2 1063.3 image_pool 00f8d6de00004b000000014a54555cd9 1024 blank_media1 0eblank_media1 rw 0.0977 blank_media_2 0eblank_media_2 rw 0.0488 VMLibrary 11 22 33 SAS RAID 0 Disk Array U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw true 1089592 hdisk1 active 391BIBMIPR-0 5D8283000000004010IPR-0 5D82830003IBMsas false 0400f8d6de00004b000000014a54555cd9 44 55 Unlocked aes-xts-plain64 256 sha1 1 None asdf 0300f8d6de00004b000000014a54555cd9.1 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_with_volume_group_data.txt0000664000175000017500000014561513571367171026745 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_volume_group_with_vio_data.txt # #################################################### INFO{ {'comment': 'Used for testing test_volume_group.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/3443DB77-AED1-47ED-9AA5-3DB9C6CF7089/VolumeGroup/dc08da6c-8bff-3fa8-b0d9-9ba7405aca91'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 VirtualIOServer 2015-01-31T02:54:19.458Z IBM Power Systems Management Console 1722168167 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 1422672292644 false 191 POWER7 Disabled false false false false false false false normal 2125D4A1 VIOS 2.2.4.0 true true true true true 1 2000 false RAID Controller U78CB.001.WZS007Y 842 260 842 1023 4116 2 4116 4116 false false false false false false false false false false 553844757 RAID Controller U78CB.001.WZS007Y-P1-C14 U78CB.001.WZS007Y-P1-C14 C14 842 true 553844757 U78CB.001.WZS007Y-P1-C14 C14 false Universal Serial Bus UHC Spec U78CB.001.WZS007Y 33345 3075 33345 1202 4172 2 4172 4116 false false false false false false false false false false 553713691 Universal Serial Bus UHC Spec U78CB.001.WZS007Y-P1-T2 U78CB.001.WZS007Y-P1-T2 T2 33345 true 553713691 U78CB.001.WZS007Y-P1-T2 T2 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78CB.001.WZS007Y 5719 512 5719 1056 5348 1 5348 
4116 false false false false false false false false false false 553910302 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78CB.001.WZS007Y-P1-C10 U78CB.001.WZS007Y-P1-C10 C10 5719 true 553910302 U78CB.001.WZS007Y-P1-C10 C10 false Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y 9522 4 9522 1054 4215 2 4215 4116 false false false false false false false false false false 553713705 Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y-P1-C3 U78CB.001.WZS007Y-P1-C3 C3 U78CB.001.WZS007Y-P1-C3-T4 fcs3 1aU78CB.001.WZS007Y-P1-C3-T4 21000024FF649107 U78CB.001.WZS007Y-P1-C3-T3 fcs2 1aU78CB.001.WZS007Y-P1-C3-T3 21000024FF649106 U78CB.001.WZS007Y-P1-C3-T1 fcs0 1aU78CB.001.WZS007Y-P1-C3-T1 21000024FF649104 64 64 U78CB.001.WZS007Y-P1-C3-T2 fcs1 1aU78CB.001.WZS007Y-P1-C3-T2 21000024FF649105 553713705 U78CB.001.WZS007Y-P1-C3 C3 2000 false false 4096 0.0 7 4096 4096 0.0 7 0 0 4096 4096 0 4096 false true false false 0 4096 4096 false IOServer - SN2125D4A false 0.4 4 0.4 4 0.4 4 0 255 uncapped false uncapped false 4 0.4 0.4 0.4 0 255 4 4 0.4 255 running Virtual IO Server 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 default 0 0 active 9.1.2.4 25006020324608 false false true true bg_7f81628b_thorst_config.iso 0ebg_7f81628b_thorst_config.iso rw 0.000000 VMLibrary 1 true SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L405DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDQw false 270648 hdisk1 active 391BIBMIPR-0 5DB603000000004010IPR-0 5DB6030003IBMsas false SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L205DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDIw false 270648 hdisk0 active 391BIBMIPR-0 5DB603000000002010IPR-0 5DB6030003IBMsas false U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 true Client U8247.22L.2125D4A-V2-C2 U8247.22L.2125D4A-V2-C2 2 false true 2 1 3 U8247.22L.2125D4A-V1-C3 Server U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 1 false true 3 
vhost0 boot_7f81628b 2 2 U8247.22L.2125D4A-V2-C2 1eU8247.22L.2125D4A-V1-C3 1 None boot_7f81628b 0300025d4a00007a000000014b36d9deaf.1 0x8100000000000000 vtscsi0 09314645ddb7e3beca Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 false true 3 1 4 U8247.22L.2125D4A-V1-C4 Server U8247.22L.2125D4A-V1-C4 U8247.22L.2125D4A-V1-C4 1 false true 4 vhost1 bg_7f81628b_thorst_config.iso 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C4 bg_7f81628b_thorst_config.iso 0ebg_7f81628b_thorst_config.iso rw 0.000000 0x8100000000000000 vtopt0 19fbfe4c47fdd4738f 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T4 U78CB.001.WZS007Y-P1-C10-T4 13U78CB.001.WZS007Y-P1-C10-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T3 U78CB.001.WZS007Y-P1-C10-T3 13U78CB.001.WZS007Y-P1-C10-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T2 U78CB.001.WZS007Y-P1-C10-T2 13U78CB.001.WZS007Y-P1-C10-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78CB.001.WZS007Y-P1-C10-T4 en3 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78CB.001.WZS007Y-P1-C10-T3 en2 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.255.0 Active 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 U78CB.001.WZS007Y-P1-C10-T2 en1 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/lpar_pcm_data.txt0000664000175000017500000000747413571367171022736 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_lpar_20160328T031000-0400.json'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ { "lparUtil": [ { "uuid": "42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC", "id": "2", "name": "Ubuntu1410", "memory": { "pctRealMemAvbl": 80, "totalPgSpSizeCount": 1024, "totalPgSpFreeCount": 512, "vmActivePgCount": 64, "realMemSizeBytes": 1048576, "pctRealMemFree": 61, "vmPgInRate": 0, "vmPgOutRate": 25, "vmPgSpInRate": 0, "vmPgSpOutRate": 0 }, "timeStamp": "20160328T031000-0400" }, { "uuid": "5536C020-C995-4508-A010-5292CD21F307", "id": "13", "name": "test_vm2", "memory": { "pctRealMemAvbl": 81, "totalPgSpSizeCount": 2048, "totalPgSpFreeCount": 1024, "vmActivePgCount": 128, "realMemSizeBytes": 1048576, "pctRealMemFree": 62, "vmPgInRate": 0, "vmPgOutRate": 94, "vmPgSpInRate": 0, "vmPgSpOutRate": 0 }, "timeStamp": "20160328T031000-0400" }, { "uuid": "3B8B95E8-4D85-4599-803E-E2B3CD193033", "id": "8", "name": "test_vm3", "memory": { "pctRealMemAvbl": 82, "totalPgSpSizeCount": 4096, "totalPgSpFreeCount": 2048, "vmActivePgCount": 256, "realMemSizeBytes": 1048576, "pctRealMemFree": 60, "vmPgInRate": 0, "vmPgOutRate": 0, "vmPgSpInRate": 0, "vmPgSpOutRate": 0 }, "timeStamp": "20160328T031000-0400" }, { "uuid": "7C478B9C-64C6-4CD4-B6F8-3C9BBAA1CFD5", "id": "15", "name": "test_vm4", "memory": { "pctRealMemAvbl": 83, "totalPgSpSizeCount": 8192, "totalPgSpFreeCount": 4096, "vmActivePgCount": 512, "realMemSizeBytes": 1048576, "pctRealMemFree": 62, "vmPgInRate": 0, "vmPgOutRate": 
80, "vmPgSpInRate": 0, "vmPgSpOutRate": 0 }, "timeStamp": "20160328T031000-0400" }, { "uuid": "3B0237F9-26F1-41C7-BE57-A08C9452AD9D", "id": "24", "name": "3B0237F9-26F1-41C7-BE57-A08C9452AD9D", "errorInfo": { "errorId": "6001", "message": "Lpar is not in running state on Managed System" }, "timeStamp": null }, { "uuid": "vm_inactive_rmc", "id": "3", "name": "vm_inactive_rmc", "errorInfo": { "errorId": "6003", "message": "RMC is INACTIVE on lpar" }, "timeStamp": null } ] } END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_httperror_service_unavail.txt0000664000175000017500000000446013571367171026416 0ustar neoneo00000000000000#################################################### INFO{ {'comment': 'User for HttpErrorResponse', 'status': '503', 'pw': 'abc123', 'reason': 'Service Unavailable', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/uom/SharedStoragePool/3dc777cb-e7b1-3f3d-b904-b5e34b47c6fe?group=None' } END OF SECTION} HEADERS{ {'content-length': '1746', 'x-transactionrecord-uuid': 'f3d8e93a-3ca5-4308-9c38-2ffb9529002e', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000IpIgP5r5Z7GMlTUhrOhOyF4:7e93a4c1-31cf-47c7-a2e1-3ba43f7626f9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'connection': 'Close', 'x-transaction-id': 'XT10140897', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Fri, 17 Apr 2015 23:51:45 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 1a14745f-ebfd-4d4c-b22b-e6a6ab33e777 HttpErrorResponse 2015-04-17T23:51:46.756Z IBM Power Systems Management Console 503 /rest/api/uom/SharedStoragePool/3dc777cb-e7b1-3f3d-b904-b5e34b47c6fe Unknown internal error. 
Test error {If-None-Match=-1208061982, X-Audit-Memento=root, User-Agent=python-requests/2.5.3 CPython/2.7.6 Linux/3.13.0-49-generic, Accept=application/atom+xml, Accept-Encoding=gzip, deflate, Host=9.1.2.3:12443, Connection=keep-alive, X-API-Session=*******, X-Transaction-ID=XT99999999} END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/vfc_client_adapter_feed.txt0000664000175000017500000002122213571367171024732 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: refresh_httpresp.py vfc_client_adapter_feed.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': None, 'reason': 'OK', 'host': None, 'user': None, 'path': 'LogicalPartition/6A219EF0-591D-4199-ADC2-8F854B2F471B/VirtualFibreChannelClientAdapter'} END OF SECTION} HEADERS{ {'content-length': '7698', 'x-transactionrecord-uuid': '7683b31d-2089-4018-a0fc-f11cd6090704', 'x-powered-by': 'Servlet/3.1', 'set-cookie': 'JSESSIONID=0000YheE-SOeTpwDuklLPnanVY-:356ef63c-ea0f-4a3b-b94b-f6edad1632ed; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_3_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Tue, 22 Sep 2015 21:16:28 GMT', 'x-transaction-id': 'XT10519925', 'etag': '1387621433', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Tue, 22 Sep 2015 21:16:28 GMT', 'x-mc-type': 'PVM', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ e5b7e5b9-07b0-34db-9015-fd5eb192fad3 2015-09-22T17:16:28.843-04:00 IBM Power Systems Management Console e0d12002-bdac-3fb6-b567-bfcb17238de6 VirtualFibreChannelClientAdapter 2015-09-22T17:16:29.358-04:00 IBM Power Systems Management Console -256826913 e0d12002-bdac-3fb6-b567-bfcb17238de6 0 Client U8247.21L.212A64A-V25-C4 U8247.21L.212A64A-V25-C4 25 4 2 10 Server U8247.21L.212A64A-V2-C10 U8247.21L.212A64A-V2-C10 2 
10 vfchost5 25 4 1dU8247.21L.212A64A-V2-C10 fcs0 U78CB.001.WZS05RN-P1-C6-T1 fcs0 1aU78CB.001.WZS05RN-P1-C6-T1 21000024ff64eb60 20000024ff64eb60 57 64 c05076087cba0169 c05076087cba0168 7f28c1d6-4e56-3ad6-b79b-0ae808c46bba VirtualFibreChannelClientAdapter 2015-09-22T17:16:29.359-04:00 IBM Power Systems Management Console 759320183 7f28c1d6-4e56-3ad6-b79b-0ae808c46bba 0 Client U8247.21L.212A64A-V25-C5 U8247.21L.212A64A-V25-C5 25 5 3 21 Server U8247.21L.212A64A-V3-C21 U8247.21L.212A64A-V3-C21 3 21 vfchost2 25 5 1dU8247.21L.212A64A-V3-C21 fcs0 U78CB.001.WZS05RN-P1-C7-T1 fcs0 1aU78CB.001.WZS05RN-P1-C7-T1 21000024ff64f4c4 20000024ff64f4c4 58 64 c05076087cba016d c05076087cba016c END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/job_response_exception.txt0000664000175000017500000000751613571367171024713 0ustar neoneo00000000000000INFO{ {'comment': 'For power on-off testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/20414ABB-D6F0-4B3D-BB46-3822240BC4E9'} END OF SECTION} HEADERS{ {'content-length': '20079', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000gMeCzUqIZcs3oxLu4apVINO:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Tue, 30 Jul 2013 14:43:59 GMT', 'content-type': 'application/xml'} END OF SECTION} BODY{ ed556d14-0bed-304a-ad9d-4ad8db203166 JobResponse 2013-08-14T08:44:06.029-04:00 IBM Power Systems Management Console 1376474747145 1376484245981 1376484245982 FAILED_BEFORE_COMPLETION com.ibm.pmc.jaxb.api.server.jobs.PmcJobException: Unsupported commmand lssyscfg com.ibm.pmc.jaxb.api.server.jobs.PmcJobException: Unsupported commmand lssyscfg at com.ibm.pmc.rest.servlet.uom.phyp.internal.operations.uom.ManagementConsoleCLIRunner.execute(ManagementConsoleCLIRunner.java:75) at com.ibm.pmc.jaxb.api.server.jobs.AbstractPmcJob.execute(AbstractPmcJob.java:633) at 
org.quartz.core.JobRunShell.run(JobRunShell.java:216) at org.quartz.simpl.SimpleThreadPool$WorkerThread.run(SimpleThreadPool.java:549) Caused by: java.lang.Exception ... 4 more CLIRunner ManagementConsole cmd lssyscfg -r sys -F name END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/cluster_LULinkedClone_job_template.txt0000664000175000017500000000535413571367171027061 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh cluster_LULinkedClone_job_template.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'Cluster/do/LULinkedClone'} END OF SECTION} HEADERS{ {'content-length': '1475', 'content-type': 'application/atom+xml', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000oUYEkL9QvE6kg3vgejRcao8:31d026d8-f0bc-4237-8211-43003f07d3ab; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_2_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Tue, 17 Feb 2015 05:23:58 GMT', 'x-transaction-id': 'XT10091724', 'etag': '-513714866', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Tue, 17 Feb 2015 05:23:58 GMT', 'x-transactionrecord-uuid': 'e80cec6d-4b01-4578-b3b4-65bd5058773a'} END OF SECTION} BODY{ f3c01a81-95b4-37cd-baaf-e7dd9dd298e2 JobRequest 2015-03-28T06:09:07.135Z IBM Power Systems Management Console LULinkedClone Cluster END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios.txt0000664000175000017500000026730613571367171022120 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh upload_vios.txt # #################################################### INFO{ {'comment': 'Use for required vio checking', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/14B854F7-42CE-4FF0-BD57-1D117054E701'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 14B854F7-42CE-4FF0-BD57-1D117054E701 VirtualIOServer 2015-01-07T23:10:39.866Z IBM Power Systems Management Console -1930559556 14B854F7-42CE-4FF0-BD57-1D117054E701 1420672239770 false 191 POWER7 Disabled false false false false false false false normal 10D6DET1 VIOS 2.2.3.4 true true true true true 1 1000 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553713680 Empty slot U78C9.001.WZS0095-P1-C7 U78C9.001.WZS0095-P1-C7 C7 65535 true 553713680 U78C9.001.WZS0095-P1-C7 C7 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553713683 Empty slot U78C9.001.WZS0095-P1-C11 U78C9.001.WZS0095-P1-C11 C11 65535 true 553713683 U78C9.001.WZS0095-P1-C11 C11 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553779220 Empty slot U78C9.001.WZS0095-P1-C12 U78C9.001.WZS0095-P1-C12 C12 65535 true 553779220 U78C9.001.WZS0095-P1-C12 C12 false RAID Controller U78C9.001.WZS0095 842 
260 842 1023 4116 1 4116 4116 false false false false false false false false false false 553844757 RAID Controller U78C9.001.WZS0095-P1-C14 U78C9.001.WZS0095-P1-C14 C14 842 true 553844757 U78C9.001.WZS0095-P1-C14 C14 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553713688 Empty slot U78C9.001.WZS0095-P1-C6 U78C9.001.WZS0095-P1-C6 C6 65535 true 553713688 U78C9.001.WZS0095-P1-C6 C6 false Universal Serial Bus UHC Spec U78C9.001.WZS0095 33345 3075 33345 1202 4172 2 4172 4116 false false false false false false false false false false 553713691 Universal Serial Bus UHC Spec U78C9.001.WZS0095-P1-T2 U78C9.001.WZS0095-P1-T2 T2 33345 true 553713691 U78C9.001.WZS0095-P1-T2 T2 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553844765 Empty slot U78C9.001.WZS0095-P1-C9 U78C9.001.WZS0095-P1-C9 C9 65535 true 553844765 U78C9.001.WZS0095-P1-C9 C9 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78C9.001.WZS0095 5719 512 5719 1056 5348 1 5348 4116 false false false false false false false false false false 553910302 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78C9.001.WZS0095-P1-C10 U78C9.001.WZS0095-P1-C10 C10 5719 true 553910302 U78C9.001.WZS0095-P1-C10 C10 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553975839 Empty slot U78C9.001.WZS0095-P1-C15 U78C9.001.WZS0095-P1-C15 C15 65535 true 553975839 U78C9.001.WZS0095-P1-C15 C15 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553713697 Empty slot U78C9.001.WZS0095-P1-C5 U78C9.001.WZS0095-P1-C5 C5 65535 true 553713697 U78C9.001.WZS0095-P1-C5 C5 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78C9.001.WZS0095 5719 512 5719 
1056 5348 1 5348 4116 false false false false false false false false false false 553713704 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78C9.001.WZS0095-P1-C2 U78C9.001.WZS0095-P1-C2 C2 5719 true 553713704 U78C9.001.WZS0095-P1-C2 C2 false Empty slot 0 0 0 U78C9.001.WZS0095 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553713705 Empty slot U78C9.001.WZS0095-P1-C3 U78C9.001.WZS0095-P1-C3 C3 65535 true 553713705 U78C9.001.WZS0095-P1-C3 C3 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78C9.001.WZS0095 61696 3076 61696 906 4319 3 4319 4116 false false false false false false false false false false 553779228 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78C9.001.WZS0095-P1-C8 U78C9.001.WZS0095-P1-C8 C8 U78C9.001.WZS0095-P1-C8-T2 fcs1 1aU78C9.001.WZS0095-P1-C8-T2 10000090FA45473B U78C9.001.WZS0095-P1-C8-T1 fcs0 1aU78C9.001.WZS0095-P1-C8-T1 10000090FA45473A 64 64 553779228 U78C9.001.WZS0095-P1-C8 C8 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78C9.001.WZS0095 61696 3076 61696 906 4319 3 4319 4116 false false false false false false false false false false 553713696 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78C9.001.WZS0095-P1-C4 U78C9.001.WZS0095-P1-C4 C4 U78C9.001.WZS0095-P1-C4-T1 fcs2 1aU78C9.001.WZS0095-P1-C4-T1 10000090FA451758 U78C9.001.WZS0095-P1-C4-T2 fcs3 1aU78C9.001.WZS0095-P1-C4-T2 10000090FA451759 553713696 U78C9.001.WZS0095-P1-C4 C4 1000 false false 4096 0.0 7 4096 4096 0.0 7 0 0 4096 4096 0 4096 false true false false 0 4096 4096 false IO Server false 0.2 4 4 4 0.2 4 0 255 uncapped false uncapped false 4 4 0.2 0.2 0 255 4 4 0.2 255 running Virtual IO Server 14B854F7-42CE-4FF0-BD57-1D117054E701 default 0 0 active 9.1.2.4 156972164838656 false true true true blank_media1 0eblank_media1 rw 0.0977 blank_media_2 0eblank_media_2 rw 0.0488 bob_iso 0ebob_iso rw 0.000000 VMLibrary 11 true SAS RAID 0 Disk Array 
U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw false 1089592 hdisk1 active 391BIBMIPR-0 5D8283000000004010IPR-0 5D82830003IBMsas false SAS RAID 0 Disk Array U78C9.001.WZS0095-P1-C14-R1-L205D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDIw false 1089592 hdisk0 active 391BIBMIPR-0 5D8283000000002010IPR-0 5D82830003IBMsas false 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78C9.001.WZS0095-P1-C10-T1 U78C9.001.WZS0095-P1-C10-T1 13U78C9.001.WZS0095-P1-C10-T1 disabled ent9 false 2227 disabled 8192 true U8286.42A.10D6DET-V1-C2 U8286.42A.10D6DET-V1-C2 true true 2 ALL 8EC3EE1FF502 2227 false false 0 ent8 1 en9 9.1.2.4 255.255.255.0 Active 10103183065bdca682 true U8286.42A.10D6DET-V1-C2 U8286.42A.10D6DET-V1-C2 true true 2 ALL 8EC3EE1FF502 2227 false false 0 ent8 1 true 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent5 physicalEthernetAdpter U78C9.001.WZS0095-P1-C2-T2 U78C9.001.WZS0095-P1-C2-T2 13U78C9.001.WZS0095-P1-C2-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent7 physicalEthernetAdpter U78C9.001.WZS0095-P1-C2-T4 U78C9.001.WZS0095-P1-C2-T4 13U78C9.001.WZS0095-P1-C2-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78C9.001.WZS0095-P1-C10-T3 U78C9.001.WZS0095-P1-C10-T3 13U78C9.001.WZS0095-P1-C10-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent6 physicalEthernetAdpter U78C9.001.WZS0095-P1-C2-T3 U78C9.001.WZS0095-P1-C2-T3 13U78C9.001.WZS0095-P1-C2-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78C9.001.WZS0095-P1-C10-T4 U78C9.001.WZS0095-P1-C10-T4 13U78C9.001.WZS0095-P1-C10-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78C9.001.WZS0095-P1-C10-T2 U78C9.001.WZS0095-P1-C10-T2 13U78C9.001.WZS0095-P1-C10-T2 1 4-Port Gigabit Ethernet 
PCI-Express Adapter (e414571614102004) ent4 physicalEthernetAdpter U78C9.001.WZS0095-P1-C2-T1 U78C9.001.WZS0095-P1-C2-T1 13U78C9.001.WZS0095-P1-C2-T1 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent5 U78C9.001.WZS0095-P1-C2-T2 en5 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent7 U78C9.001.WZS0095-P1-C2-T4 en7 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78C9.001.WZS0095-P1-C10-T3 en2 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent6 U78C9.001.WZS0095-P1-C2-T3 en6 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78C9.001.WZS0095-P1-C10-T4 en3 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 U78C9.001.WZS0095-P1-C10-T2 en1 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent4 U78C9.001.WZS0095-P1-C2-T1 en4 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/file_feed.txt0000664000175000017500000032621313571367171022045 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh file_feed.txt # #################################################### INFO{ {'comment': 'A file get', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'File'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ f4597d89-8fca-3ece-b0a3-a6b96b0d053c 2015-01-23T03:36:04.519Z IBM Power Systems Management Console 5cd8e4b0-083e-4c71-bcff-2432807cfdcc File 2015-01-23T03:36:04.601Z IBM Power Systems Management Console 5cd8e4b0-083e-4c71-bcff-2432807cfdcc 1421979400892 boot_9699a0f5 1421736166276 application/octet-stream 5cd8e4b0-083e-4c71-bcff-2432807cfdcc 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.28 25d43c02-bdca-4c9b-8cf4-c2a77b23b572 File 2015-01-23T03:36:04.602Z IBM Power Systems Management Console 25d43c02-bdca-4c9b-8cf4-c2a77b23b572 1421979400892 dsafjfsakjdhf_b2460263_userID_config.iso 1421384689772 application/octet-stream 25d43c02-bdca-4c9b-8cf4-c2a77b23b572 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 73ef5b08-000e-47b7-a88e-7156d67d8bbb File 2015-01-23T03:36:04.603Z IBM Power Systems Management Console 73ef5b08-000e-47b7-a88e-7156d67d8bbb 1421979400892 boot_ddfd2151 1421386380482 application/octet-stream 73ef5b08-000e-47b7-a88e-7156d67d8bbb 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.3 b65126b5-30b2-456d-89ed-91639ec439be 
File 2015-01-23T03:36:04.604Z IBM Power Systems Management Console b65126b5-30b2-456d-89ed-91639ec439be 1421979400894 drew7_fa820cb6_userID_config.iso 1421379304562 application/octet-stream b65126b5-30b2-456d-89ed-91639ec439be 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 8d57a726-21f9-403e-8f40-9063a142c1b5 File 2015-01-23T03:36:04.605Z IBM Power Systems Management Console 8d57a726-21f9-403e-8f40-9063a142c1b5 1421979400894 boot_9bba66ce 1421225000083 application/octet-stream 8d57a726-21f9-403e-8f40-9063a142c1b5 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.16 3fda68d0-1ef1-4ce9-8ec9-01812160c327 File 2015-01-23T03:36:04.606Z IBM Power Systems Management Console 3fda68d0-1ef1-4ce9-8ec9-01812160c327 1421979400896 test_aix_17cbd370_userID_config.iso 1421386579630 application/octet-stream 3fda68d0-1ef1-4ce9-8ec9-01812160c327 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 0e435870-4c11-4a87-aec1-fd8458653b94 File 2015-01-23T03:36:04.606Z IBM Power Systems Management Console 0e435870-4c11-4a87-aec1-fd8458653b94 1421979400896 boot_3bc2b715 1421384488794 application/octet-stream 0e435870-4c11-4a87-aec1-fd8458653b94 4550656 4550656 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.5 86f3bb66-4a3b-4aae-a8ae-022935f92ec5 File 2015-01-23T03:36:04.607Z IBM Power Systems Management Console 86f3bb66-4a3b-4aae-a8ae-022935f92ec5 1421979400896 boot_1ecdfefc 1421727535528 application/octet-stream 86f3bb66-4a3b-4aae-a8ae-022935f92ec5 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.18 3ebfeaa8-a83c-489a-97d2-85bc2ce1a4f9 File 2015-01-23T03:36:04.608Z IBM Power Systems Management Console 3ebfeaa8-a83c-489a-97d2-85bc2ce1a4f9 1421979400896 boot_b2460263 1421384687327 application/octet-stream 3ebfeaa8-a83c-489a-97d2-85bc2ce1a4f9 4550656 4550656 BROKERED_DISK_IMAGE 
14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.19 099aa24b-f38d-4970-8640-dfdb63f82d5c File 2015-01-23T03:36:04.609Z IBM Power Systems Management Console 099aa24b-f38d-4970-8640-dfdb63f82d5c 1421979400896 boot_17cbd370 1421386460590 application/octet-stream 099aa24b-f38d-4970-8640-dfdb63f82d5c 5032672768 5032672768 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.23 583896e6-5518-4c8c-8f84-359bbcceb6a3 File 2015-01-23T03:36:04.610Z IBM Power Systems Management Console 583896e6-5518-4c8c-8f84-359bbcceb6a3 1421979400896 boot_16eea13e 1421796064783 application/octet-stream 583896e6-5518-4c8c-8f84-359bbcceb6a3 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.31 8b971f7d-10d4-4a66-bc11-3265ff0d8d1b File 2015-01-23T03:36:04.611Z IBM Power Systems Management Console 8b971f7d-10d4-4a66-bc11-3265ff0d8d1b 1421979400897 boot_9306783f 1421719952384 application/octet-stream 8b971f7d-10d4-4a66-bc11-3265ff0d8d1b 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.11 795ad77b-8ea2-4681-9cce-16c24999a4cb File 2015-01-23T03:36:04.612Z IBM Power Systems Management Console 795ad77b-8ea2-4681-9cce-16c24999a4cb 1421979400897 boot_df4029a6 1421732459689 application/octet-stream 795ad77b-8ea2-4681-9cce-16c24999a4cb 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.21 3e1b2206-17f6-42f9-970a-9c47c90185c0 File 2015-01-23T03:36:04.612Z IBM Power Systems Management Console 3e1b2206-17f6-42f9-970a-9c47c90185c0 1421979400897 boot_9f5befd2 1421735790829 application/octet-stream 3e1b2206-17f6-42f9-970a-9c47c90185c0 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.27 74085e13-0747-481f-b642-4680724f2761 File 2015-01-23T03:36:04.613Z IBM Power Systems Management Console 
74085e13-0747-481f-b642-4680724f2761 1421979400898 bob_iso 1420601133605 application/octet-stream 74085e13-0747-481f-b642-4680724f2761 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 10eb02fb-7e29-4ca9-a6f0-63ddcdbf5d28 File 2015-01-23T03:36:04.614Z IBM Power Systems Management Console 10eb02fb-7e29-4ca9-a6f0-63ddcdbf5d28 1421979400899 drew5_4039c1b1_userID_config.iso 1421379078690 application/octet-stream 10eb02fb-7e29-4ca9-a6f0-63ddcdbf5d28 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 6233b070-31cc-4b57-99bd-37f80e845de9 File 2015-01-23T03:36:04.615Z IBM Power Systems Management Console 6233b070-31cc-4b57-99bd-37f80e845de9 1421979400899 boot_c3f7b1e7 1421385016137 application/octet-stream 6233b070-31cc-4b57-99bd-37f80e845de9 4550656 4550656 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.17 42974f49-c568-47fa-a2e7-d861e8908c78 File 2015-01-23T03:36:04.616Z IBM Power Systems Management Console 42974f49-c568-47fa-a2e7-d861e8908c78 1421979400899 bob_iso 1420619818061 application/octet-stream 42974f49-c568-47fa-a2e7-d861e8908c78 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 50b9f7b8-ebd0-4a99-8246-6ea4a4b305cb File 2015-01-23T03:36:04.617Z IBM Power Systems Management Console 50b9f7b8-ebd0-4a99-8246-6ea4a4b305cb 1421979400899 aix_disk2 1421190142865 application/octet-stream 50b9f7b8-ebd0-4a99-8246-6ea4a4b305cb 10737418240 10737418240 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.15 d3fe7e72-1018-44e8-84b8-1bf829485b7a File 2015-01-23T03:36:04.618Z IBM Power Systems Management Console d3fe7e72-1018-44e8-84b8-1bf829485b7a 1421979400899 boot_30a56789 1421399142877 application/octet-stream d3fe7e72-1018-44e8-84b8-1bf829485b7a 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.5 652e0ba4-8e42-45ad-86ef-12868b1d8aae File 2015-01-23T03:36:04.618Z IBM Power Systems Management Console 
652e0ba4-8e42-45ad-86ef-12868b1d8aae 1421979400900 asdcv_3bc2b715_userID_config.iso 1421384491215 application/octet-stream 652e0ba4-8e42-45ad-86ef-12868b1d8aae 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 4920dcd4-4faf-4acc-99ca-c1465989286a File 2015-01-23T03:36:04.619Z IBM Power Systems Management Console 4920dcd4-4faf-4acc-99ca-c1465989286a 1421979400900 boot_a919a184 1421395508606 application/octet-stream 4920dcd4-4faf-4acc-99ca-c1465989286a 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.3 b2088134-4298-4e8d-b6c5-ca7f78be6ffd File 2015-01-23T03:36:04.620Z IBM Power Systems Management Console b2088134-4298-4e8d-b6c5-ca7f78be6ffd 1421979400900 boot_bef997cd 1421717486048 application/octet-stream b2088134-4298-4e8d-b6c5-ca7f78be6ffd 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.8 9ffc9073-92c2-4485-bccd-e57e9b0f3939 File 2015-01-23T03:36:04.621Z IBM Power Systems Management Console 9ffc9073-92c2-4485-bccd-e57e9b0f3939 1421979400900 boot_e0dd0297 1421733136814 application/octet-stream 9ffc9073-92c2-4485-bccd-e57e9b0f3939 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.22 00ceab1a-cbe1-4259-bf94-bd2b5efedef3 File 2015-01-23T03:36:04.622Z IBM Power Systems Management Console 00ceab1a-cbe1-4259-bf94-bd2b5efedef3 1421979400900 boot_9123f6a2 1421225617706 application/octet-stream 00ceab1a-cbe1-4259-bf94-bd2b5efedef3 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.18 551c61d0-086d-4084-b013-0088f724e779 File 2015-01-23T03:36:04.623Z IBM Power Systems Management Console 551c61d0-086d-4084-b013-0088f724e779 1421979400900 bob_iso 1420611259989 application/octet-stream 551c61d0-086d-4084-b013-0088f724e779 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 ac1bfa47-516b-480e-a520-9008256f1f6a File 
2015-01-23T03:36:04.624Z IBM Power Systems Management Console ac1bfa47-516b-480e-a520-9008256f1f6a 1421979400901 boot_e0d47c9a 1421381901790 application/octet-stream ac1bfa47-516b-480e-a520-9008256f1f6a 10737418240 10737418240 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.22 e791fe74-e266-4ff1-a052-24fb72eff3f4 File 2015-01-23T03:36:04.624Z IBM Power Systems Management Console e791fe74-e266-4ff1-a052-24fb72eff3f4 1421979400901 asdf_c3f7b1e7_userID_config.iso 1421385018365 application/octet-stream e791fe74-e266-4ff1-a052-24fb72eff3f4 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 c7ae02a9-dd68-4c54-82e7-195e54a2dac1 File 2015-01-23T03:36:04.625Z IBM Power Systems Management Console c7ae02a9-dd68-4c54-82e7-195e54a2dac1 1421979400901 boot_1ea155fd 1421375780753 application/octet-stream c7ae02a9-dd68-4c54-82e7-195e54a2dac1 10737418240 1255776256 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.20 bbb95d43-0544-4f25-a5f8-74391e637e3e File 2015-01-23T03:36:04.626Z IBM Power Systems Management Console bbb95d43-0544-4f25-a5f8-74391e637e3e 1421979400901 boot_6ec6f2cd 1421895042163 application/octet-stream bbb95d43-0544-4f25-a5f8-74391e637e3e 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.33 3fe7d439-becf-41b4-b0d1-940e2bc45a9a File 2015-01-23T03:36:04.627Z IBM Power Systems Management Console 3fe7d439-becf-41b4-b0d1-940e2bc45a9a 1421979400901 bob_disk5 1421127777888 application/octet-stream 3fe7d439-becf-41b4-b0d1-940e2bc45a9a 62 62 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.9 97e0ad9f-2c03-4d3d-811e-d95ed042a34b File 2015-01-23T03:36:04.628Z IBM Power Systems Management Console 97e0ad9f-2c03-4d3d-811e-d95ed042a34b 1421979400901 drew1_d87ebecb_userID_config.iso 1421378314697 application/octet-stream 97e0ad9f-2c03-4d3d-811e-d95ed042a34b 419840 419840 
BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 dffb27c8-0071-467e-8f84-371e499951e9 File 2015-01-23T03:36:04.629Z IBM Power Systems Management Console dffb27c8-0071-467e-8f84-371e499951e9 1421979400902 boot_34978ba4 1421384606816 application/octet-stream dffb27c8-0071-467e-8f84-371e499951e9 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.17 0a511bd2-77ab-4735-bc51-54beabda6048 File 2015-01-23T03:36:04.629Z IBM Power Systems Management Console 0a511bd2-77ab-4735-bc51-54beabda6048 1421979400902 boot_26610873 1421727146534 application/octet-stream 0a511bd2-77ab-4735-bc51-54beabda6048 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.16 3a546776-7b86-45fc-ab97-be91434f1b8a File 2015-01-23T03:36:04.630Z IBM Power Systems Management Console 3a546776-7b86-45fc-ab97-be91434f1b8a 1421979400902 boot_b853fd66 1421727813013 application/octet-stream 3a546776-7b86-45fc-ab97-be91434f1b8a 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.20 a4923f5e-0f0b-40c6-837a-8f515501d37c File 2015-01-23T03:36:04.631Z IBM Power Systems Management Console a4923f5e-0f0b-40c6-837a-8f515501d37c 1421979400902 bob_iso 1420611104046 application/octet-stream a4923f5e-0f0b-40c6-837a-8f515501d37c BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 294b2e8a-4b7d-469c-8555-1ba7628e2a37 File 2015-01-23T03:36:04.632Z IBM Power Systems Management Console 294b2e8a-4b7d-469c-8555-1ba7628e2a37 1421979400902 boot_96654a43 1421444952203 application/octet-stream 294b2e8a-4b7d-469c-8555-1ba7628e2a37 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.7 b5dd98d8-c65f-40b9-911f-18c76629f91c File 2015-01-23T03:36:04.633Z IBM Power Systems Management Console b5dd98d8-c65f-40b9-911f-18c76629f91c 1421979400902 boot_540240c8 1421737273729 application/octet-stream 
b5dd98d8-c65f-40b9-911f-18c76629f91c 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.30 18834581-309e-475b-919e-8767775673b5 File 2015-01-23T03:36:04.634Z IBM Power Systems Management Console 18834581-309e-475b-919e-8767775673b5 1421979400903 drew6_1ff4d84e_userID_config.iso 1421379220465 application/octet-stream 18834581-309e-475b-919e-8767775673b5 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 aeb7bb6a-7b78-4b41-80f7-7d07630182b4 File 2015-01-23T03:36:04.635Z IBM Power Systems Management Console aeb7bb6a-7b78-4b41-80f7-7d07630182b4 1421979400903 boot_eea81ef4 1421384336941 application/octet-stream aeb7bb6a-7b78-4b41-80f7-7d07630182b4 4550656 4550656 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.3 acbf327d-1988-407b-9f28-6be40b7cbdd1 File 2015-01-23T03:36:04.635Z IBM Power Systems Management Console acbf327d-1988-407b-9f28-6be40b7cbdd1 1421979400903 boot_e73c7a0c 1421376955302 application/octet-stream acbf327d-1988-407b-9f28-6be40b7cbdd1 10737418240 106274816 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.21 13f72717-d35a-44bd-9ba2-ce444d7c71ca File 2015-01-23T03:36:04.636Z IBM Power Systems Management Console 13f72717-d35a-44bd-9ba2-ce444d7c71ca 1421979400903 boot_d50cd8e4 1421736650939 application/octet-stream 13f72717-d35a-44bd-9ba2-ce444d7c71ca 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.29 d55f5669-4e92-476c-95b1-be3fc6956cd9 File 2015-01-23T03:36:04.637Z IBM Power Systems Management Console d55f5669-4e92-476c-95b1-be3fc6956cd9 1421979400903 boot_d2d886e1 1421733685813 application/octet-stream d55f5669-4e92-476c-95b1-be3fc6956cd9 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.24 5f8795d0-f2e4-4c57-baf5-7527c6a6874c File 2015-01-23T03:36:04.638Z IBM Power 
Systems Management Console 5f8795d0-f2e4-4c57-baf5-7527c6a6874c 1421979400903 boot_11bc8dc2 1421272624717 application/octet-stream 5f8795d0-f2e4-4c57-baf5-7527c6a6874c 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.19 e46ea7cf-7fdd-4272-a518-f5aaece9b455 File 2015-01-23T03:36:04.639Z IBM Power Systems Management Console e46ea7cf-7fdd-4272-a518-f5aaece9b455 1421979400903 efried0.182413 1421131299390 application/octet-stream e46ea7cf-7fdd-4272-a518-f5aaece9b455 1073741824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.11 dd02e08b-4311-4ca7-91e9-cd4f9aee832f File 2015-01-23T03:36:04.640Z IBM Power Systems Management Console dd02e08b-4311-4ca7-91e9-cd4f9aee832f 1421979400904 boot_21e1de68 1421718243764 application/octet-stream dd02e08b-4311-4ca7-91e9-cd4f9aee832f 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.9 7427a07e-d03f-41aa-81b8-d59c39ac8ea2 File 2015-01-23T03:36:04.640Z IBM Power Systems Management Console 7427a07e-d03f-41aa-81b8-d59c39ac8ea2 1421979400904 efried0.207216 1421131523206 application/octet-stream 7427a07e-d03f-41aa-81b8-d59c39ac8ea2 50cd27e6b965858cecd3cdfb3022f288daaf3773053ba9c2a34d605bf1ce0cd8 1073741824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.12 a49a67f6-eb5c-41f7-bd2f-4531f4df0241 File 2015-01-23T03:36:04.641Z IBM Power Systems Management Console a49a67f6-eb5c-41f7-bd2f-4531f4df0241 1421979400904 drew3_b0eaf56b_userID_config.iso 1421378708273 application/octet-stream a49a67f6-eb5c-41f7-bd2f-4531f4df0241 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 abcf9fe9-c471-44e4-8ad7-af3080f5dd40 File 2015-01-23T03:36:04.642Z IBM Power Systems Management Console abcf9fe9-c471-44e4-8ad7-af3080f5dd40 1421979400904 bob_iso 1420619996288 application/octet-stream abcf9fe9-c471-44e4-8ad7-af3080f5dd40 49 49 
BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 071c69d5-41f2-4cfb-819a-8b463efb0ccb File 2015-01-23T03:36:04.643Z IBM Power Systems Management Console 071c69d5-41f2-4cfb-819a-8b463efb0ccb 1421979400904 boot_42364069 1421444429358 application/octet-stream 071c69d5-41f2-4cfb-819a-8b463efb0ccb 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.6 bb62f6c3-3a80-4044-9085-7a8a392903cf File 2015-01-23T03:36:04.644Z IBM Power Systems Management Console bb62f6c3-3a80-4044-9085-7a8a392903cf 1421979400904 efried0.209018 1421131239758 application/octet-stream bb62f6c3-3a80-4044-9085-7a8a392903cf 1073741824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.10 2ca84235-88fa-4332-8d98-2e8682983e18 File 2015-01-23T03:36:04.644Z IBM Power Systems Management Console 2ca84235-88fa-4332-8d98-2e8682983e18 1421979400905 boot_f07b23e2 1421718979743 application/octet-stream 2ca84235-88fa-4332-8d98-2e8682983e18 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.10 b6dfebab-cd03-44ae-82b4-0576a0c5e68a File 2015-01-23T03:36:04.646Z IBM Power Systems Management Console b6dfebab-cd03-44ae-82b4-0576a0c5e68a 1421979400905 bob_disk3 1420620920654 application/octet-stream b6dfebab-cd03-44ae-82b4-0576a0c5e68a 49 49 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.7 2db072ee-ecfc-48d1-9ce6-98827b740679 File 2015-01-23T03:36:04.646Z IBM Power Systems Management Console 2db072ee-ecfc-48d1-9ce6-98827b740679 1421979400905 boot_37e3c8d3 1421894802205 application/octet-stream 2db072ee-ecfc-48d1-9ce6-98827b740679 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.32 e0c9eec7-a808-4ea3-a1c2-2564ac66d7f3 File 2015-01-23T03:36:04.647Z IBM Power Systems Management Console e0c9eec7-a808-4ea3-a1c2-2564ac66d7f3 1421979400905 
drew4_3983ac1c_userID_config.iso 1421378800823 application/octet-stream e0c9eec7-a808-4ea3-a1c2-2564ac66d7f3 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 2223a0c1-0a3e-41b2-90f3-d8746ecf49f5 File 2015-01-23T03:36:04.648Z IBM Power Systems Management Console 2223a0c1-0a3e-41b2-90f3-d8746ecf49f5 1421979400905 boot_947a017f 1421733872854 application/octet-stream 2223a0c1-0a3e-41b2-90f3-d8746ecf49f5 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.25 d12d0a1c-0187-49e7-8465-54ca40502de5 File 2015-01-23T03:36:04.649Z IBM Power Systems Management Console d12d0a1c-0187-49e7-8465-54ca40502de5 1421979400905 bob_disk2 1420620891034 application/octet-stream d12d0a1c-0187-49e7-8465-54ca40502de5 49 49 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.6 8161f843-37ab-4ea5-8efc-d43c2240ce35 File 2015-01-23T03:36:04.650Z IBM Power Systems Management Console 8161f843-37ab-4ea5-8efc-d43c2240ce35 1421979400906 boot_925c5fa3 1421726856027 application/octet-stream 8161f843-37ab-4ea5-8efc-d43c2240ce35 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.13 f0254c0e-1791-4052-b95d-b1a26d2bc8a7 File 2015-01-23T03:36:04.650Z IBM Power Systems Management Console f0254c0e-1791-4052-b95d-b1a26d2bc8a7 1421979400906 drew2_895e8512_userID_config.iso 1421378543430 application/octet-stream f0254c0e-1791-4052-b95d-b1a26d2bc8a7 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 de46ecdc-ded2-4713-b3bf-fd6e62725048 File 2015-01-23T03:36:04.651Z IBM Power Systems Management Console de46ecdc-ded2-4713-b3bf-fd6e62725048 1421979400906 bob_disk 1420620727947 application/octet-stream de46ecdc-ded2-4713-b3bf-fd6e62725048 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.5 480809eb-09d1-4234-baa7-5416d8431a32 File 2015-01-23T03:36:04.652Z IBM Power Systems Management 
Console 480809eb-09d1-4234-baa7-5416d8431a32 1421979400906 efried1.355194 1421131633455 application/octet-stream 480809eb-09d1-4234-baa7-5416d8431a32 50cd27e6b965858cecd3cdfb3022f288daaf3773053ba9c2a34d605bf1ce0cd7 1073741824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.13 e6696a46-5ee2-462e-8ec9-b2919ba59f62 File 2015-01-23T03:36:04.653Z IBM Power Systems Management Console e6696a46-5ee2-462e-8ec9-b2919ba59f62 1421979400906 cirros_eea81ef4_userID_config.iso 1421384339441 application/octet-stream e6696a46-5ee2-462e-8ec9-b2919ba59f62 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 f3bcf06c-7fd1-4a91-a2fc-bc6e4af4af1d File 2015-01-23T03:36:04.654Z IBM Power Systems Management Console f3bcf06c-7fd1-4a91-a2fc-bc6e4af4af1d 1421979400906 drew_real_aix_e0d47c9a_userID_config.iso 1421382072820 application/octet-stream f3bcf06c-7fd1-4a91-a2fc-bc6e4af4af1d 419840 419840 BROKERED_MEDIA_ISO 14B854F7-42CE-4FF0-BD57-1D117054E701 2585d460-1077-406e-b583-744092509226 File 2015-01-23T03:36:04.656Z IBM Power Systems Management Console 2585d460-1077-406e-b583-744092509226 1421979400907 boot_810f7730 1421735515439 application/octet-stream 2585d460-1077-406e-b583-744092509226 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.26 6aac6131-caaa-4a10-868c-f64402c83994 File 2015-01-23T03:36:04.657Z IBM Power Systems Management Console 6aac6131-caaa-4a10-868c-f64402c83994 1421979400907 boot_8b816a22 1421216010699 application/octet-stream 6aac6131-caaa-4a10-868c-f64402c83994 25165824 25165824 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.17 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/managementconsole.txt0000664000175000017500000000773413571367171023646 0ustar neoneo00000000000000INFO{ {'comment': 'Created from query of ManagementConsole', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 
'user': 'hscroot', 'path': 'ManagementConsole'} END OF SECTION} HEADERS{ {'content-length': '3385', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000UQJf2tlhLW_9or9fo1Or2v3:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 26 Jul 2013 12:10:52 GMT', 'etag': '526441970', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Fri, 26 Jul 2013 12:10:52 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 1051acae-86dc-3b80-a9c6-ac12ff893126 2013-07-26T08:10:52.919-04:00 IBM Power Systems Management Console b3416a32-46f9-3df9-819e-39349001af0c ManagementConsole 2013-07-26T08:10:53.191-04:00 IBM Power Systems Management Console b3416a32-46f9-3df9-819e-39349001af0c 1374840653035 Ve57 f93 2911559 hmc7 eth0 9.1.2.3 fe80:0:0:0:5054:ff:fed8:a951 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/ltm_feed_lpars.txt0000664000175000017500000000610413571367171023115 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'RawMetrics/LongTermMonitor'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 98498bed-c78a-3a4f-b90a-4b715418fcb6 2015-04-30T03:53:00.000Z LongTermMetrics ManagedSystem 98498bed-c78a-3a4f-b90a-4b715418fcb6 15161241-b72f-41d5-8154-557ff699fb75 2016-03-28T02:26:00.000-04:00 LTM_8247-22L*1111111_vios_2_20150430T035300+0000.json 2016-03-28T02:26:00.000-04:00 IBM Power Systems Management Console cf8bf632-b702-4f4f-9029-5ffc8934e886 2016-03-28T02:26:00.000-04:00 LTM_8247-22L*1111111_phyp_20150430T035300+0000.json 2016-03-28T02:26:00.000-04:00 IBM Power Systems Management Console aa05223a-0141-467e-86a5-4d57ede44ab8 2016-03-28T02:26:00.000-04:00 LTM_8247-22L*1111111_lpar_20160328T022600-0400.json 2016-03-28T02:26:00.000-04:00 IBM Power Systems Management Console END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_ssp_npiv.txt0000664000175000017500000031715713571367171024041 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_vios_ssp_npiv.txt # #################################################### INFO{ {'comment': 'Use for scsi_map_processor', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/75B373A5-B9FF-4799-B23B-202929517C64'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=0000N4dQTtVs6iVW1jRpJ54Q6F7:87025216-ad22-4a32-8e2c-3194816a5355; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 08 Jan 2014 17:05:32 GMT', 'etag': '1775366259', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 08 Jan 2014 17:05:31 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 75B373A5-B9FF-4799-B23B-202929517C64 VirtualIOServer 2014-01-08T12:05:32.070-05:00 IBM Power Systems Management Console 75B373A5-B9FF-4799-B23B-202929517C64 1389191360857 true 191 0 POWER7 On false false false false false false normal 0604CAA1 VIOS 2.2.3.1 true true true true true 1 80 false PCI-E SAS Controller 2053 2054 2055 5901 5909 5911 2053 2054 2055 5901 5909 5911 2053 2054 2055 5901 5909 5911 U78AB.001.WZSJBM3 825 260 825 826 4116 1 4116 4116 false false false false false false false false false false 553713674 PCI-E SAS Controller U78AB.001.WZSJBM3-P1-T9 U78AB.001.WZSJBM3-P1-T9 T9 825 553713674 U78AB.001.WZSJBM3-P1-T9 T9 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78AB.001.WZSJBM3 5719 512 5719 1056 5348 1 5348 4116 false false false false false false false false false false 553910285 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78AB.001.WZSJBM3-P1-C7 U78AB.001.WZSJBM3-P1-C7 C7 5719 553910285 U78AB.001.WZSJBM3-P1-C7 C7 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSJBM3 61696 3076 61696 906 4319 3 4319 4116 
false false false false false false false false false false 553714177 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJBM3-P1-C2 U78AB.001.WZSJBM3-P1-C2 C2 U78AB.001.WZSJBM3-P1-C2-T2 fcs1 1aU78AB.001.WZSJBM3-P1-C2-T2 10000090FA1B6303 U78AB.001.WZSJBM3-P1-C2-T1 MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L0 none SinglePath Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDAyQg== false 0 hdisk6 active 332136005076300838041300000000000002B04214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L1000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCNQ== false 10240 hdisk7 active 33213600507630083804130000000000008B504214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L2000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCMg== false 102400 hdisk8 active 33213600507630083804130000000000008B204214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L3000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCMw== false 102400 hdisk9 active 33213600507630083804130000000000008B304214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L4000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCNA== false 102400 hdisk10 active 33213600507630083804130000000000008B404214503IBMfcp true fcs0 1aU78AB.001.WZSJBM3-P1-C2-T1 10000090FA1B6302 64 64 553714177 U78AB.001.WZSJBM3-P1-C2 C2 80 false 0 8192 0.0 6 false 12288 4096 false 0 0.0 6 0 0 12288 8192 0 4096 false false 0 8192 4096 false vios1_181.68 false 0.4 4 1 10 0.4 2 0 128 uncapped false uncapped false 4 1 0.4 0.4 0 128 2 10 0.4 128 running Virtual IO Server 75B373A5-B9FF-4799-B23B-202929517C64 default 0 0 active 9.1.2.5 209915350385152 false true true true true 
vopt_12d2551a64034100ba570995594c9c93 0evopt_12d2551a64034100ba570995594c9c93 rw 0.000000 vopt_30a16f383715404a97845562c2dc4122 0evopt_30a16f383715404a97845562c2dc4122 rw 0.000000 vopt_54bec8116ba848c0af176c530efaea6c 0evopt_54bec8116ba848c0af176c530efaea6c rw 0.000000 vopt_b50227a569fd446da2bdc0b49b1ac9e3 0evopt_b50227a569fd446da2bdc0b49b1ac9e3 rw 0.000000 vopt_c1cc1c0b21cf48f189e8b81ee0060773 0evopt_c1cc1c0b21cf48f189e8b81ee0060773 rw 0.000000 VMLibrary 1 true Unlocked aes-cbc-essiv:sha256 512 sha512 SAS Disk Drive U78AB.001.WZSJBM3-P3-D1 NoReserve Failover 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0NzgzQTUyQjg= false 572325 hdisk0 active 2811350000394783A52B809MBF2600RC03IBMsas false SAS Disk Drive U78AB.001.WZSJBM3-P3-D3 NoReserve Failover 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0ODgwMTY4ODA= true 572325 hdisk2 active 28113500003948801688009MBF2600RC03IBMsas false SAS Disk Drive U78AB.001.WZSJBM3-P3-D2 NoReserve Failover 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0NzgzQTU0QTA= true 572325 hdisk1 active 2811350000394783A54A009MBF2600RC03IBMsas false MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L3000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCMw== false 102400 hdisk9 active 33213600507630083804130000000000008B304214503IBMfcp true SAS Disk Drive U78AB.001.WZSJBM3-P3-D6 NoReserve Failover 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0ODgwMTZBQzg= true 572325 hdisk5 active 281135000039488016AC809MBF2600RC03IBMsas false MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L4000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCNA== false 102400 hdisk10 active 33213600507630083804130000000000008B404214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L2000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCMg== false 102400 hdisk8 active 33213600507630083804130000000000008B204214503IBMfcp true SAS Disk Drive 
U78AB.001.WZSJBM3-P3-D5 NoReserve Failover 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0ODgwMTg1ODg= true 572325 hdisk4 active 28113500003948801858809MBF2600RC03IBMsas false SAS Disk Drive U78AB.001.WZSJBM3-P3-D4 NoReserve Failover 01M0lCTU1CRjI2MDBSQzUwMDAwMzk0ODgwQjFFOEM= true 572325 hdisk3 active 2811350000394880B1E8C09MBF2600RC03IBMsas false MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L0 none SinglePath Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDAyQg== false 0 hdisk6 active 332136005076300838041300000000000002B04214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSJBM3-P1-C2-T1-W500507680308104A-L1000000000000 none NoReserve Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCNQ== false 10240 hdisk7 active 33213600507630083804130000000000008B504214503IBMfcp true 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78AB.001.WZSJBM3-P1-C7-T1 U78AB.001.WZSJBM3-P1-C7-T1 13U78AB.001.WZSJBM3-P1-C7-T1 disabled ent6 false 1 disabled 8192 true U8246.L1C.0604CAA-V1-C2 U8246.L1C.0604CAA-V1-C2 false true 2 ALL BEEAB9E48202 1 false false 0 ent4 1 U8246.L1C.0604CAA-V1-C6 U8246.L1C.0604CAA-V1-C6 false true 6 ALL BEEAB9E48206 4094 false 2227 3000 3001 true 0 ent5 1 en6 9.1.2.4 255.255.255.0 Active 10db2af125975fa33a true U8246.L1C.0604CAA-V1-C2 U8246.L1C.0604CAA-V1-C2 false true 2 ALL BEEAB9E48202 1 false false 0 ent4 1 U8246.L1C.0604CAA-V1-C6 U8246.L1C.0604CAA-V1-C6 false true 6 ALL BEEAB9E48206 4094 false 2227 3000 3001 true 0 ent5 1 U8246.L1C.0604CAA-V1-C15 U8246.L1C.0604CAA-V1-C15 true true 15 ALL BEEAB9E48207 4093 false 1234 1235 1236 true 0 ent6 1 true Client U8246.L1C.0604CAA-V2-C3 U8246.L1C.0604CAA-V2-C3 2 false false 3 1 4 c05076065a8b005a c05076065a8b005b U78AB.001.WZSJBM3-P1-C2-T1 fcs0 1aU78AB.001.WZSJBM3-P1-C2-T1 10000090FA1B6302 64 64 Server U8246.L1C.0604CAA-V1-C4 U8246.L1C.0604CAA-V1-C4 1 false true 4 vfchost0 21 3 1dU8246.L1C.0604CAA-V1-C4 fcs0 U78AB.001.WZSJBM3-P1-C2-T1 fcs0 
1aU78AB.001.WZSJBM3-P1-C2-T1 10000090FA1B6302 64 64 Client U8246.L1C.0604CAA-V7-C6 U8246.L1C.0604CAA-V7-C6 7 false true 6 1 12 c05076065a8b0060 c05076065a8b0061 Server U8246.L1C.0604CAA-V1-C12 U8246.L1C.0604CAA-V1-C12 1 false true 12 vfchost1 7 6 1dU8246.L1C.0604CAA-V1-C12 Client U8246.L1C.0604CAA-V7-C2 U8246.L1C.0604CAA-V7-C2 7 false true 2 1 7 U8246.L1C.0604CAA-V1-C7 Server U8246.L1C.0604CAA-V1-C7 U8246.L1C.0604CAA-V1-C7 1 false true 7 vhost1 volume-boot-8246L1C_0604CAA-iju_37-bc9cd9d6-00000029 7 2 U8246.L1C.0604CAA-V7-C2 1eU8246.L1C.0604CAA-V1-C7 true 274d7bb790666211e3bc1a00006cae8b01c96f590914bccbc8b7b88c37165c0485 10 VirtualIO_Disk 274d7bb790666211e3bc1a00006cae8b01c8b500af7db2f92b97728bffa6391619 volume-boot-8246L1C_0604CAA-iju_37-bc9cd9d6-00000029 0x8200000000000000 vtscsi0 0aee832d1c28727ffU8246.L1C.0604CAA-V1-C7 Server U8246.L1C.0604CAA-V1-C5 U8246.L1C.0604CAA-V1-C5 1 true true 5 vhost2 2 4 1eU8246.L1C.0604CAA-V1-C5 Client U8246.L1C.0604CAA-V2-C2 U8246.L1C.0604CAA-V2-C2 2 false false 2 1 3 U8246.L1C.0604CAA-V1-C3 Server U8246.L1C.0604CAA-V1-C3 U8246.L1C.0604CAA-V1-C3 1 false true 3 vhost0 2 2 U8246.L1C.0604CAA-V2-C2 1eU8246.L1C.0604CAA-V1-C3 Client U8246.L1C.0604CAA-V4-C2 U8246.L1C.0604CAA-V4-C2 4 false true 2 1 9 U8246.L1C.0604CAA-V1-C9 Server U8246.L1C.0604CAA-V1-C9 U8246.L1C.0604CAA-V1-C9 1 false true 9 vhost4 volume-boot-8246L1C_0604CAA-rhel_vs1_on_s-54bec811-00000015 4 2 U8246.L1C.0604CAA-V4-C2 1eU8246.L1C.0604CAA-V1-C9 true 274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb24756e9713a93f90 10 VirtualIO_Disk 274d7bb790666211e3bc1a00006cae8b01c8b500af7db2f92b97728bffa6391619 volume-boot-8246L1C_0604CAA-rhel_vs1_on_s-54bec811-00000015 0x8400000000000000 vtscsi2 0afa2c4b2babd4f8bU8246.L1C.0604CAA-V1-C9 Client U8246.L1C.0604CAA-V3-C2 U8246.L1C.0604CAA-V3-C2 3 false true 2 1 8 U8246.L1C.0604CAA-V1-C8 Server U8246.L1C.0604CAA-V1-C8 U8246.L1C.0604CAA-V1-C8 1 false true 8 vhost3 volume-boot-8246L1C_0604CAA-salsman66-00000004 3 2 U8246.L1C.0604CAA-V3-C2 
1eU8246.L1C.0604CAA-V1-C8 true 274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771d6a32accde003 10 VirtualIO_Disk 274d7bb790666211e3bc1a00006cae8b01c8b500af7db2f92b97728bffa6391619 volume-boot-8246L1C_0604CAA-salsman66-00000004 0x8500000000000000 vtscsi1 0a5bedf0400f3e0943U8246.L1C.0604CAA-V1-C8 Client U8246.L1C.0604CAA-V3-C2 U8246.L1C.0604CAA-V3-C2 3 false true 2 1 8 U8246.L1C.0604CAA-V1-C8 Server U8246.L1C.0604CAA-V1-C8 U8246.L1C.0604CAA-V1-C8 1 false true 8 vhost3 vopt_19bbb46ad15747d79fe08f8464466144 3 2 U8246.L1C.0604CAA-V3-C2 1eU8246.L1C.0604CAA-V1-C8 vopt_19bbb46ad15747d79fe08f8464466144 0evopt_19bbb46ad15747d79fe08f8464466144 rw 0.000000 0x8100000000000000 vtopt4 1945307faea3bbcd2b Client U8246.L1C.0604CAA-V4-C2 U8246.L1C.0604CAA-V4-C2 4 false true 2 1 9 U8246.L1C.0604CAA-V1-C9 Server U8246.L1C.0604CAA-V1-C9 U8246.L1C.0604CAA-V1-C9 1 false true 9 vhost4 volume-csky_ssp_vol2_1gb 4 2 U8246.L1C.0604CAA-V4-C2 1eU8246.L1C.0604CAA-V1-C9 true 274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec14327771522b0 1 VirtualIO_Disk volume-csky_ssp_vol2_1gb 0x8300000000000000 vtscsi3 0a15fdb1ced05bc26U8246.L1C.0604CAA-V1-C9 Client U8246.L1C.0604CAA-V4-C55 U8246.L1C.0604CAA-V4-C55 55 false true 2 1 55 U8246.L1C.0604CAA-V1-C55 Server U8246.L1C.0604CAA-V1-C55 U8246.L1C.0604CAA-V1-C55 1 false true 55 vhost55 hdisk55 55 2 U8246.L1C.0604CAA-V4-C55 1eU8246.L1C.0604CAA-V1-C55 MPIO IBM 2076 FC Disk U78AB.001.WZSHPNK-P1-C2-T1-W5005076802232ADE-L7000000000000 none SinglePath Failover 01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDAwNTJBOQ== false 1024 hdisk8 active 332136005076802820A9DA8000000000052A904214503IBMfcp false 0x8600000000000000 vtscsi55 085fc2b9488a5ff9f8 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 3 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 3 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 60 None Ubuntu1410 0300025d4a00007a000000014b36d9deaf.1 
0x8700000000000000 vtscsi0 09b4fdf7bdec405d57 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78AB.001.WZSJBM3-P1-C7-T4 U78AB.001.WZSJBM3-P1-C7-T4 13U78AB.001.WZSJBM3-P1-C7-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78AB.001.WZSJBM3-P1-C7-T3 U78AB.001.WZSJBM3-P1-C7-T3 13U78AB.001.WZSJBM3-P1-C7-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78AB.001.WZSJBM3-P1-C7-T2 U78AB.001.WZSJBM3-P1-C7-T2 13U78AB.001.WZSJBM3-P1-C7-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78AB.001.WZSJBM3-P1-C7-T4 en3 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 U78AF.001.WZS01HS-P1-C34-L2-T1 en2 10.10.10.5 255.255.255.128 Active 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78AB.001.WZSJBM3-P1-C7-T3 en2 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 U78AB.001.WZSJBM3-P1-C7-T2 en1 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_feed.txt0000664000175000017500000251564113571367171023103 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_vios_feed.txt # #################################################### INFO{ {'comment': 'Used for multiple VIO Testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/e7344c5b-79b5-3e73-8f64-94821424bc25/VirtualIOServer'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 349eae6a-80eb-32ce-a8a5-ee4dfed459d0 2015-03-26T08:57:47.566Z IBM Power Systems Management Console 1300C76F-9814-4A4D-B1F0-5B69352A7DEA VirtualIOServer 2015-03-26T08:57:58.480Z IBM Power Systems Management Console -1287056625 1300C76F-9814-4A4D-B1F0-5B69352A7DEA 1427357798253 false 191 0 POWER7 On false false false false false false normal 21EF9FB2 VIOS 2.2.3.1 true true true true true 2 400 false EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA 1808 512 1808 59187 6562 2 6562 4319 false false false false false false false false false false 553714209 EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA-P1-C36-L1 U78AF.001.WZS04LA-P1-C36-L1 C36 1808 553714209 U78AF.001.WZS04LA-P1-C36-L1 C36 false EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA 1808 512 1808 59187 6562 2 6562 4319 false false false false false false false false false false 553714224 EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA-P1-C36-L2 U78AF.001.WZS04LA-P1-C36-L2 C36 1808 553714224 U78AF.001.WZS04LA-P1-C36-L2 C36 false FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA 57856 3076 57856 57858 4319 16 4319 4319 
false false false false false false false false false false 553714211 FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA-P1-C37-L1 U78AF.001.WZS04LA-P1-C37-L1 C37 U78AF.001.WZS04LA-P1-C37-L1-T2 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C37-L1-T2-W50050768022121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwNA== false 102400 hdisk0 active 332136005076D02810187E00000000000000404214503IBMfcp true fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 U78AF.001.WZS04LA-P1-C37-L1-T1 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C37-L1-T2-W50050768022121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwNA== false 102400 hdisk0 active 332136005076D02810187E00000000000000404214503IBMfcp true fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 553714211 U78AF.001.WZS04LA-P1-C37-L1 C37 400 false false 0 24576 0.0 7 false 24576 24576 false 0 0.0 7 0 0 24576 24576 0 24576 false true false false 0 24576 24576 false nimbus-ch03-p2-vios2 false 1 2 1 2 1 2 0 capped false capped false 2 1 1 1 0 2 2 1 running Virtual IO Server 1300C76F-9814-4A4D-B1F0-5B69352A7DEA default 0 0 active 9.1.2.5 196281908884992 false true true vopt_32ab166a7a3c420895ffa8ce23c1c08c 0evopt_32ab166a7a3c420895ffa8ce23c1c08c rw 0.000000 vopt_783ab44695b84fb6b900742e4e832362 0evopt_783ab44695b84fb6b900742e4e832362 rw 0.000000 vopt_82eabf37001e4df8b379338c9225710b 0evopt_82eabf37001e4df8b379338c9225710b rw 0.000000 vopt_89eabc793ad048e98740af17d025d480 0evopt_89eabc793ad048e98740af17d025d480 rw 0.000000 vopt_9a6b287f4e294268a5fd99d89722e78b 0evopt_9a6b287f4e294268a5fd99d89722e78b rw 0.000000 vopt_d77cf4160f044c108f6a8d111c677990 0evopt_d77cf4160f044c108f6a8d111c677990 rw 0.000000 vopt_f54cb325a723479dad385b94d0105a6c 0evopt_f54cb325a723479dad385b94d0105a6c rw 0.000000 VMLibrary 1 true MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C37-L1-T2-W50050768022121CA-L0 none SinglePath Failover 
01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwNA== false 102400 hdisk0 active 332136005076D02810187E00000000000000404214503IBMfcp true ent5 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent0 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L1-T1 U78AF.001.WZS04LA-P1-C36-L1-T1 13U78AF.001.WZS04LA-P1-C36-L1-T1 auto ent6 false 1 disabled 8192 true U7895.43X.21EF9FB-V2-C2 U7895.43X.21EF9FB-V2-C2 true true 2 ALL B28471747602 1 false false 0 ent4 2 U7895.43X.21EF9FB-V2-C4 U7895.43X.21EF9FB-V2-C4 false true 4 ALL B28471747604 4093 false 2134 2173 true 0 ent8 2 en6 Inactive 107f291dfe86e4fb5c true U7895.43X.21EF9FB-V2-C2 U7895.43X.21EF9FB-V2-C2 true true 2 ALL B28471747602 1 false false 0 ent4 2 U7895.43X.21EF9FB-V2-C4 U7895.43X.21EF9FB-V2-C4 false true 4 ALL B28471747604 4093 false 2134 2173 true 0 ent8 2 U7895.43X.21EF9FB-V2-C4 U7895.43X.21EF9FB-V2-C4 false true 8 ALL B28471747604 4092 false 2018 2019 true 0 ent11 2 U7895.43X.21EF9FB-V2-C4 U7895.43X.21EF9FB-V2-C32 false true 32 ALL B28471747605 2800 false 2801 true 1 ent12 2 true Client U7895.43X.21EF9FB-V63-C3 U7895.43X.21EF9FB-V63-C3 10 false true 3 2 94 c05076079cff0e56 c05076079cff0e57 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C94 U7895.43X.21EF9FB-V2-C94 2 false true 94 vfchost60 10 3 1dU7895.43X.21EF9FB-V2-C94 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V66-C4 U7895.43X.21EF9FB-V66-C4 10 false true 4 2 93 c05076079cff0e68 c05076079cff0e69 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C93 U7895.43X.21EF9FB-V2-C93 2 false true 93 vfchost63 10 4 1dU7895.43X.21EF9FB-V2-C93 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V62-C4 U7895.43X.21EF9FB-V62-C4 10 false true 4 2 92 c05076079cff0e4c 
c05076079cff0e4d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C92 U7895.43X.21EF9FB-V2-C92 2 false true 92 vfchost59 10 4 1dU7895.43X.21EF9FB-V2-C92 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V52-C4 U7895.43X.21EF9FB-V52-C4 52 false true 4 2 88 c05076079cff0e44 c05076079cff0e45 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C88 U7895.43X.21EF9FB-V2-C88 2 false true 88 vfchost53 52 4 1dU7895.43X.21EF9FB-V2-C88 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V61-C3 U7895.43X.21EF9FB-V61-C3 61 false true 3 2 85 c05076079cff08da c05076079cff08db U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C85 U7895.43X.21EF9FB-V2-C85 2 false true 85 vfchost49 61 3 1dU7895.43X.21EF9FB-V2-C85 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V54-C3 U7895.43X.21EF9FB-V54-C3 54 false true 3 2 75 c05076079cff0922 c05076079cff0923 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C75 U7895.43X.21EF9FB-V2-C75 2 false true 75 vfchost51 54 3 1dU7895.43X.21EF9FB-V2-C75 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V47-C4 U7895.43X.21EF9FB-V47-C4 47 false true 4 2 73 c05076079cff0858 c05076079cff0859 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C73 U7895.43X.21EF9FB-V2-C73 2 false true 73 vfchost45 47 4 1dU7895.43X.21EF9FB-V2-C73 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client 
U7895.43X.21EF9FB-V43-C3 U7895.43X.21EF9FB-V43-C3 43 false true 3 2 70 c05076079cff0f9a c05076079cff0f9b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C70 U7895.43X.21EF9FB-V2-C70 2 false true 70 vfchost40 43 3 1dU7895.43X.21EF9FB-V2-C70 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V32-C3 U7895.43X.21EF9FB-V32-C3 32 false true 3 2 68 c05076079cff0f96 c05076079cff0f97 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C68 U7895.43X.21EF9FB-V2-C68 2 false true 68 vfchost29 32 3 1dU7895.43X.21EF9FB-V2-C68 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V42-C4 U7895.43X.21EF9FB-V42-C4 42 false false 4 2 67 c05076079cff0f88 c05076079cff0f89 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C67 U7895.43X.21EF9FB-V2-C67 2 false true 67 vfchost39 42 4 1dU7895.43X.21EF9FB-V2-C67 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V44-C3 U7895.43X.21EF9FB-V44-C3 44 false true 3 2 66 c05076079cff0e0e c05076079cff0e0f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C66 U7895.43X.21EF9FB-V2-C66 2 false true 66 vfchost41 44 3 1dU7895.43X.21EF9FB-V2-C66 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V27-C3 U7895.43X.21EF9FB-V27-C3 27 false true 3 2 65 c05076079cff0e0a c05076079cff0e0b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C65 U7895.43X.21EF9FB-V2-C65 2 false true 65 vfchost20 27 3 1dU7895.43X.21EF9FB-V2-C65 fcs1 
U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V40-C3 U7895.43X.21EF9FB-V40-C3 40 false true 3 2 64 c05076079cff0f76 c05076079cff0f77 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C64 U7895.43X.21EF9FB-V2-C64 2 false true 64 vfchost37 40 3 1dU7895.43X.21EF9FB-V2-C64 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C63 U7895.43X.21EF9FB-V2-C63 2 false true 63 vfchost34 37 3 1dU7895.43X.21EF9FB-V2-C63 Client U7895.43X.21EF9FB-V33-C4 U7895.43X.21EF9FB-V33-C4 33 false true 4 2 62 c05076079cff0f38 c05076079cff0f39 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C62 U7895.43X.21EF9FB-V2-C62 2 false true 62 vfchost30 33 4 1dU7895.43X.21EF9FB-V2-C62 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V41-C4 U7895.43X.21EF9FB-V41-C4 41 false true 4 2 61 c05076079cff0df4 c05076079cff0df5 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C61 U7895.43X.21EF9FB-V2-C61 2 false true 61 vfchost38 41 4 1dU7895.43X.21EF9FB-V2-C61 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V51-C3 U7895.43X.21EF9FB-V51-C3 51 false true 3 2 58 c05076079cff0cf2 c05076079cff0cf3 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C58 U7895.43X.21EF9FB-V2-C58 2 false true 58 vfchost48 51 3 1dU7895.43X.21EF9FB-V2-C58 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V4-C4 U7895.43X.21EF9FB-V4-C4 4 false true 4 2 57 c05076079cff0f74 c05076079cff0f75 
U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C57 U7895.43X.21EF9FB-V2-C57 2 false true 57 vfchost1 4 4 1dU7895.43X.21EF9FB-V2-C57 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V39-C3 U7895.43X.21EF9FB-V39-C3 39 false false 3 2 56 c05076079cff0f6e c05076079cff0f6f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C56 U7895.43X.21EF9FB-V2-C56 2 false true 56 vfchost36 39 3 1dU7895.43X.21EF9FB-V2-C56 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C55 U7895.43X.21EF9FB-V2-C55 2 false true 55 vfchost35 38 4 1dU7895.43X.21EF9FB-V2-C55 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V36-C3 U7895.43X.21EF9FB-V36-C3 36 false false 3 2 54 c05076079cff0f26 c05076079cff0f27 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C54 U7895.43X.21EF9FB-V2-C54 2 false true 54 vfchost33 36 3 1dU7895.43X.21EF9FB-V2-C54 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V14-C4 U7895.43X.21EF9FB-V14-C4 14 false true 4 2 53 c05076079cff0eb0 c05076079cff0eb1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C53 U7895.43X.21EF9FB-V2-C53 2 false true 53 vfchost21 14 4 1dU7895.43X.21EF9FB-V2-C53 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V85-C3 U7895.43X.21EF9FB-V85-C3 85 false true 3 2 52 c05076079cff0d3e c05076079cff0d3f U78AF.001.WZS04LA-P1-C37-L1-T2 
fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C52 U7895.43X.21EF9FB-V2-C52 2 false true 52 vfchost82 85 3 1dU7895.43X.21EF9FB-V2-C52 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V35-C4 U7895.43X.21EF9FB-V35-C4 35 false true 4 2 50 c05076079cff0ddc c05076079cff0ddd U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C50 U7895.43X.21EF9FB-V2-C50 2 false true 50 vfchost31 35 4 1dU7895.43X.21EF9FB-V2-C50 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V46-C4 U7895.43X.21EF9FB-V46-C4 46 false true 4 2 140 c05076079cff0bfc c05076079cff0bfd U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C140 U7895.43X.21EF9FB-V2-C140 2 false true 140 vfchost43 46 4 1dU7895.43X.21EF9FB-V2-C140 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V7-C3 U7895.43X.21EF9FB-V7-C3 7 false true 3 2 45 c05076079cff0f92 c05076079cff0f93 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C45 U7895.43X.21EF9FB-V2-C45 2 false true 45 vfchost5 7 3 1dU7895.43X.21EF9FB-V2-C45 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V29-C3 U7895.43X.21EF9FB-V29-C3 29 false true 3 2 44 c05076079cff0f4e c05076079cff0f4f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C44 U7895.43X.21EF9FB-V2-C44 2 false true 44 vfchost25 29 3 1dU7895.43X.21EF9FB-V2-C44 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V19-C4 U7895.43X.21EF9FB-V19-C4 19 
false true 4 2 43 c05076079cff0ed8 c05076079cff0ed9 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C43 U7895.43X.21EF9FB-V2-C43 2 false true 43 vfchost17 19 4 1dU7895.43X.21EF9FB-V2-C43 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V25-C4 U7895.43X.21EF9FB-V25-C4 25 false false 4 2 42 c05076079cff0f44 c05076079cff0f45 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C42 U7895.43X.21EF9FB-V2-C42 2 false true 42 vfchost23 25 4 1dU7895.43X.21EF9FB-V2-C42 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V73-C4 U7895.43X.21EF9FB-V73-C4 73 false true 4 2 135 c05076079cff0d80 c05076079cff0d81 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C135 U7895.43X.21EF9FB-V2-C135 2 false true 135 vfchost70 73 4 1dU7895.43X.21EF9FB-V2-C135 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V20-C4 U7895.43X.21EF9FB-V20-C4 20 false true 4 2 40 c05076079cff0f84 c05076079cff0f85 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C40 U7895.43X.21EF9FB-V2-C40 2 false true 40 vfchost13 20 4 1dU7895.43X.21EF9FB-V2-C40 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V18-C3 U7895.43X.21EF9FB-V18-C3 18 false true 3 2 37 c05076079cff00d8 c05076079cff00d9 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C37 U7895.43X.21EF9FB-V2-C37 2 false true 37 vfchost22 18 3 1dU7895.43X.21EF9FB-V2-C37 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 
1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V23-C3 U7895.43X.21EF9FB-V23-C3 23 false false 3 2 36 c05076079cff0f7e c05076079cff0f7f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C36 U7895.43X.21EF9FB-V2-C36 2 false true 36 vfchost15 23 3 1dU7895.43X.21EF9FB-V2-C36 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V75-C4 U7895.43X.21EF9FB-V75-C4 75 false true 4 2 130 c05076079cff0d30 c05076079cff0d31 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C130 U7895.43X.21EF9FB-V2-C130 2 false true 130 vfchost72 75 4 1dU7895.43X.21EF9FB-V2-C130 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V24-C3 U7895.43X.21EF9FB-V24-C3 24 false true 3 2 35 c05076079cff0f32 c05076079cff0f33 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C35 U7895.43X.21EF9FB-V2-C35 2 false true 35 vfchost18 24 3 1dU7895.43X.21EF9FB-V2-C35 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V5-C4 U7895.43X.21EF9FB-V5-C4 5 false true 4 2 34 c05076079cff0f7c c05076079cff0f7d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C34 U7895.43X.21EF9FB-V2-C34 2 false true 34 vfchost2 5 4 1dU7895.43X.21EF9FB-V2-C34 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V22-C3 U7895.43X.21EF9FB-V22-C3 22 false true 3 2 32 c05076079cff0dc6 c05076079cff0dc7 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C32 U7895.43X.21EF9FB-V2-C32 2 false true 32 
vfchost19 22 3 1dU7895.43X.21EF9FB-V2-C32 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V21-C3 U7895.43X.21EF9FB-V21-C3 21 false false 3 2 31 c05076079cff0f2e c05076079cff0f2f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C31 U7895.43X.21EF9FB-V2-C31 2 false true 31 vfchost16 21 3 1dU7895.43X.21EF9FB-V2-C31 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V83-C3 U7895.43X.21EF9FB-V83-C3 83 false true 3 2 125 c05076079cff0b82 c05076079cff0b83 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C125 U7895.43X.21EF9FB-V2-C125 2 false true 125 vfchost80 83 3 1dU7895.43X.21EF9FB-V2-C125 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V31-C3 U7895.43X.21EF9FB-V31-C3 31 false true 3 2 30 c05076079cff0ea6 c05076079cff0ea7 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C30 U7895.43X.21EF9FB-V2-C30 2 false true 30 vfchost27 31 3 1dU7895.43X.21EF9FB-V2-C30 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V26-C3 U7895.43X.21EF9FB-V26-C3 26 false true 3 2 29 c05076079cff0f1a c05076079cff0f1b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C29 U7895.43X.21EF9FB-V2-C29 2 false true 29 vfchost24 26 3 1dU7895.43X.21EF9FB-V2-C29 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V80-C4 U7895.43X.21EF9FB-V80-C4 80 false true 4 2 123 c05076079cff0838 c05076079cff0839 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 
10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C123 U7895.43X.21EF9FB-V2-C123 2 false true 123 vfchost77 80 4 1dU7895.43X.21EF9FB-V2-C123 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V34-C3 U7895.43X.21EF9FB-V34-C3 34 false true 3 2 26 c05076079cff0f66 c05076079cff0f67 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C26 U7895.43X.21EF9FB-V2-C26 2 false true 26 vfchost32 34 3 1dU7895.43X.21EF9FB-V2-C26 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V11-C3 U7895.43X.21EF9FB-V11-C3 11 false false 3 2 25 c05076079cff04aa c05076079cff04ab U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C25 U7895.43X.21EF9FB-V2-C25 2 false true 25 vfchost14 11 3 1dU7895.43X.21EF9FB-V2-C25 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V10-C4 U7895.43X.21EF9FB-V10-C4 10 false true 4 2 24 c05076079cff0d90 c05076079cff0d91 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C24 U7895.43X.21EF9FB-V2-C24 2 false true 24 vfchost7 10 4 1dU7895.43X.21EF9FB-V2-C24 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V30-C3 U7895.43X.21EF9FB-V30-C3 30 false true 3 2 22 c05076079cff04fe c05076079cff04ff U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C22 U7895.43X.21EF9FB-V2-C22 2 false true 22 vfchost28 30 3 1dU7895.43X.21EF9FB-V2-C22 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V15-C4 U7895.43X.21EF9FB-V15-C4 15 false false 4 2 20 c05076079cff0fa0 
c05076079cff0fa1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C20 U7895.43X.21EF9FB-V2-C20 2 false true 20 vfchost10 15 4 1dU7895.43X.21EF9FB-V2-C20 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V9-C4 U7895.43X.21EF9FB-V9-C4 9 false true 4 2 19 c05076079cff0db8 c05076079cff0db9 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C19 U7895.43X.21EF9FB-V2-C19 2 false true 19 vfchost8 9 4 1dU7895.43X.21EF9FB-V2-C19 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V74-C4 U7895.43X.21EF9FB-V74-C4 74 false true 4 2 113 c05076079cff0e84 c05076079cff0e85 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C113 U7895.43X.21EF9FB-V2-C113 2 false true 113 vfchost71 74 4 1dU7895.43X.21EF9FB-V2-C113 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V17-C4 U7895.43X.21EF9FB-V17-C4 17 false false 4 2 18 c05076079cff0f3c c05076079cff0f3d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C18 U7895.43X.21EF9FB-V2-C18 2 false true 18 vfchost12 17 4 1dU7895.43X.21EF9FB-V2-C18 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V13-C4 U7895.43X.21EF9FB-V13-C4 13 false true 4 2 16 c05076079cff0d7c c05076079cff0d7d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C16 U7895.43X.21EF9FB-V2-C16 2 false true 16 vfchost9 13 4 1dU7895.43X.21EF9FB-V2-C16 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client 
U7895.43X.21EF9FB-V6-C4 U7895.43X.21EF9FB-V6-C4 6 false true 4 2 14 c05076079cff0f90 c05076079cff0f91 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C14 U7895.43X.21EF9FB-V2-C14 2 false true 14 vfchost4 6 4 1dU7895.43X.21EF9FB-V2-C14 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V68-C4 U7895.43X.21EF9FB-V68-C4 68 false true 4 2 108 c05076079cff0c6c c05076079cff0c6d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C108 U7895.43X.21EF9FB-V2-C108 2 false true 108 vfchost65 68 4 1dU7895.43X.21EF9FB-V2-C108 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V8-C3 U7895.43X.21EF9FB-V8-C3 8 false true 3 2 12 c05076079cff0f56 c05076079cff0f57 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C12 U7895.43X.21EF9FB-V2-C12 2 false true 12 vfchost6 8 3 1dU7895.43X.21EF9FB-V2-C12 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V3-C4 U7895.43X.21EF9FB-V3-C4 3 false true 4 2 11 c05076079cff07bc c05076079cff07bd U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C11 U7895.43X.21EF9FB-V2-C11 2 false true 11 vfchost0 3 4 1dU7895.43X.21EF9FB-V2-C11 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V12-C3 U7895.43X.21EF9FB-V12-C3 12 false true 3 2 9 c05076079cff0f4a c05076079cff0f4b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C9 U7895.43X.21EF9FB-V2-C9 2 false true 9 vfchost3 12 3 1dU7895.43X.21EF9FB-V2-C9 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 
fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V53-C3 U7895.43X.21EF9FB-V53-C3 53 false false 3 2 100 c05076079cff045e c05076079cff045f U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C100 U7895.43X.21EF9FB-V2-C100 2 false true 100 vfchost50 53 3 1dU7895.43X.21EF9FB-V2-C100 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V53-C3 U7895.43X.21EF9FB-V53-C3 100 false false 3 2 100 c05076079cff045e c05076079cff045f U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C100 U7895.43X.21EF9FB-V2-C100 2 false true 100 vfchost50 53 3 1dU7895.43X.21EF9FB-V2-C100 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V42-C2 U7895.43X.21EF9FB-V42-C2 42 false false 2 2 46 U7895.43X.21EF9FB-V2-C46 Server U7895.43X.21EF9FB-V2-C46 U7895.43X.21EF9FB-V2-C46 2 false true 46 vhost19 42 2 U7895.43X.21EF9FB-V42-C2 1eU7895.43X.21EF9FB-V2-C46 Client U7895.43X.21EF9FB-V46-C2 U7895.43X.21EF9FB-V46-C2 46 false true 2 2 139 U7895.43X.21EF9FB-V2-C139 Server U7895.43X.21EF9FB-V2-C139 U7895.43X.21EF9FB-V2-C139 2 false true 139 vhost51 46 2 U7895.43X.21EF9FB-V46-C2 1eU7895.43X.21EF9FB-V2-C139 Client U7895.43X.21EF9FB-V62-C2 U7895.43X.21EF9FB-V62-C2 62 false true 2 2 91 U7895.43X.21EF9FB-V2-C91 Server U7895.43X.21EF9FB-V2-C91 U7895.43X.21EF9FB-V2-C91 2 false true 91 vhost31 62 2 U7895.43X.21EF9FB-V62-C2 1eU7895.43X.21EF9FB-V2-C91 Client U7895.43X.21EF9FB-V19-C2 U7895.43X.21EF9FB-V19-C2 19 false true 2 2 41 U7895.43X.21EF9FB-V2-C41 Server U7895.43X.21EF9FB-V2-C41 U7895.43X.21EF9FB-V2-C41 2 false true 41 vhost16 19 2 U7895.43X.21EF9FB-V19-C2 1eU7895.43X.21EF9FB-V2-C41 Client U7895.43X.21EF9FB-V75-C2 U7895.43X.21EF9FB-V75-C2 75 false true 2 2 87 U7895.43X.21EF9FB-V2-C87 Server 
U7895.43X.21EF9FB-V2-C87 U7895.43X.21EF9FB-V2-C87 2 false true 87 vhost40 75 2 U7895.43X.21EF9FB-V75-C2 1eU7895.43X.21EF9FB-V2-C87 Client U7895.43X.21EF9FB-V33-C2 U7895.43X.21EF9FB-V33-C2 33 false true 2 2 39 U7895.43X.21EF9FB-V2-C39 Server U7895.43X.21EF9FB-V2-C39 U7895.43X.21EF9FB-V2-C39 2 false true 39 vhost3 33 2 U7895.43X.21EF9FB-V33-C2 1eU7895.43X.21EF9FB-V2-C39 Client U7895.43X.21EF9FB-V14-C3 U7895.43X.21EF9FB-V14-C3 14 false true 3 2 38 U7895.43X.21EF9FB-V2-C38 Server U7895.43X.21EF9FB-V2-C38 U7895.43X.21EF9FB-V2-C38 2 false true 38 vhost10 14 3 U7895.43X.21EF9FB-V14-C3 1eU7895.43X.21EF9FB-V2-C38 Client U7895.43X.21EF9FB-V66-C2 U7895.43X.21EF9FB-V66-C2 66 false true 2 2 81 U7895.43X.21EF9FB-V2-C81 Server U7895.43X.21EF9FB-V2-C81 U7895.43X.21EF9FB-V2-C81 2 false true 81 vhost23 66 2 U7895.43X.21EF9FB-V66-C2 1eU7895.43X.21EF9FB-V2-C81 Client U7895.43X.21EF9FB-V5-C2 U7895.43X.21EF9FB-V5-C2 5 false true 2 2 33 U7895.43X.21EF9FB-V2-C33 Server U7895.43X.21EF9FB-V2-C33 U7895.43X.21EF9FB-V2-C33 2 false true 33 vhost7 5 2 U7895.43X.21EF9FB-V5-C2 1eU7895.43X.21EF9FB-V2-C33 Client U7895.43X.21EF9FB-V52-C3 U7895.43X.21EF9FB-V52-C3 52 false true 3 2 77 U7895.43X.21EF9FB-V2-C77 Server U7895.43X.21EF9FB-V2-C77 U7895.43X.21EF9FB-V2-C77 2 false true 77 vhost26 52 3 U7895.43X.21EF9FB-V52-C3 1eU7895.43X.21EF9FB-V2-C77 Client U7895.43X.21EF9FB-V80-C2 U7895.43X.21EF9FB-V80-C2 80 false true 2 2 122 U7895.43X.21EF9FB-V2-C122 Server U7895.43X.21EF9FB-V2-C122 U7895.43X.21EF9FB-V2-C122 2 false true 122 vhost38 80 2 U7895.43X.21EF9FB-V80-C2 1eU7895.43X.21EF9FB-V2-C122 Client U7895.43X.21EF9FB-V18-C2 U7895.43X.21EF9FB-V18-C2 18 false true 2 2 28 U7895.43X.21EF9FB-V2-C28 Server U7895.43X.21EF9FB-V2-C28 U7895.43X.21EF9FB-V2-C28 2 false true 28 vhost5 18 2 U7895.43X.21EF9FB-V18-C2 1eU7895.43X.21EF9FB-V2-C28 Client U7895.43X.21EF9FB-V25-C2 U7895.43X.21EF9FB-V25-C2 25 false false 2 2 27 U7895.43X.21EF9FB-V2-C27 Server U7895.43X.21EF9FB-V2-C27 U7895.43X.21EF9FB-V2-C27 2 false true 27 
vhost11 25 2 U7895.43X.21EF9FB-V25-C2 1eU7895.43X.21EF9FB-V2-C27 Client U7895.43X.21EF9FB-V73-C2 U7895.43X.21EF9FB-V73-C2 73 false true 2 2 71 U7895.43X.21EF9FB-V2-C71 Server U7895.43X.21EF9FB-V2-C71 U7895.43X.21EF9FB-V2-C71 2 false true 71 vhost47 73 2 U7895.43X.21EF9FB-V73-C2 1eU7895.43X.21EF9FB-V2-C71 Client U7895.43X.21EF9FB-V74-C2 U7895.43X.21EF9FB-V74-C2 74 false true 2 2 112 U7895.43X.21EF9FB-V2-C112 Server U7895.43X.21EF9FB-V2-C112 U7895.43X.21EF9FB-V2-C112 2 false true 112 vhost35 74 2 U7895.43X.21EF9FB-V74-C2 1eU7895.43X.21EF9FB-V2-C112 Client U7895.43X.21EF9FB-V17-C2 U7895.43X.21EF9FB-V17-C2 17 false false 2 2 17 U7895.43X.21EF9FB-V2-C17 Server U7895.43X.21EF9FB-V2-C17 U7895.43X.21EF9FB-V2-C17 2 false true 17 vhost4 17 2 U7895.43X.21EF9FB-V17-C2 1eU7895.43X.21EF9FB-V2-C17 Client U7895.43X.21EF9FB-V10-C2 U7895.43X.21EF9FB-V10-C2 10 false true 2 2 15 U7895.43X.21EF9FB-V2-C15 Server U7895.43X.21EF9FB-V2-C15 U7895.43X.21EF9FB-V2-C15 2 false true 15 vhost6 10 2 U7895.43X.21EF9FB-V10-C2 1eU7895.43X.21EF9FB-V2-C15 Client U7895.43X.21EF9FB-V47-C2 U7895.43X.21EF9FB-V47-C2 47 false true 2 2 60 U7895.43X.21EF9FB-V2-C60 Server U7895.43X.21EF9FB-V2-C60 U7895.43X.21EF9FB-V2-C60 2 false true 60 vhost15 47 2 U7895.43X.21EF9FB-V47-C2 1eU7895.43X.21EF9FB-V2-C60 Client U7895.43X.21EF9FB-V6-C3 U7895.43X.21EF9FB-V6-C3 6 false true 3 2 13 U7895.43X.21EF9FB-V2-C13 Server U7895.43X.21EF9FB-V2-C13 U7895.43X.21EF9FB-V2-C13 2 false true 13 vhost20 6 3 U7895.43X.21EF9FB-V6-C3 1eU7895.43X.21EF9FB-V2-C13 Client U7895.43X.21EF9FB-V41-C3 U7895.43X.21EF9FB-V41-C3 41 false true 3 2 59 U7895.43X.21EF9FB-V2-C59 Server U7895.43X.21EF9FB-V2-C59 U7895.43X.21EF9FB-V2-C59 2 false true 59 vhost17 41 3 U7895.43X.21EF9FB-V41-C3 1eU7895.43X.21EF9FB-V2-C59 Client U7895.43X.21EF9FB-V68-C2 U7895.43X.21EF9FB-V68-C2 68 false true 2 2 105 U7895.43X.21EF9FB-V2-C105 Server U7895.43X.21EF9FB-V2-C105 U7895.43X.21EF9FB-V2-C105 2 false true 105 vhost27 68 2 U7895.43X.21EF9FB-V68-C2 1eU7895.43X.21EF9FB-V2-C105 
Client U7895.43X.21EF9FB-V13-C2 U7895.43X.21EF9FB-V13-C2 13 false true 2 2 10 U7895.43X.21EF9FB-V2-C10 Server U7895.43X.21EF9FB-V2-C10 U7895.43X.21EF9FB-V2-C10 2 false true 10 vhost1 13 2 U7895.43X.21EF9FB-V13-C2 1eU7895.43X.21EF9FB-V2-C10 Client U7895.43X.21EF9FB-V9-C2 U7895.43X.21EF9FB-V9-C2 9 false true 2 2 8 U7895.43X.21EF9FB-V2-C8 Server U7895.43X.21EF9FB-V2-C8 U7895.43X.21EF9FB-V2-C8 2 false true 8 vhost0 9 2 U7895.43X.21EF9FB-V9-C2 1eU7895.43X.21EF9FB-V2-C8 Client U7895.43X.21EF9FB-V20-C2 U7895.43X.21EF9FB-V20-C2 20 false true 2 2 7 U7895.43X.21EF9FB-V2-C7 Server U7895.43X.21EF9FB-V2-C7 U7895.43X.21EF9FB-V2-C7 2 false true 7 vhost9 20 2 U7895.43X.21EF9FB-V20-C2 1eU7895.43X.21EF9FB-V2-C7 Client U7895.43X.21EF9FB-V4-C2 U7895.43X.21EF9FB-V4-C2 4 false true 2 2 6 U7895.43X.21EF9FB-V2-C6 Server U7895.43X.21EF9FB-V2-C6 U7895.43X.21EF9FB-V2-C6 2 false true 6 vhost18 4 2 U7895.43X.21EF9FB-V4-C2 1eU7895.43X.21EF9FB-V2-C6 Client U7895.43X.21EF9FB-V53-C2 U7895.43X.21EF9FB-V53-C2 53 false false 2 2 99 U7895.43X.21EF9FB-V2-C99 Server U7895.43X.21EF9FB-V2-C99 U7895.43X.21EF9FB-V2-C99 2 false true 99 vhost42 53 2 U7895.43X.21EF9FB-V53-C2 1eU7895.43X.21EF9FB-V2-C99 Client U7895.43X.21EF9FB-V3-C2 U7895.43X.21EF9FB-V3-C2 3 false true 2 2 5 U7895.43X.21EF9FB-V2-C5 Server U7895.43X.21EF9FB-V2-C5 U7895.43X.21EF9FB-V2-C5 2 false true 5 vhost2 3 2 U7895.43X.21EF9FB-V3-C2 1eU7895.43X.21EF9FB-V2-C5 Server U7895.43X.21EF9FB-V2-C51 U7895.43X.21EF9FB-V2-C51 2 false true 51 vhost13 38 2 1eU7895.43X.21EF9FB-V2-C51 Client U7895.43X.21EF9FB-V35-C2 U7895.43X.21EF9FB-V35-C2 35 false true 2 2 49 U7895.43X.21EF9FB-V2-C49 Server U7895.43X.21EF9FB-V2-C49 U7895.43X.21EF9FB-V2-C49 2 false true 49 vhost14 35 2 U7895.43X.21EF9FB-V35-C2 1eU7895.43X.21EF9FB-V2-C49 Client U7895.43X.21EF9FB-V37-C2 U7895.43X.21EF9FB-V37-C2 37 false false 2 2 21 U7895.43X.21EF9FB-V2-C21 Server U7895.43X.21EF9FB-V2-C21 U7895.43X.21EF9FB-V2-C21 2 false true 21 vhost8 37 2 U7895.43X.21EF9FB-V37-C2 1eU7895.43X.21EF9FB-V2-C21 
1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L2-T2 U78AF.001.WZS04LA-P1-C36-L2-T2 13U78AF.001.WZS04LA-P1-C36-L2-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L1-T2 U78AF.001.WZS04LA-P1-C36-L1-T2 13U78AF.001.WZS04LA-P1-C36-L1-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L2-T1 U78AF.001.WZS04LA-P1-C36-L2-T1 13U78AF.001.WZS04LA-P1-C36-L2-T1 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 U78AF.001.WZS04LA-P1-C36-L2-T2 en3 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 U78AF.001.WZS04LA-P1-C36-L1-T2 en1 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 U78AF.001.WZS04LA-P1-C36-L2-T1 en2 9.1.2.5 255.255.255.0 Active 7DBBE705-E4C4-4458-8223-3EBE07015CA9 VirtualIOServer 2015-03-26T08:57:58.767Z IBM Power Systems Management Console 608448218 7DBBE705-E4C4-4458-8223-3EBE07015CA9 1427360278204 false 191 0 POWER7 On false true false false false false false normal 21EF9FB1 VIOS 2.2.3.1 true true true true true 1 400 false Ethernet controller U78AF.001.WZS04LA 1808 512 1808 59187 6562 3 6562 4319 false false false false false false false false false false 553714177 Ethernet controller U78AF.001.WZS04LA-P1-C34-L1 U78AF.001.WZS04LA-P1-C34-L1 C34 1808 553714177 U78AF.001.WZS04LA-P1-C34-L1 C34 false Ethernet controller U78AF.001.WZS04LA 1808 512 1808 59187 6562 3 6562 4319 false false false false false false false false false false 553714192 Ethernet controller U78AF.001.WZS04LA-P1-C34-L2 U78AF.001.WZS04LA-P1-C34-L2 C34 1808 553714192 U78AF.001.WZS04LA-P1-C34-L2 C34 false FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA 57856 3076 57856 57858 4319 16 4319 4319 false false false false false false false false false false 553714179 FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA-P1-C35-L1 U78AF.001.WZS04LA-P1-C35-L1 C35 U78AF.001.WZS04LA-P1-C35-L1-T2 MPIO IBM 
2076 FC Disk U78AF.001.WZS04LA-P1-C35-L1-T1-W50050768021121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwMw== false 102400 hdisk0 active 332136005076D02810187E00000000000000304214503IBMfcp true fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 U78AF.001.WZS04LA-P1-C35-L1-T1 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C35-L1-T1-W50050768021121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwMw== false 102400 hdisk0 active 332136005076D02810187E00000000000000304214503IBMfcp true fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 553714179 U78AF.001.WZS04LA-P1-C35-L1 C35 400 false false 0 24576 0.0 7 false 24576 24576 false 0 0.0 7 0 0 24576 24576 0 24576 false true false false 0 24576 24576 false nimbus-ch03-p2-vios1 false 1 2 1 2 1 2 0 capped false capped false 2 1 1 1 0 2 2 1 running Virtual IO Server 7DBBE705-E4C4-4458-8223-3EBE07015CA9 default 0 0 active 9.1.2.4 196281927536384 false true true vopt_4556d33e7f404e72b5fccc126e2038d9 0evopt_4556d33e7f404e72b5fccc126e2038d9 rw 0.000000 vopt_4bd422fb29d24b369cd93d674606d9ee 0evopt_4bd422fb29d24b369cd93d674606d9ee rw 0.000000 vopt_56c31f83256f49b8ab71810dd3bcf115 0evopt_56c31f83256f49b8ab71810dd3bcf115 rw 0.000000 vopt_8228f1bb5b6941ce8cf7947bdbbd4123 0evopt_8228f1bb5b6941ce8cf7947bdbbd4123 rw 0.000000 VMLibrary 7 true MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C35-L1-T1-W50050768021121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwMw== false 102400 hdisk0 active 332136005076D02810187E00000000000000304214503IBMfcp true ent5 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent0 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L1-T1 U78AF.001.WZS04LA-P1-C34-L1-T1 13U78AF.001.WZS04LA-P1-C34-L1-T1 auto ent6 false 1 disabled 8192 true U7895.43X.21EF9FB-V1-C2 U7895.43X.21EF9FB-V1-C2 true true 2 ALL B28472910F02 1 false false 0 ent4 1 U7895.43X.21EF9FB-V1-C4 U7895.43X.21EF9FB-V1-C4 
false true 4 ALL B28472910F04 4093 false 2134 2173 true 0 ent8 1 en6 Inactive 10beee7d111da8da1f true U7895.43X.21EF9FB-V1-C2 U7895.43X.21EF9FB-V1-C2 true true 2 ALL B28472910F02 1 false false 0 ent4 1 U7895.43X.21EF9FB-V1-C4 U7895.43X.21EF9FB-V1-C4 false true 4 ALL B28472910F04 4093 false 2134 2173 true 0 ent8 1 true Client U7895.43X.21EF9FB-V63-C4 U7895.43X.21EF9FB-V63-C4 63 false true 4 1 94 c05076079cff0e58 c05076079cff0e59 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C94 U7895.43X.21EF9FB-V1-C94 1 false true 94 vfchost58 63 4 1dU7895.43X.21EF9FB-V1-C94 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V62-C3 U7895.43X.21EF9FB-V62-C3 62 false true 3 1 92 c05076079cff0e4a c05076079cff0e4b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C92 U7895.43X.21EF9FB-V1-C92 1 false true 92 vfchost57 62 3 1dU7895.43X.21EF9FB-V1-C92 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V61-C4 U7895.43X.21EF9FB-V61-C4 61 false true 4 1 91 c05076079cff08dc c05076079cff08dd U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C91 U7895.43X.21EF9FB-V1-C91 1 false true 91 vfchost49 61 4 1dU7895.43X.21EF9FB-V1-C91 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V68-C3 U7895.43X.21EF9FB-V68-C3 68 false true 3 1 85 c05076079cff0c6a c05076079cff0c6b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C85 U7895.43X.21EF9FB-V1-C85 1 false true 85 vfchost63 68 3 1dU7895.43X.21EF9FB-V1-C85 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client 
U7895.43X.21EF9FB-V52-C2 U7895.43X.21EF9FB-V52-C2 52 false true 2 1 83 c05076079cff0e42 c05076079cff0e43 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C83 U7895.43X.21EF9FB-V1-C83 1 false true 83 vfchost52 52 2 1dU7895.43X.21EF9FB-V1-C83 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V31-C4 U7895.43X.21EF9FB-V31-C4 31 false true 4 1 81 c05076079cff0ea8 c05076079cff0ea9 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C81 U7895.43X.21EF9FB-V1-C81 1 false true 81 vfchost27 31 4 1dU7895.43X.21EF9FB-V1-C81 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V44-C4 U7895.43X.21EF9FB-V44-C4 44 false true 4 1 80 c05076079cff0e10 c05076079cff0e11 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C80 U7895.43X.21EF9FB-V1-C80 1 false true 80 vfchost41 44 4 1dU7895.43X.21EF9FB-V1-C80 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V47-C3 U7895.43X.21EF9FB-V47-C3 47 false true 3 1 77 c05076079cff0856 c05076079cff0857 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C77 U7895.43X.21EF9FB-V1-C77 1 false true 77 vfchost44 47 3 1dU7895.43X.21EF9FB-V1-C77 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V41-C2 U7895.43X.21EF9FB-V41-C2 41 false true 2 1 74 c05076079cff0df2 c05076079cff0df3 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C74 U7895.43X.21EF9FB-V1-C74 1 false true 74 vfchost38 41 2 1dU7895.43X.21EF9FB-V1-C74 fcs0 
U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V51-C4 U7895.43X.21EF9FB-V51-C4 51 false true 4 1 73 c05076079cff0cf4 c05076079cff0cf5 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C73 U7895.43X.21EF9FB-V1-C73 1 false true 73 vfchost48 51 4 1dU7895.43X.21EF9FB-V1-C73 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V46-C3 U7895.43X.21EF9FB-V46-C3 46 false true 3 1 72 c05076079cff0bfa c05076079cff0bfb U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C72 U7895.43X.21EF9FB-V1-C72 1 false true 72 vfchost43 46 3 1dU7895.43X.21EF9FB-V1-C72 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V43-C4 U7895.43X.21EF9FB-V43-C4 43 false true 4 1 67 c05076079cff0f9c c05076079cff0f9d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C67 U7895.43X.21EF9FB-V1-C67 1 false true 67 vfchost40 43 4 1dU7895.43X.21EF9FB-V1-C67 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V42-C3 U7895.43X.21EF9FB-V42-C3 42 false false 3 1 66 c05076079cff0f86 c05076079cff0f87 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C66 U7895.43X.21EF9FB-V1-C66 1 false true 66 vfchost39 42 3 1dU7895.43X.21EF9FB-V1-C66 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V23-C4 U7895.43X.21EF9FB-V23-C4 23 false false 4 1 65 c05076079cff0f80 c05076079cff0f81 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C65 
U7895.43X.21EF9FB-V1-C65 1 false true 65 vfchost17 23 4 1dU7895.43X.21EF9FB-V1-C65 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V40-C4 U7895.43X.21EF9FB-V40-C4 40 false true 4 1 63 c05076079cff0f78 c05076079cff0f79 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C63 U7895.43X.21EF9FB-V1-C63 1 false true 63 vfchost37 40 4 1dU7895.43X.21EF9FB-V1-C63 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V30-C4 U7895.43X.21EF9FB-V30-C4 30 false true 4 1 62 c05076079cff0500 c05076079cff0501 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C62 U7895.43X.21EF9FB-V1-C62 1 false true 62 vfchost24 30 4 1dU7895.43X.21EF9FB-V1-C62 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V39-C4 U7895.43X.21EF9FB-V39-C4 39 false false 4 1 61 c05076079cff0f70 c05076079cff0f71 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C61 U7895.43X.21EF9FB-V1-C61 1 false true 61 vfchost36 39 4 1dU7895.43X.21EF9FB-V1-C61 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C59 U7895.43X.21EF9FB-V1-C59 1 false true 59 vfchost35 38 3 1dU7895.43X.21EF9FB-V1-C59 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V34-C4 U7895.43X.21EF9FB-V34-C4 34 false true 4 1 58 c05076079cff0f68 c05076079cff0f69 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C58 U7895.43X.21EF9FB-V1-C58 1 
false true 58 vfchost32 34 4 1dU7895.43X.21EF9FB-V1-C58 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V8-C4 U7895.43X.21EF9FB-V8-C4 8 false true 4 1 57 c05076079cff0f58 c05076079cff0f59 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C57 U7895.43X.21EF9FB-V1-C57 1 false true 57 vfchost6 8 4 1dU7895.43X.21EF9FB-V1-C57 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V29-C4 U7895.43X.21EF9FB-V29-C4 29 false true 4 1 56 c05076079cff0f50 c05076079cff0f51 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C56 U7895.43X.21EF9FB-V1-C56 1 false true 56 vfchost28 29 4 1dU7895.43X.21EF9FB-V1-C56 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V73-C3 U7895.43X.21EF9FB-V73-C3 73 false true 3 1 55 c05076079cff0d7e c05076079cff0d7f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C55 U7895.43X.21EF9FB-V1-C55 1 false true 55 vfchost68 73 3 1dU7895.43X.21EF9FB-V1-C55 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V35-C3 U7895.43X.21EF9FB-V35-C3 35 false true 3 1 49 c05076079cff0dda c05076079cff0ddb U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C49 U7895.43X.21EF9FB-V1-C49 1 false true 49 vfchost31 35 3 1dU7895.43X.21EF9FB-V1-C49 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V33-C3 U7895.43X.21EF9FB-V33-C3 33 false true 3 1 48 c05076079cff0f36 c05076079cff0f37 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 
10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C48 U7895.43X.21EF9FB-V1-C48 1 false true 48 vfchost30 33 3 1dU7895.43X.21EF9FB-V1-C48 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V36-C4 U7895.43X.21EF9FB-V36-C4 36 false false 4 1 47 c05076079cff0f28 c05076079cff0f29 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C47 U7895.43X.21EF9FB-V1-C47 1 false true 47 vfchost33 36 4 1dU7895.43X.21EF9FB-V1-C47 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V11-C4 U7895.43X.21EF9FB-V11-C4 11 false false 4 1 46 c05076079cff04ac c05076079cff04ad U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C46 U7895.43X.21EF9FB-V1-C46 1 false true 46 vfchost14 11 4 1dU7895.43X.21EF9FB-V1-C46 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V12-C4 U7895.43X.21EF9FB-V12-C4 12 false true 4 1 45 c05076079cff0f4c c05076079cff0f4d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C45 U7895.43X.21EF9FB-V1-C45 1 false true 45 vfchost3 12 4 1dU7895.43X.21EF9FB-V1-C45 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V26-C4 U7895.43X.21EF9FB-V26-C4 26 false true 4 1 42 c05076079cff0f1c c05076079cff0f1d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C42 U7895.43X.21EF9FB-V1-C42 1 false true 42 vfchost25 26 4 1dU7895.43X.21EF9FB-V1-C42 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V32-C4 U7895.43X.21EF9FB-V32-C4 32 false true 4 1 41 c05076079cff0f98 
c05076079cff0f99 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C41 U7895.43X.21EF9FB-V1-C41 1 false true 41 vfchost29 32 4 1dU7895.43X.21EF9FB-V1-C41 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V24-C4 U7895.43X.21EF9FB-V24-C4 24 false true 4 1 39 c05076079cff0f34 c05076079cff0f35 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C39 U7895.43X.21EF9FB-V1-C39 1 false true 39 vfchost18 24 4 1dU7895.43X.21EF9FB-V1-C39 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V22-C4 U7895.43X.21EF9FB-V22-C4 22 false true 4 1 36 c05076079cff0dc8 c05076079cff0dc9 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C36 U7895.43X.21EF9FB-V1-C36 1 false true 36 vfchost19 22 4 1dU7895.43X.21EF9FB-V1-C36 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V27-C4 U7895.43X.21EF9FB-V27-C4 27 false true 4 1 35 c05076079cff0e0c c05076079cff0e0d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C35 U7895.43X.21EF9FB-V1-C35 1 false true 35 vfchost23 27 4 1dU7895.43X.21EF9FB-V1-C35 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V25-C3 U7895.43X.21EF9FB-V25-C3 25 false false 3 1 33 c05076079cff0f42 c05076079cff0f43 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C33 U7895.43X.21EF9FB-V1-C33 1 false true 33 vfchost20 25 3 1dU7895.43X.21EF9FB-V1-C33 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client 
U7895.43X.21EF9FB-V18-C4 U7895.43X.21EF9FB-V18-C4 18 false true 4 1 32 c05076079cff00da c05076079cff00db U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C32 U7895.43X.21EF9FB-V1-C32 1 false true 32 vfchost22 18 4 1dU7895.43X.21EF9FB-V1-C32 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V17-C3 U7895.43X.21EF9FB-V17-C3 17 false false 3 1 31 c05076079cff0f3a c05076079cff0f3b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C31 U7895.43X.21EF9FB-V1-C31 1 false true 31 vfchost12 17 3 1dU7895.43X.21EF9FB-V1-C31 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V20-C3 U7895.43X.21EF9FB-V20-C3 20 false true 3 1 30 c05076079cff0f82 c05076079cff0f83 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C30 U7895.43X.21EF9FB-V1-C30 1 false true 30 vfchost13 20 3 1dU7895.43X.21EF9FB-V1-C30 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V14-C2 U7895.43X.21EF9FB-V14-C2 14 false true 2 1 29 c05076079cff0eae c05076079cff0eaf U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C29 U7895.43X.21EF9FB-V1-C29 1 false true 29 vfchost21 14 2 1dU7895.43X.21EF9FB-V1-C29 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V10-C3 U7895.43X.21EF9FB-V10-C3 10 false true 3 1 27 c05076079cff0d8e c05076079cff0d8f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C27 U7895.43X.21EF9FB-V1-C27 1 false true 27 vfchost7 10 3 1dU7895.43X.21EF9FB-V1-C27 fcs0 
U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V21-C4 U7895.43X.21EF9FB-V21-C4 21 false false 4 1 25 c05076079cff0f30 c05076079cff0f31 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C25 U7895.43X.21EF9FB-V1-C25 1 false true 25 vfchost15 21 4 1dU7895.43X.21EF9FB-V1-C25 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V85-C4 U7895.43X.21EF9FB-V85-C4 85 false true 4 1 119 c05076079cff0d40 c05076079cff0d41 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C119 U7895.43X.21EF9FB-V1-C119 1 false true 119 vfchost77 85 4 1dU7895.43X.21EF9FB-V1-C119 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V80-C3 U7895.43X.21EF9FB-V80-C3 80 false true 3 1 118 c05076079cff0836 c05076079cff0837 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C118 U7895.43X.21EF9FB-V1-C118 1 false true 118 vfchost75 80 3 1dU7895.43X.21EF9FB-V1-C118 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V19-C3 U7895.43X.21EF9FB-V19-C3 19 false true 3 1 23 c05076079cff0ed6 c05076079cff0ed7 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C23 U7895.43X.21EF9FB-V1-C23 1 false true 23 vfchost16 19 3 1dU7895.43X.21EF9FB-V1-C23 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V83-C4 U7895.43X.21EF9FB-V83-C4 83 false true 4 1 116 c05076079cff0b84 c05076079cff0b85 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server 
U7895.43X.21EF9FB-V1-C116 U7895.43X.21EF9FB-V1-C116 1 false true 116 vfchost78 83 4 1dU7895.43X.21EF9FB-V1-C116 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V15-C3 U7895.43X.21EF9FB-V15-C3 15 false false 3 1 17 c05076079cff0f9e c05076079cff0f9f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C17 U7895.43X.21EF9FB-V1-C17 1 false true 17 vfchost10 15 3 1dU7895.43X.21EF9FB-V1-C17 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V9-C3 U7895.43X.21EF9FB-V9-C3 9 false true 3 1 16 c05076079cff0db6 c05076079cff0db7 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C16 U7895.43X.21EF9FB-V1-C16 1 false true 16 vfchost8 9 3 1dU7895.43X.21EF9FB-V1-C16 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V13-C3 U7895.43X.21EF9FB-V13-C3 13 false true 3 1 15 c05076079cff0d7a c05076079cff0d7b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C15 U7895.43X.21EF9FB-V1-C15 1 false true 15 vfchost9 13 3 1dU7895.43X.21EF9FB-V1-C15 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V74-C3 U7895.43X.21EF9FB-V74-C3 74 false true 3 1 109 c05076079cff0e82 c05076079cff0e83 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C109 U7895.43X.21EF9FB-V1-C109 1 false true 109 vfchost69 74 3 1dU7895.43X.21EF9FB-V1-C109 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V54-C4 U7895.43X.21EF9FB-V54-C4 54 false true 4 1 107 c05076079cff0924 c05076079cff0925 
U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C107 U7895.43X.21EF9FB-V1-C107 1 false true 107 vfchost50 54 4 1dU7895.43X.21EF9FB-V1-C107 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V7-C4 U7895.43X.21EF9FB-V7-C4 7 false true 4 1 12 c05076079cff0f94 c05076079cff0f95 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C12 U7895.43X.21EF9FB-V1-C12 1 false true 12 vfchost5 7 4 1dU7895.43X.21EF9FB-V1-C12 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V6-C2 U7895.43X.21EF9FB-V6-C2 6 false true 2 1 9 c05076079cff0f8e c05076079cff0f8f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C9 U7895.43X.21EF9FB-V1-C9 1 false true 9 vfchost4 6 2 1dU7895.43X.21EF9FB-V1-C9 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V75-C3 U7895.43X.21EF9FB-V75-C3 75 false true 3 1 102 c05076079cff0d2e c05076079cff0d2f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C102 U7895.43X.21EF9FB-V1-C102 1 false true 102 vfchost70 75 3 1dU7895.43X.21EF9FB-V1-C102 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V5-C3 U7895.43X.21EF9FB-V5-C3 5 false true 3 1 7 c05076079cff0f7a c05076079cff0f7b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C7 U7895.43X.21EF9FB-V1-C7 1 false true 7 vfchost2 5 3 1dU7895.43X.21EF9FB-V1-C7 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V4-C3 
U7895.43X.21EF9FB-V4-C3 4 false true 3 1 6 c05076079cff0f72 c05076079cff0f73 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C6 U7895.43X.21EF9FB-V1-C6 1 false true 6 vfchost1 4 3 1dU7895.43X.21EF9FB-V1-C6 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V66-C3 U7895.43X.21EF9FB-V66-C3 66 false true 3 1 100 c05076079cff0e66 c05076079cff0e67 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C100 U7895.43X.21EF9FB-V1-C100 1 false true 100 vfchost61 66 3 1dU7895.43X.21EF9FB-V1-C100 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V3-C3 U7895.43X.21EF9FB-V3-C3 3 false true 3 1 5 c05076079cff07ba c05076079cff07bb U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C5 U7895.43X.21EF9FB-V1-C5 1 false true 5 vfchost0 3 3 1dU7895.43X.21EF9FB-V1-C5 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C5 U7895.43X.21EF9FB-V1-C5 1 false true 5 vfchost0 3 3 1dU7895.43X.21EF9FB-V1-C5 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V43-C2 U7895.43X.21EF9FB-V43-C2 43 false true 2 1 44 U7895.43X.21EF9FB-V1-C44 Server U7895.43X.21EF9FB-V1-C44 U7895.43X.21EF9FB-V1-C44 1 false true 44 vhost8 43 2 U7895.43X.21EF9FB-V43-C2 1eU7895.43X.21EF9FB-V1-C44 Client U7895.43X.21EF9FB-V36-C2 U7895.43X.21EF9FB-V36-C2 36 false false 2 1 43 U7895.43X.21EF9FB-V1-C43 Server U7895.43X.21EF9FB-V1-C43 U7895.43X.21EF9FB-V1-C43 1 false true 43 vhost0 36 2 U7895.43X.21EF9FB-V36-C2 1eU7895.43X.21EF9FB-V1-C43 Client 
U7895.43X.21EF9FB-V54-C2 U7895.43X.21EF9FB-V54-C2 54 false true 2 1 89 U7895.43X.21EF9FB-V1-C89 Server U7895.43X.21EF9FB-V1-C89 U7895.43X.21EF9FB-V1-C89 1 false true 89 vhost37 54 2 U7895.43X.21EF9FB-V54-C2 1eU7895.43X.21EF9FB-V1-C89 Client U7895.43X.21EF9FB-V30-C2 U7895.43X.21EF9FB-V30-C2 30 false true 2 1 40 U7895.43X.21EF9FB-V1-C40 Server U7895.43X.21EF9FB-V1-C40 U7895.43X.21EF9FB-V1-C40 1 false true 40 vhost26 30 2 U7895.43X.21EF9FB-V30-C2 1eU7895.43X.21EF9FB-V1-C40 Client U7895.43X.21EF9FB-V63-C2 U7895.43X.21EF9FB-V63-C2 63 false true 2 1 86 U7895.43X.21EF9FB-V1-C86 Server U7895.43X.21EF9FB-V1-C86 U7895.43X.21EF9FB-V1-C86 1 false true 86 vhost9 63 2 U7895.43X.21EF9FB-V63-C2 1eU7895.43X.21EF9FB-V1-C86 Client U7895.43X.21EF9FB-V44-C2 U7895.43X.21EF9FB-V44-C2 44 false true 2 1 38 U7895.43X.21EF9FB-V1-C38 Server U7895.43X.21EF9FB-V1-C38 U7895.43X.21EF9FB-V1-C38 1 false true 38 vhost13 44 2 U7895.43X.21EF9FB-V44-C2 1eU7895.43X.21EF9FB-V1-C38 Client U7895.43X.21EF9FB-V26-C2 U7895.43X.21EF9FB-V26-C2 26 false true 2 1 37 U7895.43X.21EF9FB-V1-C37 Server U7895.43X.21EF9FB-V1-C37 U7895.43X.21EF9FB-V1-C37 1 false true 37 vhost6 26 2 U7895.43X.21EF9FB-V26-C2 1eU7895.43X.21EF9FB-V1-C37 Client U7895.43X.21EF9FB-V22-C2 U7895.43X.21EF9FB-V22-C2 22 false true 2 1 34 U7895.43X.21EF9FB-V1-C34 Server U7895.43X.21EF9FB-V1-C34 U7895.43X.21EF9FB-V1-C34 1 false true 34 vhost10 22 2 U7895.43X.21EF9FB-V22-C2 1eU7895.43X.21EF9FB-V1-C34 Client U7895.43X.21EF9FB-V61-C2 U7895.43X.21EF9FB-V61-C2 61 false true 2 1 78 U7895.43X.21EF9FB-V1-C78 Server U7895.43X.21EF9FB-V1-C78 U7895.43X.21EF9FB-V1-C78 1 false true 78 vhost29 61 2 U7895.43X.21EF9FB-V61-C2 1eU7895.43X.21EF9FB-V1-C78 Client U7895.43X.21EF9FB-V24-C2 U7895.43X.21EF9FB-V24-C2 24 false true 2 1 28 U7895.43X.21EF9FB-V1-C28 Server U7895.43X.21EF9FB-V1-C28 U7895.43X.21EF9FB-V1-C28 1 false true 28 vhost7 24 2 U7895.43X.21EF9FB-V24-C2 1eU7895.43X.21EF9FB-V1-C28 Client U7895.43X.21EF9FB-V51-C2 U7895.43X.21EF9FB-V51-C2 51 false true 2 1 71 
U7895.43X.21EF9FB-V1-C71 Server U7895.43X.21EF9FB-V1-C71 U7895.43X.21EF9FB-V1-C71 1 false true 71 vhost18 51 2 U7895.43X.21EF9FB-V51-C2 1eU7895.43X.21EF9FB-V1-C71 Client U7895.43X.21EF9FB-V8-C2 U7895.43X.21EF9FB-V8-C2 8 false true 2 1 24 U7895.43X.21EF9FB-V1-C24 Server U7895.43X.21EF9FB-V1-C24 U7895.43X.21EF9FB-V1-C24 1 false true 24 vhost17 8 2 U7895.43X.21EF9FB-V8-C2 1eU7895.43X.21EF9FB-V1-C24 Client U7895.43X.21EF9FB-V21-C2 U7895.43X.21EF9FB-V21-C2 21 false false 2 1 22 U7895.43X.21EF9FB-V1-C22 Server U7895.43X.21EF9FB-V1-C22 U7895.43X.21EF9FB-V1-C22 1 false true 22 vhost5 21 2 U7895.43X.21EF9FB-V21-C2 1eU7895.43X.21EF9FB-V1-C22 Client U7895.43X.21EF9FB-V83-C2 U7895.43X.21EF9FB-V83-C2 83 false true 2 1 115 U7895.43X.21EF9FB-V1-C115 Server U7895.43X.21EF9FB-V1-C115 U7895.43X.21EF9FB-V1-C115 1 false true 115 vhost33 83 2 U7895.43X.21EF9FB-V83-C2 1eU7895.43X.21EF9FB-V1-C115 Client U7895.43X.21EF9FB-V11-C2 U7895.43X.21EF9FB-V11-C2 11 false false 2 1 21 U7895.43X.21EF9FB-V1-C21 Server U7895.43X.21EF9FB-V1-C21 U7895.43X.21EF9FB-V1-C21 1 false true 21 vhost12 11 2 U7895.43X.21EF9FB-V11-C2 1eU7895.43X.21EF9FB-V1-C21 Client U7895.43X.21EF9FB-V27-C2 U7895.43X.21EF9FB-V27-C2 27 false true 2 1 20 U7895.43X.21EF9FB-V1-C20 Server U7895.43X.21EF9FB-V1-C20 U7895.43X.21EF9FB-V1-C20 1 false true 20 vhost4 27 2 U7895.43X.21EF9FB-V27-C2 1eU7895.43X.21EF9FB-V1-C20 Client U7895.43X.21EF9FB-V16-C2 U7895.43X.21EF9FB-V16-C2 16 false false 2 1 19 U7895.43X.21EF9FB-V1-C19 Server U7895.43X.21EF9FB-V1-C19 U7895.43X.21EF9FB-V1-C19 1 false true 19 vhost22 16 2 U7895.43X.21EF9FB-V16-C2 1eU7895.43X.21EF9FB-V1-C19 Client U7895.43X.21EF9FB-V12-C2 U7895.43X.21EF9FB-V12-C2 12 false true 2 1 18 U7895.43X.21EF9FB-V1-C18 Server U7895.43X.21EF9FB-V1-C18 U7895.43X.21EF9FB-V1-C18 1 false true 18 vhost15 12 2 U7895.43X.21EF9FB-V12-C2 1eU7895.43X.21EF9FB-V1-C18 Client U7895.43X.21EF9FB-V85-C2 U7895.43X.21EF9FB-V85-C2 85 false true 2 1 111 U7895.43X.21EF9FB-V1-C111 Server U7895.43X.21EF9FB-V1-C111 
U7895.43X.21EF9FB-V1-C111 1 false true 111 vhost30 85 2 U7895.43X.21EF9FB-V85-C2 1eU7895.43X.21EF9FB-V1-C111 Client U7895.43X.21EF9FB-V15-C2 U7895.43X.21EF9FB-V15-C2 15 false false 2 1 14 U7895.43X.21EF9FB-V1-C14 Server U7895.43X.21EF9FB-V1-C14 U7895.43X.21EF9FB-V1-C14 1 false true 14 vhost11 15 2 U7895.43X.21EF9FB-V15-C2 1eU7895.43X.21EF9FB-V1-C14 Client U7895.43X.21EF9FB-V39-C2 U7895.43X.21EF9FB-V39-C2 39 false false 2 1 60 U7895.43X.21EF9FB-V1-C60 Server U7895.43X.21EF9FB-V1-C60 U7895.43X.21EF9FB-V1-C60 1 false true 60 vhost20 39 2 U7895.43X.21EF9FB-V39-C2 1eU7895.43X.21EF9FB-V1-C60 Client U7895.43X.21EF9FB-V32-C2 U7895.43X.21EF9FB-V32-C2 32 false true 2 1 13 U7895.43X.21EF9FB-V1-C13 Server U7895.43X.21EF9FB-V1-C13 U7895.43X.21EF9FB-V1-C13 1 false true 13 vhost2 32 2 U7895.43X.21EF9FB-V32-C2 1eU7895.43X.21EF9FB-V1-C13 Client U7895.43X.21EF9FB-V40-C2 U7895.43X.21EF9FB-V40-C2 40 false true 2 1 11 U7895.43X.21EF9FB-V1-C11 Server U7895.43X.21EF9FB-V1-C11 U7895.43X.21EF9FB-V1-C11 1 false true 11 vhost3 40 2 U7895.43X.21EF9FB-V40-C2 1eU7895.43X.21EF9FB-V1-C11 Client U7895.43X.21EF9FB-V7-C2 U7895.43X.21EF9FB-V7-C2 7 false true 2 1 10 U7895.43X.21EF9FB-V1-C10 Server U7895.43X.21EF9FB-V1-C10 U7895.43X.21EF9FB-V1-C10 1 false true 10 vhost1 7 2 U7895.43X.21EF9FB-V7-C2 1eU7895.43X.21EF9FB-V1-C10 Client U7895.43X.21EF9FB-V23-C2 U7895.43X.21EF9FB-V23-C2 23 false false 2 1 8 U7895.43X.21EF9FB-V1-C8 Server U7895.43X.21EF9FB-V1-C8 U7895.43X.21EF9FB-V1-C8 1 false true 8 vhost21 23 2 U7895.43X.21EF9FB-V23-C2 1eU7895.43X.21EF9FB-V1-C8 Client U7895.43X.21EF9FB-V29-C2 U7895.43X.21EF9FB-V29-C2 29 false true 2 1 54 U7895.43X.21EF9FB-V1-C54 Server U7895.43X.21EF9FB-V1-C54 U7895.43X.21EF9FB-V1-C54 1 false true 54 vhost16 29 2 U7895.43X.21EF9FB-V29-C2 1eU7895.43X.21EF9FB-V1-C54 Client U7895.43X.21EF9FB-V31-C2 U7895.43X.21EF9FB-V31-C2 31 false true 2 1 53 U7895.43X.21EF9FB-V1-C53 Server U7895.43X.21EF9FB-V1-C53 U7895.43X.21EF9FB-V1-C53 1 false true 53 vhost34 31 2 U7895.43X.21EF9FB-V31-C2 
1eU7895.43X.21EF9FB-V1-C53 Client U7895.43X.21EF9FB-V34-C2 U7895.43X.21EF9FB-V34-C2 34 false true 2 1 50 U7895.43X.21EF9FB-V1-C50 Server U7895.43X.21EF9FB-V1-C50 U7895.43X.21EF9FB-V1-C50 1 false true 50 vhost19 34 2 U7895.43X.21EF9FB-V34-C2 1eU7895.43X.21EF9FB-V1-C50 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L2-T2 U78AF.001.WZS04LA-P1-C34-L2-T2 13U78AF.001.WZS04LA-P1-C34-L2-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L1-T2 U78AF.001.WZS04LA-P1-C34-L1-T2 13U78AF.001.WZS04LA-P1-C34-L1-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L2-T1 U78AF.001.WZS04LA-P1-C34-L2-T1 13U78AF.001.WZS04LA-P1-C34-L2-T1 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 U78AF.001.WZS04LA-P1-C34-L2-T2 en3 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 U78AF.001.WZS04LA-P1-C34-L1-T2 en1 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 U78AF.001.WZS04LA-P1-C34-L2-T1 en2 9.1.2.4 255.255.255.0 Active END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_network_bridge.txt0000664000175000017500000010305213571367171024130 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_network_bridge.txt # #################################################### INFO{ {'comment': 'Used for testing network bridge wrapper.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'LogicalPartition/0A68CFAB-F62B-46D4-A6A0-F4EBE0264AD5/ClientNetworkAdapter/6445b54b-b9dc-3bc2-b1d3-f8cc22ba95b8'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ bf9db11e-1571-31c3-a079-ce8f3aa2d68b 2014-11-18T20:46:05.782Z IBM Power Systems Management Console 764f3423-04c5-3b96-95a3-4764065400bd NetworkBridge 2014-11-18T20:46:06.825Z IBM Power Systems Management Console -2143062455 764f3423-04c5-3b96-95a3-4764065400bd 1412723263073 99 false true 4094 U8246.L2C.0604C7A-V4-C3 U8246.L2C.0604C7A-V4-C3 false true 3 ALL 1683B6A32203 4094 false 100 150 175 200 250 300 333 350 900 1001 2227 2228 true 2 ent5 1 1 U8246.L2C.0604C7A-V4-C2 U8246.L2C.0604C7A-V4-C2 false true 2 ALL 1683B6A32202 1 false false 2 ent4 1 1 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent0 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C4-T1 U78AB.001.WZSH5ZY-P1-C4-T1 13U78AB.001.WZSH5ZY-P1-C4-T1 disabled ent8 false 1 disabled 8192 true U8246.L2C.0604C7A-V4-C3 U8246.L2C.0604C7A-V4-C3 false true 3 ALL 1683B6A32203 4094 false 100 150 175 200 250 300 333 350 900 1001 2227 2228 true 2 ent5 1 U8246.L2C.0604C7A-V4-C2 U8246.L2C.0604C7A-V4-C2 false true 2 
ALL 1683B6A32202 1 false false 2 ent4 1 true en8 9.1.2.4 255.255.255.0 Active 10298abf2b234f52cb true Configured 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent0 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C4-T1 U78AB.001.WZSH5ZY-P1-C4-T1 13U78AB.001.WZSH5ZY-P1-C4-T1 disabled ent8 false 1 disabled 8192 true true en8 9.1.2.4 255.255.255.0 Active 10298abf2b234f52cb true Configured 2 d648eb60-4d39-34ad-ae2b-928d8c9577ad NetworkBridge 2014-11-18T20:46:06.828Z IBM Power Systems Management Console 814883813 d648eb60-4d39-34ad-ae2b-928d8c9577ad 1412723263073 false false 1 U8246.L2C.0604C7A-V1-C2 U8246.L2C.0604C7A-V1-C2 false true 2 ALL 1683B625E702 1 false false 0 ent10 1 4094 U8246.L2C.0604C7A-V1-C3 U8246.L2C.0604C7A-V1-C3 false true 3 ALL 1683B625E703 4094 false 28 29 100 123 1000 2227 2881 true 0 ent11 1 1 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent6 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C3-T1 U78AB.001.WZSH5ZY-P1-C3-T1 13U78AB.001.WZSH5ZY-P1-C3-T1 disabled ent14 false 1 disabled 8192 true U8246.L2C.0604C7A-V1-C2 U8246.L2C.0604C7A-V1-C2 false true 2 ALL 1683B625E702 1 false false 0 ent10 1 U8246.L2C.0604C7A-V1-C3 U8246.L2C.0604C7A-V1-C3 false true 3 ALL 1683B625E703 4094 false 28 29 100 123 1000 2227 2881 true 0 ent11 1 true en14 9.1.2.4 255.255.255.0 Active 10b3fb44b976a3dc51 true 0 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/lpar.txt0000664000175000017500000067036213571367171021110 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh lpar.txt # #################################################### INFO{ {'comment': 'Created from query of LogicalPartition', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'LogicalPartition'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=00002EyMEecWDIzdx_K0LwQNiUO:aa95eb5b-d145-4cd8-9030-8b370106cfee; Path=/; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 16 Aug 2013 09:49:34 GMT', 'etag': '959374938', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Fri, 16 Aug 2013 09:49:40 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 9fb9e6bf-5fa9-3c64-90c1-c9ff54863ffb 2013-09-09T12:04:13.919-04:00 IBM Power Systems Management Console 089FFB20-5D19-4A8C-BB80-13650627D985 LogicalPartition 2013-09-09T12:04:24.364-04:00 IBM Power Systems Management Console 089FFB20-5D19-4A8C-BB80-13650627D985 1378742169504 false 127 0 POWER6_Plus On true false false false false false normal 0604C6A9 Linux/Red Hat 2.6.32-358.el6.ppc64 6.4 true false true false false 9 64 64 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-126-127-00000001 false 1.5 2 2.5 3 0.5 1 9 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 089FFB20-5D19-4A8C-BB80-13650627D985 default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 true Normal 2 1 false true false 668B0882-C24A-4AE9-91C8-297E95E3FE29 LogicalPartition 2013-09-09T12:04:24.326-04:00 IBM Power Systems Management Console 668B0882-C24A-4AE9-91C8-297E95E3FE29 1378742169148 false 127 0 POWER7 On true false false false false false normal 0604C6AB Unknown false false 
false false false 11 6 6 false 0 0 2048 0.0 6 0 2048 0 512 0 0.0 6 0 0 2048 2048 0 512 false false 0 256 512 false z3-9-5-126-208-000001f0 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 668B0882-C24A-4AE9-91C8-297E95E3FE29 default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 3D3DE3FA-5DD6-439D-BF4C-1835C340A9BD LogicalPartition 2013-09-09T12:04:24.223-04:00 IBM Power Systems Management Console 3D3DE3FA-5DD6-439D-BF4C-1835C340A9BD 1378719657237 false 127 0 POWER7 On true false false false false false normal 0604CAA6 Linux/Red Hat 2.6.32-220.el6.ppc64 6.2 true true true true true 6 64 64 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 512 512 false z3-9-5-127-7-0000002e false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 3D3DE3FA-5DD6-439D-BF4C-1835C340A9BD default 0 0 Unknown active 9.1.2.4 209915397421568 false Linux ppc64 time_stamp=08/13/2016 23:52:08,refcode=Linux ppc64,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 69689160-1BC7-44FF-AEBC-DE5A059BFDC9 LogicalPartition 2013-09-09T12:04:24.265-04:00 IBM Power Systems Management Console 69689160-1BC7-44FF-AEBC-DE5A059BFDC9 1378742168467 false 127 0 POWER7 On true false false false false false normal 0604C6AH Unknown false false false false false 17 64 64 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-126-127-00000015 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 69689160-1BC7-44FF-AEBC-DE5A059BFDC9 default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false 
false 23FEE581-94AF-4E80-843D-B14409D9A101 LogicalPartition 2013-09-09T12:04:24.162-04:00 IBM Power Systems Management Console 23FEE581-94AF-4E80-843D-B14409D9A101 1378585526038 false 101 0 POWER7 On true false false false false false normal 0604CAA3 Linux/Red Hat 2.6.32-220.el6.ppc64 6.2 true true true true true 3 64 64 false 0 0 512 0.0 6 0 2048 0 256 0 0.0 6 0 0 2048 512 0 256 false false 0 512 256 false z3-9-5-127-7-00000029 1 4 1 true 0 keep idle procs true keep idle procs 4 1 1 1 true running AIX/Linux 23FEE581-94AF-4E80-843D-B14409D9A101 default 0 0 Unknown active 9.1.2.5 209915330446336 false false false 2B80D049-67E1-43E9-AD8F-AB2E55A41EEC LogicalPartition 2013-09-09T12:04:24.384-04:00 IBM Power Systems Management Console 2B80D049-67E1-43E9-AD8F-AB2E55A41EEC 1378742169792 false 127 0 POWER7 On true false false false false false normal 0604C6A8 Unknown false false false false false 8 6 6 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-127-104-00000029 false 0.5 1 0.5 1 0.5 1 0 capped false capped false 1 0.5 0.5 0.5 0 1 1 0.00 not activated AIX/Linux 2B80D049-67E1-43E9-AD8F-AB2E55A41EEC default 0 0 Unknown inactive false false false 5314F0D6-FC3B-4BCB-9266-15FA0A515040 LogicalPartition 2013-09-09T12:04:24.237-04:00 IBM Power Systems Management Console 5314F0D6-FC3B-4BCB-9266-15FA0A515040 1378742659687 false 127 0 POWER7 On false false false false false false normal 0604CAA7 Unknown false false false false false 7 64 8 false 0 0 512 6 0 512 0 512 0 6 0 0 0 0 0 0 false false 0 0 0 false brnelson-z3-9-5-126-188-00000051 false 0.5 1 0.5 1 0.5 1 0 Unknown true sre idle proces 0 0 0 0 true not activated AIX/Linux 5314F0D6-FC3B-4BCB-9266-15FA0A515040 default 0 0 Unknown inactive false false false 5B3DBD60-3461-41BC-ADD4-D5D9AF55A2CF LogicalPartition 2013-09-09T12:04:24.280-04:00 IBM Power Systems Management Console 5B3DBD60-3461-41BC-ADD4-D5D9AF55A2CF 1378742168632 false 127 0 POWER7 On true false false false false 
false normal 0604C6AG Unknown false false false false false 16 64 64 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-125-63-0000003d false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 5B3DBD60-3461-41BC-ADD4-D5D9AF55A2CF default 0 0 Unknown inactive false false false 44B59243-C2F5-4F68-9C45-2AF2DC12F698 LogicalPartition 2013-09-09T12:04:24.416-04:00 IBM Power Systems Management Console 44B59243-C2F5-4F68-9C45-2AF2DC12F698 1378742170386 false 127 0 POWER7 On true false false false false false normal 0604C6A5 Unknown false false false false false 5 6 6 false 0 0 0 0.0 6 0 0 0 0 0 0.0 6 0 0 0 0 0 0 false false 0 0 0 false z3-9-5-126-106-00000001 false 0.00 0 0.00 0 0.00 0 0 capped false capped false 0 0.00 0.00 0.00 0 0 0 0.00 not activated AIX/Linux 44B59243-C2F5-4F68-9C45-2AF2DC12F698 default 0 0 Unknown inactive false false false 6D459A1E-D028-4F36-83FF-207F29700FB8 LogicalPartition 2013-09-09T12:04:24.400-04:00 IBM Power Systems Management Console 6D459A1E-D028-4F36-83FF-207F29700FB8 1378742170235 false 127 0 POWER7 Disabled false false false false false false normal 0604C6A7 Unknown false false false false false 7 10 10 false 0 0 256 0.0 6 0 256 0 256 0 0.0 6 0 0 256 256 0 256 false false 0 256 256 false kevin1 false 0.1 1 0.1 1 0.1 1 0 capped false capped false 1 0.1 0.1 0.1 0 1 1 0.00 not activated AIX/Linux 6D459A1E-D028-4F36-83FF-207F29700FB8 default 0 0 Unknown inactive false false false 120C6BDE-0217-4F8F-876F-30261C9D6F8F LogicalPartition 2013-09-09T12:04:24.478-04:00 IBM Power Systems Management Console 120C6BDE-0217-4F8F-876F-30261C9D6F8F 1378742170929 true 127 0 POWER7 Disabled true false false false false false normal 0604C6A2 Unknown false false false false false 2 10 10 false 0 0 0 0.0 6 0 0 0 0 0 0.0 6 0 0 0 0 0 0 false false 0 0 0 false powervc1_126 false 0.00 0 0.00 0 0.00 0 0 128 uncapped false uncapped false 0 0.00 0.00 0.00 0 128 0 0 
0.00 0 not activated AIX/Linux 120C6BDE-0217-4F8F-876F-30261C9D6F8F default 0 0 Unknown inactive false false false 7AB1AB7B-178C-4C7C-B5AB-1F011A61335C LogicalPartition 2013-09-09T12:04:24.204-04:00 IBM Power Systems Management Console 7AB1AB7B-178C-4C7C-B5AB-1F011A61335C 1378652980712 false 127 0 POWER7 On true false false false false false normal 0604CAA5 Linux/Red Hat 2.6.32-220.el6.ppc64 6.2 false false false false false 5 64 64 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-127-7-0000002c false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 7AB1AB7B-178C-4C7C-B5AB-1F011A61335C default 0 0 Unknown inactive 9.1.2.6 209915266431488 false false false 28765B00-E4EE-45B0-A405-E9EB9C9AA255 LogicalPartition 2013-09-09T12:04:24.462-04:00 IBM Power Systems Management Console 28765B00-E4EE-45B0-A405-E9EB9C9AA255 1378742170813 false 127 0 POWER7 On true false false false false false normal 0604C6A3 Unknown false false false false false 3 6 6 false 0 0 2048 0.0 6 0 2048 0 512 0 0.0 6 0 0 2048 2048 0 512 false false 0 256 512 false z3-9-5-126-208-000001c9 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 28765B00-E4EE-45B0-A405-E9EB9C9AA255 default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 9068B0FB-1CF0-4D23-8A23-31AC87D5F5D2 LogicalPartition 2013-09-09T12:04:24.250-04:00 IBM Power Systems Management Console 9068B0FB-1CF0-4D23-8A23-31AC87D5F5D2 1378742168300 false 127 default Disabled false false false false false Unknown false false false false false 2 false true true false false linux1 true sre idle proces true sre idle proces 0 0 0 true not activated AIX/Linux 9068B0FB-1CF0-4D23-8A23-31AC87D5F5D2 default 0 0 Unknown none false 00000000 time_stamp=08/13/2016 
23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 5ADCEC07-B213-41FD-9B14-5130D65CCCBB LogicalPartition 2013-09-09T12:04:24.495-04:00 IBM Power Systems Management Console 5ADCEC07-B213-41FD-9B14-5130D65CCCBB 1378742171016 false 127 0 POWER7 On true false false false false false normal 0604C6AC Unknown false false false false false 12 64 64 false 0 0 1024 0.0 6 0 1024 0 1024 0 0.0 6 0 0 1024 1024 0 1024 false false 0 1024 1024 false z3-9-5-125-87-00000005 false 0.6 1 0.6 1 0.5 1 0 128 uncapped false uncapped false 1 0.6 0.5 0.6 0 128 1 1 0.6 128 running AIX/Linux 5ADCEC07-B213-41FD-9B14-5130D65CCCBB default 0 0 Unknown inactive 152185856462592 false Linux ppc64 time_stamp=08/13/2016 23:52:08,refcode=Linux ppc64,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 357FDE91-837E-471A-9CF7-503DF18E1EE8 LogicalPartition 2013-09-09T12:04:24.310-04:00 IBM Power Systems Management Console 357FDE91-837E-471A-9CF7-503DF18E1EE8 1378742168976 false 127 0 POWER7 On true false false false false false normal 0604C6AD Unknown false false false false false 13 6 6 false 0 0 0 0.0 6 0 0 0 0 0 0.0 6 0 0 0 0 0 0 false false 0 0 0 false z3-9-5-126-153-0000002a false 0.00 0 0.00 0 0.00 0 0 0 uncapped false uncapped false 0 0.00 0.00 0.00 0 0 0 0 0.00 0 not activated AIX/Linux 357FDE91-837E-471A-9CF7-503DF18E1EE8 default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 799DCB11-5D0D-49BB-813E-315658EDCD1D LogicalPartition 2013-09-09T12:04:24.191-04:00 IBM Power Systems Management Console 799DCB11-5D0D-49BB-813E-315658EDCD1D 1378585528049 false 127 0 POWER7 On true false false false false false normal 0604CAA4 Linux/Red Hat 2.6.32-220.el6.ppc64 6.2 false false false false false 4 64 64 false 0 0 768 0.0 6 0 768 0 768 0 0.0 6 0 0 768 768 0 768 false false 
0 256 768 false z3-9-5-125-87-00000007 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 799DCB11-5D0D-49BB-813E-315658EDCD1D default 0 0 Unknown inactive 9.1.2.7 209915443973376 false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 6341B7CE-CE20-424C-B407-7E446CB4A9EC LogicalPartition 2013-09-09T12:04:24.341-04:00 IBM Power Systems Management Console 6341B7CE-CE20-424C-B407-7E446CB4A9EC 1378742169324 false 127 0 POWER7 On true false false false false false normal 0604C6AA Unknown false false false false false 10 6 6 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-126-208-000001eb false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 6341B7CE-CE20-424C-B407-7E446CB4A9EC default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 1E45BAD5-8CE2-48DD-97BB-7FC0057D99EB LogicalPartition 2013-09-09T12:04:24.432-04:00 IBM Power Systems Management Console 1E45BAD5-8CE2-48DD-97BB-7FC0057D99EB 1378742170545 false 127 0 POWER7 On true false false false false false normal 0604C6A4 Unknown false false false false false 4 6 6 false 0 0 256 0.0 6 0 256 0 256 0 0.0 6 0 0 256 256 0 256 false false 0 256 256 false test_scsi 1 1 1 true sre idle procs always true sre idle procs always 1 1 1 0 true not activated AIX/Linux 1E45BAD5-8CE2-48DD-97BB-7FC0057D99EB default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 09D4D2E7-162B-4478-8DFB-87CBC3E76343 LogicalPartition 2013-09-09T12:04:24.177-04:00 IBM Power Systems Management Console 
09D4D2E7-162B-4478-8DFB-87CBC3E76343 1378585527859 false 127 0 POWER7 On true false false false false false normal 0604CAA2 Linux/Red Hat 2.6.32-220.el6.ppc64 6.2 true true true true true 2 64 64 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 512 512 false z3-9-5-125-87-00000006 false 0.7 1 0.7 1 0.5 1 0 128 uncapped false uncapped false 1 0.7 0.5 0.7 0 128 1 1 0.7 128 running AIX/Linux 09D4D2E7-162B-4478-8DFB-87CBC3E76343 default 0 0 Unknown active 9.1.2.8 209915398340864 false Linux ppc64 time_stamp=08/13/2016 23:52:08,refcode=Linux ppc64,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false 42DF39A2-3A4A-4748-998F-25B15352E8A7 LogicalPartition 2013-09-09T12:04:24.296-04:00 IBM Power Systems Management Console 42DF39A2-3A4A-4748-998F-25B15352E8A7 1378742168803 false 127 0 POWER6_Plus Disabled true false false false false false normal 0604C6AF Linux/Red Hat 2.6.32-358.el6.ppc64 6.4 false false false false false 15 6 6 false 0 0 512 0.0 6 0 512 0 512 0 0.0 6 0 0 512 512 0 512 false false 0 256 512 false z3-9-5-126-168-00000002 2 3 1 true sre idle proces true sre idle proces 1 1 1 0 true not activated AIX/Linux 42DF39A2-3A4A-4748-998F-25B15352E8A7 default 0 0 Unknown inactive false 00000000 time_stamp=08/13/2016 23:52:08,refcode=00000000,word2=03D00000,fru_call_out_loc_codes=#47-Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016 false false TheNVRAMis20KofBASE64encodedDATA 1185681 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/nbbr_network_bridge.txt0000664000175000017500000003316213571367171024151 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh nbbr_network_bridge.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/NetworkBridge'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 0ad02639-3eb2-3c5b-807a-e933548d556a 2015-02-17T23:02:20.911Z IBM Power Systems Management Console b6a027a8-5c0b-3ac0-8547-b516f5ba6151 NetworkBridge 2015-02-17T23:02:21.590Z IBM Power Systems Management Console -43845602 b6a027a8-5c0b-3ac0-8547-b516f5ba6151 1424131786240 false false 2227 U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 4094 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 2227 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.255.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 true en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true 0 
END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/cluster.txt0000664000175000017500000001516113571367171021621 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh cluster.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'Cluster/17248b3b-9be7-3bc0-9b89-24a285b5d7ac'} END OF SECTION} HEADERS{ {'content-length': '4567', 'content-type': 'application/atom+xml', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000uDwaHQGGdRKbfwOM1pbCcYt:c8963131-fc5d-48ff-a2f4-346b019f3f2c; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_2_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 06 Feb 2015 07:01:28 GMT', 'x-transaction-id': 'XT10009741', 'etag': '128840171', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Fri, 06 Feb 2015 07:01:28 GMT', 'x-transactionrecord-uuid': 'f65426ff-08cd-47e1-a43d-7c633754db4a'} END OF SECTION} BODY{ 17248b3b-9be7-3bc0-9b89-24a285b5d7ac Cluster 2015-02-06T07:01:28.760Z IBM Power Systems Management Console 128840171 17248b3b-9be7-3bc0-9b89-24a285b5d7ac 1423205969593 neoclust1 22cfc907d2abf511e4b2d540f2e95daf30 MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAwMg== 10240 hdisk2 active true foo.example.com 2 8247 22L 2125D1A 2.2.3.4 Up bar.example.com 1 8247 22L 2125D0A 2.2.4.0 Down bar.example.com 3 8247 22L 2125D0A 2.2.4.0 Unknown END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/sriov_lp_feed.txt0000664000175000017500000003410113571367171022753 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh vios_with_sriov_lp.txt # #################################################### INFO{ {'comment': None, 'path': 'VirtualIOServer/55F37E22-E616-4004-A60B-33EA11EFE91A/SRIOVEthernetLogicalPort', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'Content-Length': '3178', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000ZnxUV-fSBOQFueaGXWRFjja:8cb3ca96-3fe0-43d2-ba02-95aa4c95fa2f; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Fri, 03 Jun 2016 20:30:39 GMT', 'X-Transaction-ID': 'XT10129661', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Fri, 03 Jun 2016 20:30:39 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': '85febdb4-c6ac-4969-96c7-e3900d6a0cae', 'ETag': '1743038974'} END OF SECTION} BODY{ a1209dfb-8ad6-382c-a3ac-8f3deb99411d 2016-06-03T16:30:39.769-04:00 IBM Power Systems Management Console 080c7f1f-29e9-333a-8034-a5c8a358786e SRIOVEthernetLogicalPort 2016-06-03T16:30:39.796-04:00 IBM Power Systems Management Console 1743038943 080c7f1f-29e9-333a-8034-a5c8a358786e 0 0 654327810 1 PHB 4098 true false false false PHB 4098 2.0% 100.0% 2 0 U78CB.001.WZS0485-P1-C5-T3-S2 -1 NOT_VNIC NONE 8ACA227C6E00 000000000000 0 0 ALL 6d9cfdda-ae8b-3c58-bc0c-379a0c5ab154 SRIOVEthernetLogicalPort 2016-06-03T16:30:39.796-04:00 IBM Power Systems Management Console 1743038943 6d9cfdda-ae8b-3c58-bc0c-379a0c5ab154 0 0 654327809 1 PHB 4097 true false false false unavailable 2.0% 80.0% 0 1095 U78CB.001.WZS0JYF-P1-C10-T1-S1 0 DEDICATED_VNIC NONE A65BDDBBAD66 A65BDDBBAD66 0 0 0 NONE 6d9cfdda-ae8b-3c58-bc0c-379a0c5ab155 SRIOVEthernetLogicalPort 2016-06-03T16:30:39.796-04:00 IBM Power Systems Management Console 1743038943 6d9cfdda-ae8b-3c58-bc0c-379a0c5ab155 0 0 654327810 2 PHB 4098 true true false false unavailable 
18.0% 10.0% 1 42 U78CB.001.WZS0JYF-P1-C10-T1-S2 0 DEDICATED_VNIC ALL A65BDDBBAD67 A65BDDBBAD67 0 0 10 ALL 6d9cfdda-ae8b-3c58-bc0c-379a0c5ab156 SRIOVEthernetLogicalPort 2016-06-03T16:30:39.796-04:00 IBM Power Systems Management Console 1743038943 6d9cfdda-ae8b-3c58-bc0c-379a0c5ab156 0 0 654327811 2 PHB 4099 true false false false unavailable 2.0% 100.0% 1 42 U78CB.001.WZS0JYF-P1-C10-T1-S3 0 DEDICATED_VNIC NONE A65BDDBBAD68 A65BDDBBAD68 0 0 1 ALL END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/logon.xml0000664000175000017500000000101413571367171021227 0ustar neoneo00000000000000 PUIoR6x0kP6fQqA7qZ8sLZQJ8MLx9JHfLCYzT4oGFSE2WaGIhaFXIyQYvbqdKNS8QagjBpPi9NP7YR_h61SOJ3krS_RvKAp-oCf2p8x8uvQrrDv-dUzc17IT5DkR7_jv2qc8iUD7DJ6Rw53a17rY0p63KqPg9oUGd6Bn3fNDLiEwaBR4WICftVxUFj-tfWMOyZZY2hWEtN2K8ScXvyFMe-w3SleyRbGnlR34jb0A99s= pypowervm-1.1.24/pypowervm/tests/data/event.xml0000664000175000017500000000356313571367171021245 0ustar neoneo00000000000000 69cf8099-87a7-3852-9a90-7cc0fdfe7d2d 2014-07-23T04:29:09.130Z IBM Power Systems Management Console 248c622f-d4aa-3202-b833-ccbeceefaf35 Event 2014-07-23T04:29:09.137Z IBM Power Systems Management Console 581561747 248c622f-d4aa-3202-b833-ccbeceefaf35 1406089749131 NEW_CLIENT 1404864263191 pypowervm-1.1.24/pypowervm/tests/data/vio_multi_vscsi_mapping.txt0000664000175000017500000021553613571367171025101 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh vio_multi_vscsi_mapping.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/3443DB77-AED1-47ED-9AA5-3DB9C6CF7089'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 VirtualIOServer 2015-03-20T20:52:43.026Z IBM Power Systems Management Console -2022933226 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 1426821039925 false 191 POWER7 Disabled false false false false false false normal 2125D4A1 VIOS 2.2.4.0 6100-09-04-1441 true true true true true 1 2000 false RAID Controller U78CB.001.WZS007Y 842 260 842 1023 4116 2 4116 4116 false false false false false false false false false false 553844757 RAID Controller U78CB.001.WZS007Y-P1-C14 U78CB.001.WZS007Y-P1-C14 C14 842 true 553844757 U78CB.001.WZS007Y-P1-C14 C14 false Universal Serial Bus UHC Spec U78CB.001.WZS007Y 33345 3075 33345 1202 4172 2 4172 4116 false false false false false false false false false false 553713691 Universal Serial Bus UHC Spec U78CB.001.WZS007Y-P1-T2 U78CB.001.WZS007Y-P1-T2 T2 33345 true 553713691 U78CB.001.WZS007Y-P1-T2 T2 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78CB.001.WZS007Y 5719 512 5719 1056 5348 1 5348 4116 false false false false false false false false false 
false 553910302 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78CB.001.WZS007Y-P1-C10 U78CB.001.WZS007Y-P1-C10 C10 5719 true 553910302 U78CB.001.WZS007Y-P1-C10 C10 false Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y 9522 4 9522 1054 4215 2 4215 4116 false false false false false false false false false false 553713705 Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y-P1-C3 U78CB.001.WZS007Y-P1-C3 C3 U78CB.001.WZS007Y-P1-C3-T1 MPIO IBM 2076 FC Disk U78CB.001.WZS007Y-P1-C3-T1-W500507680210E522-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAxMg== true 1024 hdisk5 active 33213600507680282861D880000000000001204214503IBMfcp true MPIO IBM 2076 FC Disk U78CB.001.WZS007Y-P1-C3-T1-W500507680210E522-L1000000000000 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAxMw== true 102400 hdisk6 active 33213600507680282861D880000000000001304214503IBMfcp true fcs0 1aU78CB.001.WZS007Y-P1-C3-T1 21000024FF649104 64 64 U78CB.001.WZS007Y-P1-C3-T4 fcs3 1aU78CB.001.WZS007Y-P1-C3-T4 21000024FF649107 U78CB.001.WZS007Y-P1-C3-T3 fcs2 1aU78CB.001.WZS007Y-P1-C3-T3 21000024FF649106 U78CB.001.WZS007Y-P1-C3-T2 fcs1 1aU78CB.001.WZS007Y-P1-C3-T2 21000024FF649105 553713705 U78CB.001.WZS007Y-P1-C3 C3 2000 false false 4096 0.0 7 4096 4096 0.0 7 0 0 4096 4096 0 4096 false true false false 0 4096 4096 false IOServer - SN2125D4A false 0.4 4 0.4 4 0.4 4 0 255 uncapped false uncapped false 4 0.4 0.4 0.4 0 255 4 4 0.4 255 running Virtual IO Server 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 default 0 0 active 9.1.2.4 25006020324608 false false true true asdf_0184e0cb_userID_config.iso 0easdf_0184e0cb_userID_config.iso rw 0.000000 asdf_3288fac2_userID_config.iso 0easdf_3288fac2_userID_config.iso rw 0.000000 bldr1_dfe05349_kyleh_config.iso 0ebldr1_dfe05349_kyleh_config.iso rw 0.000000 ubuntu1410 0eubuntu1410 rw 0.5449 VMLibrary 1 true SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L405DB60300-L0 NoReserve Failover 
01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDQw false 270648 hdisk1 active 391BIBMIPR-0 5DB603000000004010IPR-0 5DB6030003IBMsas false SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L205DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDIw false 270648 hdisk0 active 391BIBMIPR-0 5DB603000000002010IPR-0 5DB6030003IBMsas false NjAwNTA3NjgwMjgyODYxRDg4MDAwMDAwMDAwMDAwQjU= MPIO IBM 2076 FC Disk U78CB.001.WZS007Y-P1-C3-T1-W500507680210E522-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAxMg== true 1024 hdisk5 active 33213600507680282861D880000000000001204214503IBMfcp true This is a bogus value MPIO IBM 2076 FC Disk U78CB.001.WZS007Y-P1-C3-T1-W500507680210E522-L1000000000000 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAxMw== true 102400 hdisk6 active 33213600507680282861D880000000000001304214503IBMfcp true 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.255.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 true Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 60 None Ubuntu1410 0300025d4a00007a000000014b36d9deaf.1 0x8200000000000000 vtscsi0 09b4fdf7bdec405d57 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 
2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 MPIO IBM 2076 FC Disk U78CB.001.WZS007Y-P1-C3-T1-W500507680210E522-L5000000000000 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDA2MA== false 2048 hdisk10 active 33213600507680282861D880000000000006004214503IBMfcp false 0x8200000000000000 vtscsi0 09b4fdf7bdec405d57 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 bldr1_dfe05349_kyleh_config.iso 0ebldr1_dfe05349_kyleh_config.iso rw 0.000000 0x8200000000000000 vtscsi0 09b4fdf7bdec405d57 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 true 270c88f8e2d36711e490ce40f2e95daf30a6d61c0dee5ec6f6a011b300b9d0830d 1 VirtualIO_Disk 270c88f8e2d36711e490ce40f2e95daf3036ba45c3517660646bca27f4503ffb8f boot_cdfbb633 0x8200000000000000 vtscsi0 09b4fdf7bdec405d57 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T4 U78CB.001.WZS007Y-P1-C10-T4 13U78CB.001.WZS007Y-P1-C10-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T3 U78CB.001.WZS007Y-P1-C10-T3 13U78CB.001.WZS007Y-P1-C10-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T2 U78CB.001.WZS007Y-P1-C10-T2 
13U78CB.001.WZS007Y-P1-C10-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78CB.001.WZS007Y-P1-C10-T4 en3 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78CB.001.WZS007Y-P1-C10-T3 en2 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 U78CB.001.WZS007Y-P1-C10-T2 en1 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_virtual_network_feed.txt0000664000175000017500000000643113571367171025350 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_virtual_network_feed.txt # #################################################### INFO{ {'comment': 'Used for testing virtual network wrapper.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'LogicalPartition/0A68CFAB-F62B-46D4-A6A0-F4EBE0264AD5/VirtualNetwork'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 00b2be3f-79f6-3d6e-951c-fabc1d6c7d0c 2015-02-06T03:47:14.730Z IBM Power Systems Management Console 20714b36-f66c-329f-b1c2-763c1c1a65a2 VirtualNetwork 2015-02-06T03:47:15.036Z IBM Power Systems Management Console -1893269394 20714b36-f66c-329f-b1c2-763c1c1a65a2 1423194434792 VLAN2227-ETHERNET0 2227 0 false END OF SECTION} 
pypowervm-1.1.24/pypowervm/tests/data/cluster_create_job_template.txt0000664000175000017500000000477213571367171025677 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh cluster_create_job_template.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'Cluster/do/Create'} END OF SECTION} HEADERS{ {'content-length': '1475', 'content-type': 'application/atom+xml', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000oUYEkL9QvE6kg3vgejRcao8:31d026d8-f0bc-4237-8211-43003f07d3ab; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_2_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Tue, 17 Feb 2015 05:23:58 GMT', 'x-transaction-id': 'XT10091724', 'etag': '-513714866', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Tue, 17 Feb 2015 05:23:58 GMT', 'x-transactionrecord-uuid': 'e80cec6d-4b01-4578-b3b4-65bd5058773a'} END OF SECTION} BODY{ c1334068-9834-3131-891d-e06d56a3e4b2 JobRequest 2015-02-17T05:23:58.266Z IBM Power Systems Management Console Create Cluster END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/logon_file.xml0000664000175000017500000000050713571367171022234 0ustar neoneo00000000000000 pypowervm/tests/data/token_file pypowervm-1.1.24/pypowervm/tests/data/managementconsole_ssh.txt0000664000175000017500000001146413571367171024516 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh managementconsole_ssh.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'passw0rd', 'reason': 'OK', 'host': '9.0.0.0', 'user': 'user', 'path': 'ManagementConsole'} END OF SECTION} HEADERS{ {'content-length': '3708', 'x-transactionrecord-uuid': 'bd503387-b39b-4beb-868d-e592d85fad7a', 'x-powered-by': 'Servlet/3.1', 'set-cookie': 'JSESSIONID=0000_auvgCDt_q2sam9gdGIGdw7:2e950dd3-ffe7-465a-89be-2872724aef38; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_3_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Mon, 03 Aug 2015 14:25:48 GMT', 'x-transaction-id': 'XT10000048', 'etag': '1373645757', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Mon, 03 Aug 2015 14:25:48 GMT', 'x-mc-type': 'PVM', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ c9d6068f-6a1d-390c-bf6c-c8b60a068a14 2015-08-03T10:25:48.292-04:00 IBM Power Systems Management Console 8ce93bce-da59-368e-b840-3182589dec00 ManagementConsole 2015-08-03T10:25:48.482-04:00 IBM Power Systems Management Console 1373645726 8ce93bce-da59-368e-b840-3182589dec00 0 8247 21L 212A63A localhost ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuA/Av0jMYlG54YiaaaQXho8iOZfY+WkBnuFfweESZOy824Ce9FvPqXsNL+nPAgKWG3TONwJldYgCgnBsFXUizkcne9Dt/T/zs2Bzl7b1YPrXyYS1hxKFrV/pYEERUiFa9ppR+M8mxdNYO0+ph356LO3mbxOM6nEZ1L6l6RUvbUwV9Zuw3Hpiz1lAV6d6EwMHJZ+WFlipJ2wxpM4QUKmb0V2UJoHAb7tp3zipr3CCo0NtnpcD7wxsFhtz2ccRvNMbGhe1i9KikmBtQQDl1adMSbBL2+tGmyqHNq/H6d75bfXOUCl7NKtUq7VVGcXDOlTS1CDdLdmUn0l4z0AlyciQt wlp@9.0.0.0 ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCyl3+yXyoYAzvScxTiWqxe0PDwYvTHwLsIkgAY7s7n+8tUR7zA0dYWggl4aCfOAE2RMF0zKoFyRK8a9M/I1kVCYLb9y1rWp76jnxZpRBD/1DjjQ0qW5e1fbdrS52mJcFLL1+MzeoLT7+6GeMUcgNrmZQMUqSbwF+Rdxv56YTdx9u0EH1qaT/H0syp1Y8EHCaBVwdZcmNQLBFaYnVxHNHTQMYMTqokkyrZ9whSaK98OiYQO//5gnJzESOxOURYTzLKLz8WPkiONM6QgF+E5Zobt/REr3Tq8l1e1V/e2+7owFkMMte14I2sfK8QnZUrpJziXv3gwOpUP34gDud6ceBlv wlp@9.0.0.0 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3shE8yLGII+BPaPMIOdNgA6ZyDYKobCtXE6td8X9dgI0Sz08YCUQY9pOeWr/D63LwJaYsgqVspQaUEM5WH6s2eNKAERYayog6iCEaqApDDQETuf4XQ0JXo08izRPpMeRZwp3/RhNJVrxNheUp9nkHI3Mbx7jHvgwih48BTeqfj8L1Nnp4srhYDuzuN6NhUvbWLKJAjaQojRLSYEtys5ASq7v+D+OEXqVBSRheKf5eWOdEF68sBYpOaS4qLycZjd5YGPUg0b+DfME2jr8kjbig1js8omgljSvKIwHIKfrfWPwKbWxtHaqWzTT+fUPygD7IDxPqsSEQIAjNPWmWQM+D wlp@9.0.0.0 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/upload_volgrp2.txt0000664000175000017500000001703513571367171023101 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh upload_volgrp2.txt # #################################################### INFO{ {'comment': 'Used for volume group testing. 
Has extra virtual disk', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/14B854F7-42CE-4FF0-BD57-1D117054E701/VolumeGroup/b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ b6bdbf1f-eddf-3c81-8801-9859eb6fedcb VolumeGroup 2015-01-07T23:10:27.037Z IBM Power Systems Management Console 513757221 b6bdbf1f-eddf-3c81-8801-9859eb6fedcb 1420672227030 1045 1045 1063 image_pool 00f8d6de00004b000000014a54555cd9 1024 blank_media1 0eblank_media1 rw 0.0977 blank_media_2 0eblank_media_2 rw 0.0488 bob_iso 0ebob_iso rw 0.000000 VMLibrary 11 SAS RAID 0 Disk Array U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw false 1089592 hdisk1 active 391BIBMIPR-0 5D8283000000004010IPR-0 5D82830003IBMsas false 0400f8d6de00004b000000014a54555cd9 1 None test 0300f8d6de00004b000000014a54555cd9.1 1 None test2 0300f8d6de00004b000000014a54555cd9.3 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/stm_feed.txt0000664000175000017500000000605213571367171021725 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'RawMetrics/ShortTermMonitor'} END OF SECTION} HEADERS{ {'content-length': '2518', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 06:11:42 GMT', 'etag': '1430374302041', 'date': 'Thu, 30 Apr 2015 06:11:40 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 98498bed-c78a-3a4f-b90a-4b715418fcb6 2015-04-30T06:11:40.000Z ShortTermMetrics ManagedSystem 98498bed-c78a-3a4f-b90a-4b715418fcb6 28cb2328-ca14-48ef-a3bd-691debef53dd 2015-04-30T06:11:35.002Z STM_8247-22L*1111111_phyp_20150430T061135+0000.json 2015-04-30T06:11:35.000-05:00 IBM Power Systems Management Console aa4c491b-d2e7-490c-b034-3d84c5999d52 2015-04-30T06:11:30.000Z STM_8247-22L*1111111_phyp_20150430T061130+0000.json 2015-04-30T06:11:30.000Z IBM Power Systems Management Console da3094bc-ddde-4ef5-8055-733a90e1efa8 2015-04-30T06:11:40.000Z STM_8247-22L*1111111_phyp_20150430T061140+0000.json 2015-04-30T06:11:40.000Z IBM Power Systems Management Console END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/nbbr_network_bridge_peer.txt0000664000175000017500000006303413571367171025165 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/NetworkBridge'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 0ad02639-3eb2-3c5b-807a-e933548d556a 2015-02-17T23:02:20.911Z IBM Power Systems Management Console b6a027a8-5c0b-3ac0-8547-b516f5ba6151 NetworkBridge 2015-02-17T23:02:21.590Z IBM Power Systems Management Console -43845602 b6a027a8-5c0b-3ac0-8547-b516f5ba6151 1424131786240 false false 2227 U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 4094 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 2227 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.255.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 true en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true 0 9af89d52-5892-11e5-885d-feff819cdc9f NetworkBridge 2015-02-17T23:02:21.590Z IBM 
Power Systems Management Console -43845602 9af89d52-5892-11e5-885d-feff819cdc9f 1424131786240 false false 2828 U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2828 false false 0 ent4 1 4091 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4091 false 1001 true 0 ent6 1 2828 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.255.0 Disconnected disabled ent5 false 2828 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2828 false false 0 ent4 1 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4091 false 1001 true 0 ent6 1 true en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true 0 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/lpar_sections.txt0000664000175000017500000000776613571367171023021 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # SECTION:shared_procs false 1.2021.2021.2020128 uncapped END OF SECTION # SECTION:ded_procs 222 true sre idle proces END OF SECTION # # SECTION:lpar_1 64 102410241024 the_name 222truesre idle proces OS400 END OF SECTION pypowervm-1.1.24/pypowervm/tests/data/upload_volgrp.txt0000664000175000017500000001561313571367171023017 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh upload_volgrp.txt # #################################################### INFO{ {'comment': 'Used for volume group testing.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/14B854F7-42CE-4FF0-BD57-1D117054E701/VolumeGroup/b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ b6bdbf1f-eddf-3c81-8801-9859eb6fedcb VolumeGroup 2015-01-07T23:10:27.037Z IBM Power Systems Management Console 513757221 b6bdbf1f-eddf-3c81-8801-9859eb6fedcb 1420672227030 1045 1045 1063 image_pool 00f8d6de00004b000000014a54555cd9 1024 blank_media1 0eblank_media1 rw 0.0977 blank_media_2 0eblank_media_2 rw 0.0488 bob_iso 0ebob_iso rw 0.000000 VMLibrary 11 SAS RAID 0 Disk Array U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw false 1089592 hdisk1 active 391BIBMIPR-0 5D8283000000004010IPR-0 5D82830003IBMsas false 0400f8d6de00004b000000014a54555cd9 
1 None test 0300f8d6de00004b000000014a54555cd9.1 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_httperror.txt0000664000175000017500000000503013571367171023151 0ustar neoneo00000000000000#################################################### INFO{ {'comment': 'User for HttpErrorResponse', 'status': '500', 'pw': 'abc123', 'reason': 'Internal Server Error', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/uom/SharedStoragePool/3dc777cb-e7b1-3f3d-b904-b5e34b47c6fe?group=None' } END OF SECTION} HEADERS{ {'content-length': '1746', 'x-transactionrecord-uuid': 'f3d8e93a-3ca5-4308-9c38-2ffb9529002e', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000IpIgP5r5Z7GMlTUhrOhOyF4:7e93a4c1-31cf-47c7-a2e1-3ba43f7626f9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'connection': 'Close', 'x-transaction-id': 'XT10140897', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Fri, 17 Apr 2015 23:51:45 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 1a14745f-ebfd-4d4c-b22b-e6a6ab33e777 HttpErrorResponse 2015-04-17T23:51:46.756Z IBM Power Systems Management Console 500 /rest/api/uom/SharedStoragePool/3dc777cb-e7b1-3f3d-b904-b5e34b47c6fe Unknown internal error. Unexpected error occurred while fetching Cluster/SSP information : 9999-99Z*2125D4A/1 : Unable to send command to VIOS at this moment. VIOS 1*9999-99Z*2125D4A is busy processing some other request. Please retry the operation after sometime. {If-None-Match=-1208061982, X-Audit-Memento=root, User-Agent=python-requests/2.5.3 CPython/2.7.6 Linux/3.13.0-49-generic, Accept=application/atom+xml, Accept-Encoding=gzip, deflate, Host=9.1.2.3:12443, Connection=keep-alive, X-API-Session=*******, X-Transaction-ID=XT99999999} END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/vios_pcm_data.txt0000664000175000017500000002257113571367171022753 0ustar neoneo00000000000000#################################################### # This file was manually generated. 
# #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_vios_1_20150527T081730+0000.json'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ { "systemUtil": { "utilInfo": { "version": "1.0.0", "metricType": "Raw", "monitoringType": "LTM", "mtms": "8247-22L*2125D4A" }, "utilSample": { "timeStamp": "2015-05-27T00:22:00+0000", "viosUtil": [ { "id": "1", "name": "IOServer - SN2125D4A", "memory": { "utilizedMem": 1715 }, "network": { "genericAdapters": [ { "id": "ent4", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C2-T1", "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "receivedBytes": 0, "sentBytes": 0 }, { "id": "ent0", "type": "physical", "physicalLocation": "U78CB.001.WZS007Y-P1-C10-T1", "receivedPackets": 1703083, "sentPackets": 65801, "droppedPackets": 0, "receivedBytes": 187004823, "sentBytes": 71198950 }, { "id": "ent1", "type": "physical", "physicalLocation": "U78CB.001.WZS007Y-P1-C10-T2", "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "receivedBytes": 0, "sentBytes": 0 }, { "id": "ent2", "type": "physical", "physicalLocation": "U78CB.001.WZS007Y-P1-C10-T3", "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "receivedBytes": 0, "sentBytes": 0 }, { "id": "ent3", "type": "physical", "physicalLocation": "U78CB.001.WZS007Y-P1-C10-T4", "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "receivedBytes": 0, "sentBytes": 0 }, { "id": "ent5", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C12-T1", "receivedPackets": 0, 
"sentPackets": 0, "droppedPackets": 0, "receivedBytes": 0, "sentBytes": 0 } ], "sharedAdapters": [ { "id": "ent6", "type": "sea", "physicalLocation": "U8247.22L.2125D4A-V1-C12-T1", "receivedPackets": 0, "sentPackets": 0, "droppedPackets": 0, "receivedBytes": 0, "sentBytes": 0, "bridgedAdapters": [ "ent3", "ent5" ] } ] }, "storage": { "fiberChannelAdapters": [ { "id": "fcs0", "wwpn": "21000024ff649104", "physicalLocation": "U78CB.001.WZS007Y-P1-C3-T1", "numOfReads": 0, "numOfWrites": 0, "readBytes": 0, "writeBytes": 0, "runningSpeed": 8 }, { "id": "fcs1", "wwpn": "21000024ff649105", "physicalLocation": "U78CB.001.WZS007Y-P1-C3-T2", "numOfReads": 15989, "numOfWrites": 11007, "readBytes": 349011722, "writeBytes": 11252240, "runningSpeed": 8, "ports": [ { "id": "vfc1", "wwpn": "21000024ff649159", "numOfReads": 1234, "numOfWrites": 1235, "readBytes": 184184, "writeBytes": 138523, "runningSpeed": 8, "physicalLocation": "U78CB.001.WZS007Y-P1-C3-T2000" } ] }, { "id": "fcs2", "wwpn": "21000024ff649106", "physicalLocation": "U78CB.001.WZS007Y-P1-C3-T3", "numOfReads": 0, "numOfWrites": 0, "readBytes": 0, "writeBytes": 0, "runningSpeed": 0, "ports": [] }, { "id": "fcs3", "wwpn": "21000024ff649107", "physicalLocation": "U78CB.001.WZS007Y-P1-C3-T4", "numOfReads": 0, "numOfWrites": 0, "readBytes": 0, "writeBytes": 0, "runningSpeed": 0, "ports": [] } ], "genericPhysicalAdapters": [ { "id": "sissas0", "type": "sas", "physicalLocation": "U78CB.001.WZS007Y-P1-C14-T1", "numOfReads": 1089692, "numOfWrites": 1288936, "readBytes": 557922304, "writeBytes": 659935232 } ], "genericVirtualAdapters": [ { "id": "vhost5", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C7", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost6", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C8", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost4", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C6", 
"numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost7", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C9", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost8", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C10", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost1", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C3", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost3", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C5", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost2", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C4", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost9", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C11", "numOfReads": 0, "numOfWrites": 1, "readBytes": 0, "writeBytes": 512 }, { "id": "vhost0", "type": "virtual", "physicalLocation": "U8247.22L.2125D4A-V1-C1000", "numOfReads": 1074, "numOfWrites": 1075, "readBytes": 549888, "writeBytes": 550400 } ], "sharedStoragePools": [ { "id": "ssp1", "poolDisks": ["sissas0"], "numOfReads": 12346, "numOfWrites": 17542, "totalSpace": 18352435, "usedSpace": 123452, "readBytes": 123825, "writeBytes": 375322 } ] } } ], "status": 0, "errorInfo": [] } } } END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/job_request_power_off.txt0000664000175000017500000000343513571367171024531 0ustar neoneo00000000000000INFO{ {'comment': 'For power on-off testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/20414ABB-D6F0-4B3D-BB46-3822240BC4E9'} END OF SECTION} HEADERS{ {'content-length': '20079', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000gMeCzUqIZcs3oxLu4apVINO:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 
1994 16:00:00 GMT', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Tue, 30 Jul 2013 14:43:59 GMT', 'content-type': 'application/xml'} END OF SECTION} BODY{ b4c1df63-ad28-444b-a63e-8c5b4946b8bb JobRequest 2013-08-02T11:18:44.030-04:00 IBM Power Systems Management Console PowerOff LogicalPartition END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/fake_vios_feed2.txt0000664000175000017500000021720513571367171023156 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_vios_feed.txt # #################################################### INFO{ {'comment': 'Use for vios feed testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/VirtualIOServer'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=0000N4dQTtVs6iVW1jRpJ54Q6F7:87025216-ad22-4a32-8e2c-3194816a5355; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 08 Jan 2014 17:05:32 GMT', 'etag': '1775366259', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 08 Jan 2014 17:05:31 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ c7da8bab-8703-317d-91c8-cfd57cac2edb 2015-02-27T05:33:32.295Z IBM Power Systems Management Console 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 VirtualIOServer 2015-02-27T05:33:33.784Z IBM Power Systems Management Console 881130005 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 1425015213565 false 191 POWER7 Disabled false false false false false false normal 2125D4A1 VIOS 2.2.4.0 true true true true true 1 2000 false RAID Controller U78CB.001.WZS007Y 842 260 842 1023 4116 2 4116 4116 false false false false false false false false false false 
553844757 RAID Controller U78CB.001.WZS007Y-P1-C14 U78CB.001.WZS007Y-P1-C14 C14 842 true 553844757 U78CB.001.WZS007Y-P1-C14 C14 false Universal Serial Bus UHC Spec U78CB.001.WZS007Y 33345 3075 33345 1202 4172 2 4172 4116 false false false false false false false false false false 553713691 Universal Serial Bus UHC Spec U78CB.001.WZS007Y-P1-T2 U78CB.001.WZS007Y-P1-T2 T2 33345 true 553713691 U78CB.001.WZS007Y-P1-T2 T2 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78CB.001.WZS007Y 5719 512 5719 1056 5348 1 5348 4116 false false false false false false false false false false 553910302 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78CB.001.WZS007Y-P1-C10 U78CB.001.WZS007Y-P1-C10 C10 5719 true 553910302 U78CB.001.WZS007Y-P1-C10 C10 false Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y 9522 4 9522 1054 4215 2 4215 4116 false false false false false false false false false false 553713705 Quad 8 Gigabit Fibre Channel LP Adapter U78CB.001.WZS007Y-P1-C3 U78CB.001.WZS007Y-P1-C3 C3 U78CB.001.WZS007Y-P1-C3-T1 fcs0 1aU78CB.001.WZS007Y-P1-C3-T1 21000024FF649104 63 64 U78CB.001.WZS007Y-P1-C3-T4 fcs3 1aU78CB.001.WZS007Y-P1-C3-T4 21000024FF649107 U78CB.001.WZS007Y-P1-C3-T3 fcs2 1aU78CB.001.WZS007Y-P1-C3-T3 21000024FF649106 U78CB.001.WZS007Y-P1-C3-T2 fcs1 1aU78CB.001.WZS007Y-P1-C3-T2 21000024FF649105 553713705 U78CB.001.WZS007Y-P1-C3 C3 2000 false false 4096 0.0 7 4096 4096 0.0 7 0 0 4096 4096 0 4096 false true false false 0 4096 4096 false IOServer - SN2125D4A false 0.4 4 0.4 4 0.4 4 0 255 uncapped false uncapped false 4 0.4 0.4 0.4 0 255 4 4 0.4 255 running Virtual IO Server 3443DB77-AED1-47ED-9AA5-3DB9C6CF7089 default 0 0 active 9.1.2.4 25006020324608 false false true true c_8b2bda0c_userID_config.iso 0ec_8b2bda0c_userID_config.iso rw 0.000000 d_fd4626ae_userID_config.iso 0ed_fd4626ae_userID_config.iso rw 0.000000 inst1_140e4f56_kyleh_config.iso 0einst1_140e4f56_kyleh_config.iso rw 0.000000 ubuntu1410 0eubuntu1410 rw 0.5449 
VMLibrary 1 true SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L405DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDQw false 270648 hdisk1 active 391BIBMIPR-0 5DB603000000004010IPR-0 5DB6030003IBMsas false SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L205DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDIw false 270648 hdisk0 active 391BIBMIPR-0 5DB603000000002010IPR-0 5DB6030003IBMsas false 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.248.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 true Client U7895.43X.21EF9FB-V15-C4 U7895.43X.21EF9FB-V15-C4 15 false false 4 2 20 c05076079cff0fa0 c05076079cff0fa1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 21000024FF649104 63 64 Server U7895.43X.21EF9FB-V2-C20 U7895.43X.21EF9FB-V2-C20 2 false true 20 vfchost10 15 4 1dU7895.43X.21EF9FB-V2-C20 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 21000024FF649104 26 64 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 Ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 60 None Ubuntu1410 0300025d4a00007a000000014b36d9deaf.1 0x8200000000000000 vtscsi0 09bddd1603b373bbf Server U8247.22L.2125D4A-V1-C4 U8247.22L.2125D4A-V1-C4 1 false true 4 3 2 Server U8247.22L.2125D4A-V1-C5 U8247.22L.2125D4A-V1-C5 1 false true 5 3 2 Server U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 1 false true 3 3 3 Server 
U8247.22L.2125D4A-V1-C7 U8247.22L.2125D4A-V1-C7 1 false true 7 3 2 Server U8247.22L.2125D4A-V1-C6 U8247.22L.2125D4A-V1-C6 1 false true 6 3 3 Client U8247.22L.2125D4A-V4-C2 U8247.22L.2125D4A-V4-C2 4 false true 2 1 9 U8247.22L.2125D4A-V1-C9 Server U8247.22L.2125D4A-V1-C9 U8247.22L.2125D4A-V1-C9 1 false true 9 vhost1 boot_140e4f56 4 2 U8247.22L.2125D4A-V4-C2 1eU8247.22L.2125D4A-V1-C9 1 None boot_140e4f56 0300025d4a00007a000000014b36d9deaf.2 0x8100000000000000 vtscsi1 098f611b2840d229cf Client U8247.22L.2125D4A-V4-C3 U8247.22L.2125D4A-V4-C3 4 false true 3 1 10 U8247.22L.2125D4A-V1-C10 Server U8247.22L.2125D4A-V1-C10 U8247.22L.2125D4A-V1-C10 1 false true 10 vhost2 inst1_140e4f56_kyleh_config.iso 4 3 U8247.22L.2125D4A-V4-C3 1eU8247.22L.2125D4A-V1-C10 inst1_140e4f56_kyleh_config.iso 0einst1_140e4f56_kyleh_config.iso rw 0.000000 0x8100000000000000 vtopt1 195e4efef939d84e90 Client U8247.22L.2125D4A-V2-C3 U8247.22L.2125D4A-V2-C3 2 true true 3 1 1000 U8247.22L.2125D4A-V1-C1000 Server U8247.22L.2125D4A-V1-C1000 U8247.22L.2125D4A-V1-C1000 1 false true 1000 vhost0 ubuntu1410 2 3 U8247.22L.2125D4A-V2-C3 1eU8247.22L.2125D4A-V1-C1000 ubuntu1410 0eubuntu1410 rw 0.5449 0x8100000000000000 vtopt0 197993aace0a82198c 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T4 U78CB.001.WZS007Y-P1-C10-T4 13U78CB.001.WZS007Y-P1-C10-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T3 U78CB.001.WZS007Y-P1-C10-T3 13U78CB.001.WZS007Y-P1-C10-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T2 U78CB.001.WZS007Y-P1-C10-T2 13U78CB.001.WZS007Y-P1-C10-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78CB.001.WZS007Y-P1-C10-T4 en3 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78CB.001.WZS007Y-P1-C10-T3 en2 Inactive 1 4-Port Gigabit Ethernet 
PCI-Express Adapter (e414571614102004) ent1 U78CB.001.WZS007Y-P1-C10-T2 en1 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/get_volume_group_no_rep.txt0000664000175000017500000000647213571367171025071 0ustar neoneo00000000000000INFO{ {'comment': 'Use for media rep processor testing. Get of a volume group with no media repository', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': '/; HttpOnly'} END OF SECTION} HEADERS{ {'cache-control': 'no-cache="set-cookie, set-cookie2"', 'content-length': '16539', 'x-powered-by': 'Servlet/3.0', 'date': 'Wed, 07 Aug 2013 11:42:46 GMT', 'set-cookie': 'JSESSIONID=0000NmgK-pjcupBNwMN5_d4RMRf:537630eb-a35f-4f87-b20c-6e02059b963e; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'content-type': 'application/xml', 'etag': '1782236641'} END OF SECTION} BODY{ 4be4d406-5626-35bf-9d68-e7611d9782c9 VolumeGroup 2013-08-15T17:12:43.998-04:00 IBM Power Systems Management Console 4be4d406-5626-35bf-9d68-e7611d9782c9 1376601163978 370 370 558 rootvg 00004c6a00007a000000013d8e2e09ae 256 1 SAS Disk Drive U78AB.001.WZSJA7T-P3-D1 NoReserve Failover false 572325 hdisk0 active 281135000039488006C7C09MBF2600RC03IBMsas false END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/cna_feed.txt0000664000175000017500000001554213571367171021667 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # #################################################### INFO{ {'comment': 'Use for Client Network Adapter Testing.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/056D8CBE-82B5-4890-B581-FC3DB3161DB3/ClientNetworkAdapter'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ fcc16de4-513d-3ea7-a321-575805b6f65e 2015-09-09T11:11:43.636-05:00 IBM Power Systems Management Console 4204d084-77a5-329c-9d3b-2f5ccffe2803 ClientNetworkAdapter 2015-09-09T11:11:43.665-05:00 IBM Power Systems Management Console -132918571 4204d084-77a5-329c-9d3b-2f5ccffe2803 0 U8247.22L.2125D0A-V2-C3 U8247.22L.2125D0A-V2-C3 2 3 2AE44C673DFA 1 true 0 1 1500 br-int iface-id=994580c7-df12-4865-b066-6e5587475b7c,iface-status=active,attached-mac=fa:6c:c2:55:0a:20,vm-uuid=8702dd36-e405-407f-bd75-224babc04da5 6a7bda96-bac7-3b96-9208-991e024fc0f9 ClientNetworkAdapter 2015-09-09T11:11:43.666-05:00 IBM Power Systems Management Console -788086984 6a7bda96-bac7-3b96-9208-991e024fc0f9 0 U8247.22L.2125D0A-V2-C6 U8247.22L.2125D0A-V2-C6 2 6 92B22C0F30BD 4094 true 1 1500 br-int iface-id=f35362c3-5213-4398-b1d8-c06dce3a50ca,iface-status=active,attached-mac=fa:a7:49:c0:1a:20,vm-uuid=abf12ade-9743-4a5a-af5e-1bc88d505886 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/job_response_completed_ok.txt0000664000175000017500000001264013571367171025354 0ustar neoneo00000000000000INFO{ {'comment': 'For power on-off testing', 'status': 200, 'pw': 'abc123', 'reason': 
'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/20414ABB-D6F0-4B3D-BB46-3822240BC4E9'} END OF SECTION} HEADERS{ {'content-length': '20079', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000gMeCzUqIZcs3oxLu4apVINO:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Tue, 30 Jul 2013 14:43:59 GMT', 'content-type': 'application/xml'} END OF SECTION} BODY{ 716224d8-afc2-4bcd-84f0-763873291dd0 JobResponse 2013-08-02T07:01:44.225-04:00 IBM Power Systems Management Console 6D459A1E-D028-4F36-83FF-207F29700FB8 1375391227297 1375441290642 1375441301998 COMPLETED_OK ActivateCurrentProfile LogicalPartition Waiting WAITING Complete COMPLETE ActivateCurrentProfile not yet started NOT STARTED ActivateCurrentProfile in progress ACTIVATECURRENTPROFILE INPROGRESS ActivateCurrentProfile Completed ACTIVATECURRENTPROFILE COMPLETED returnCode 0 END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/nbbr_network_bridge_failover.txt0000664000175000017500000005431213571367171026040 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh nbbr_network_bridge_failover.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/NetworkBridge'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 0ad02639-3eb2-3c5b-807a-e933548d556a 2015-02-17T23:02:20.911Z IBM Power Systems Management Console b6a027a8-5c0b-3ac0-8547-b516f5ba6151 NetworkBridge 2015-02-17T23:02:21.590Z IBM Power Systems Management Console -43845602 b6a027a8-5c0b-3ac0-8547-b516f5ba6151 1424131786240 false false 2227 U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 4094 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 2227 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C10-T1 U78CB.001.WZS007Y-P1-C10-T1 13U78CB.001.WZS007Y-P1-C10-T1 en0 9.1.2.4 255.255.255.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true 
true 2 ALL 16BE2AF56D02 2227 false false 0 ent4 1 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D03 4094 false 1000 true 0 ent6 1 true en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78CB.001.WZS007Y-P1-C6-T1 U78CB.001.WZS007Y-P1-C6-T1 13U78CB.001.WZS007Y-P1-C6-T1 en0 9.1.2.5 255.255.255.0 Disconnected disabled ent5 false 2227 disabled 8192 true U8247.22L.2125D4A-V1-C2 U8247.22L.2125D4A-V1-C2 true true 2 ALL 16BE2AF56D04 2227 false false 0 ent4 1 U8247.22L.2125D4A-V1-C3 U8247.22L.2125D4A-V1-C3 false true 3 ALL 16BE2AF56D05 4094 false 1000 true 0 ent6 1 true en5 9.1.2.4 255.255.255.0 Active 105a9dd36a17958199 true 0 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_managedsystem.txt0000664000175000017500000005125113571367171023767 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh KYLE_MGDSYS.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'passw0rd', 'reason': 'OK', 'host': '9.0.0.0', 'user': 'user', 'path': 'ManagedSystem'} END OF SECTION} HEADERS{ {'content-length': '19970', 'x-transactionrecord-uuid': '1ced07b7-7813-4126-b509-e82fde2bec0b', 'x-powered-by': 'Servlet/3.1', 'set-cookie': 'JSESSIONID=000078gRl5DySlCnFQmpAru7Whg:27a03188-585f-448c-8c74-36ffe3f8b23a; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_3_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Mon, 10 Aug 2015 15:51:45 GMT', 'x-transaction-id': 'XT10047905', 'etag': '877584344', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Mon, 10 Aug 2015 15:51:45 GMT', 'x-mc-type': 'PVM', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 70459846-6c0f-32d2-ac61-fb26a8ed810b 2015-08-10T11:51:45.425-04:00 IBM Power Systems Management Console c889bf0d-9996-33ac-84c5-d16727083a77 ManagedSystem 2015-08-10T11:51:45.603-04:00 IBM Power Systems Management Console 877584313 c889bf0d-9996-33ac-84c5-d16727083a77 0 false true false true true false false false true true false false false true true 0 true SAS RAID Controller, PCIe2, Dual-port 6Gb 260 1023 2 842 4116 553844757 U78CB.001.WZS06S2-P1-C14 true 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 1056 1 5719 4116 553779220 U78CB.001.WZS06S2-P1-C12 true Quad 8 Gigabit Fibre Channel LP Adapter 4 1054 2 9522 4116 553713680 U78CB.001.WZS06S2-P1-C7 true Empty slot 0 65535 65535 255 65535 65535 553713683 U78CB.001.WZS06S2-P1-C11 true Empty slot 0 65535 65535 255 65535 65535 553844765 U78CB.001.WZS06S2-P1-C9 true Quad 8 Gigabit Fibre Channel LP Adapter 4 1054 2 9522 4116 553713688 U78CB.001.WZS06S2-P1-C6 true Empty slot 0 65535 65535 255 65535 65535 553975839 U78CB.001.WZS06S2-P1-C15 true 1 Gigabit 
Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 1056 1 5719 4116 553910302 U78CB.001.WZS06S2-P1-C10 true Universal Serial Bus UHC Spec 3075 1202 2 33345 4116 553713691 U78CB.001.WZS06S2-P1-T2 13857705832243593216 131072 87296 256 6 131072 1 256 4864 87296 256 256 10 0.6 64 64 256 64 64 64 64 10 256 0.05 0.6 default POWER6 POWER6_Plus POWER7 POWER8 8247 21L 9999999 false 200 9.0.0.0 169.254.3.147 operating Server-8247-21L-SN9999999 16 16 0 0 false false false END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_hosting_vios_feed.txt0000664000175000017500000031045113571367171025664 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_vios_hosting_vios_feed.txt # #################################################### INFO{ {'comment': 'Used for multiple VIO Testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/e7344c5b-79b5-3e73-8f64-94821424bc25/VirtualIOServer'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 349eae6a-80eb-32ce-a8a5-ee4dfed459d0 2015-03-26T08:57:47.566Z IBM Power Systems Management Console 1300C76F-9814-4A4D-B1F0-5B69352A7DEA VirtualIOServer 2015-03-26T08:57:58.480Z IBM Power Systems Management Console -1287056625 162C3903-EA57-4FF0-B100-18D2B2BB18DE 0 127 POWER8 false normal 0.0.0.0.0.0 1 1000 true PCI-E SAS Controller 
"2053,2054,2055,5901,5909,5911" 260 825 826 1 4116 4116 553713680 U78CA.001.RCH0007-P1-C1-C1 553713680 U78CA.001.RCH0007-P1-C1-C1 false true Empty slot 0 65535 65535 65535 255 65535 65535 553713681 U78CA.001.RCH0007-P1-C2-C1 553713681 U78CA.001.RCH0007-P1-C2-C1 false true 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 5719 1056 1 5348 4116 553713682 U78CA.001.RCH0007-P1-C3-C1 553713682 U78CA.001.RCH0007-P1-C3-C1 false true Ethernet controller 512 4099 73 0 5555 5555 553713683 U78CA.001.RCH0007-P1-C4-C1 553713683 U78CA.001.RCH0007-P1-C4-C1 false true PCI-E SAS Controller "2053,2054,2055,5901,5909,5911" 260 825 826 1 4116 4116 553713684 U78CA.001.RCH0007-P1-C5-C1 553713684 U78CA.001.RCH0007-P1-C5-C1 false true Empty slot 0 65535 65535 65535 255 65535 65535 553713685 U78CA.001.RCH0007-P1-C6-C1 553713685 U78CA.001.RCH0007-P1-C6-C1 false true Empty slot 0 65535 65535 65535 255 65535 65535 553713686 U78CA.001.RCH0007-P1-C7-C1 553713686 U78CA.001.RCH0007-P1-C7-C1 false true 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 512 5719 1056 1 5348 4116 553713687 U78CA.001.RCH0007-P1-C8-C1 553713687 U78CA.001.RCH0007-P1-C8-C1 false 3 Server 1 B2B7657DEE96 4094 1 1 4 Server 1 2A2416BD0E5E 1 2 1 6 Server 1 1695B06C7315 2 2 1 16 Server 1 16 100 17 Server 1 16 101 1000 8192 0.0 6 16384 256 AUTO 0.0 6 16384 8192 0 256 false 0 false novalink_106CCC7 false 0.5 4 24 24 0.05 1 0 128 uncapped false uncapped false uncapped 4 24 0.05 0.5 0 128 1 24 running AIX/Linux 162C3903-EA57-4FF0-B100-18D2B2BB18DE default inactive true Linux ppc64le time_stamp=01/13/2017 19:46:03,refcode=Linux ppc64le,word2=03300000,fru_call_out_loc_codes=#16 SMP Fri Jan 6 18:12:40 CST 2017 true true Normal 1442 true false false cfg_ovstest2_434c537f_pvm.iso 75cd8366-1280-3bdd-a92e-8bf44e3a940f ro 0.000431 cfg_ovstest_a0ea436b_pvm.iso e774973c-2ba5-38b8-8dc7-23fef9dd8e6f ro 0.000431 repo 14 false false IBM - ST9300653SS scsi-35000c500717c0dbf 286102 /dev/sda active scsi-35000c500717c0dbf IBM 
- ST9300653SS scsi-35000c500717c3967 286102 /dev/sdb active scsi-35000c500717c3967 IBM - ST9300653SS scsi-35000c500717c4413 286102 /dev/sdc active scsi-35000c500717c4413 IBM - ST9300653SS lvm-pv-uuid-3nMuoO-wRq5-f3tb-CwEl-p6FO-fqRl-68BAZt 286102 /dev/sdd active lvm-pv-uuid-3nMuoO-wRq5-f3tb-CwEl-p6FO-fqRl-68BAZt IBM - ST9300653SS lvm-pv-uuid-8cUCVm-0yQC-puQM-3EVX-U11A-bsGH-0WG6dj 286102 /dev/sde active lvm-pv-uuid-8cUCVm-0yQC-puQM-3EVX-U11A-bsGH-0WG6dj IBM - ST9300653SS scsi-35000c500717c0dd7 286102 /dev/sdf active scsi-35000c500717c0dd7 IBM - ST9300653SS lvm-pv-uuid-5HUlQa-NeBG-yKd9-g75w-rJiO-V7iA-eUszBM 286102 /dev/sdg active lvm-pv-uuid-5HUlQa-NeBG-yKd9-g75w-rJiO-V7iA-eUszBM IBM - ST9300653SS scsi-35000c500717a36bb 286102 /dev/sdh active scsi-35000c500717a36bb IBM - ST9300653SS scsi-35000c500717a4a63 286102 /dev/sdi active scsi-35000c500717a4a63 U9119.MME.106CCC7-V1-C3 U9119.MME.106CCC7-V1-C3 1 3 true B2B7657DEE96 4094 true 1 ibmveth3 1 U9119.MME.106CCC7-V1-C4 U9119.MME.106CCC7-V1-C4 1 4 true 2A2416BD0E5E 1 true 2 tap7f415480-0f 1 U9119.MME.106CCC7-V1-C6 U9119.MME.106CCC7-V1-C6 1 6 true 1695B06C7315 2 true 2 tap2c1630f5-86 1 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U9119.MME.106CCC7-V1-C50 U9119.MME.106CCC7-V1-C50 1 false true 50 vfchost26 100 50 1dU9119.MME.106CCC7-V1-C50 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T2 fcs3 1aU78AB.001.WZSH5ZY-P1-C5-T2 10000090FA1B6899 59 64 Client U9119.MME.106CCC7-V100-C51 U9119.MME.106CCC7-V100-C51 100 false true 51 1 51 c05076065a7c02e4 c05076065a7c02e5 Server U9119.MME.106CCC7-V1-C51 U9119.MME.106CCC7-V1-C51 1 false true 51 vfchost27 100 51 1dU9119.MME.106CCC7-V1-C51 fcs3 U78AB.001.WZSH5ZY-P1-C5-T2 fcs3 1aU78AB.001.WZSH5ZY-P1-C5-T2 10000090FA1B6899 59 64 Client U9119.MME.106CCC7-V100-C16 U9119.MME.106CCC7-V100-C16 100 16 1 16 Server U9119.MME.106CCC7-V1-C16 U9119.MME.106CCC7-V1-C16 1 16 
/var/lib/pvm/fileio/vios_msp_1 100 16 1eU9119.MME.106CCC7-V1-C16 35 /var/lib/pvm/fileio/vios_msp_1 /var/lib/pvm/fileio/vios_msp_1 d9b9278c-9772-3b1e-ae4e-358002eda5bd File lun1 FILEIO-vios_msp_1 30000010-lun1 Client U9119.MME.106CCC7-V100-C16 U9119.MME.106CCC7-V100-C16 100 16 1 16 Server U9119.MME.106CCC7-V1-C16 U9119.MME.106CCC7-V1-C16 1 16 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 100 16 1eU9119.MME.106CCC7-V1-C16 1.410669 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 6d091de6-a9f1-3d1d-86d0-9bb017358784 File lun2 vios_1640E_61k_install_v2 30000010-lun2 Client U9119.MME.106CCC7-V101-C16 U9119.MME.106CCC7-V101-C16 101 16 1 17 Server U9119.MME.106CCC7-V1-C17 U9119.MME.106CCC7-V1-C17 1 17 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 101 16 1eU9119.MME.106CCC7-V1-C17 1.410669 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 6d091de6-a9f1-3d1d-86d0-9bb017358784 File lun2 vios_1640E_61k_install_v2 30000011-lun2 Server U9119.MME.106CCC7-V1-C19 U9119.MME.106CCC7-V1-C19 1 19 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 101 17 1eU9119.MME.106CCC7-V1-C18 1.410669 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 6d091de6-a9f1-3d1d-86d0-9bb017358784 File lun1 vios_1640E_61k_install_v2 30000013-lun1 Client U9119.MME.106CCC7-V102-C16 U9119.MME.106CCC7-V102-C16 102 16 1 18 Server U9119.MME.106CCC7-V1-C18 U9119.MME.106CCC7-V1-C18 1 18 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 102 16 1eU9119.MME.106CCC7-V1-C18 1.410669 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 6d091de6-a9f1-3d1d-86d0-9bb017358784 File lun2 vios_1640E_61k_install_v2 30000012-lun2 Client U9119.MME.106CCC7-V16-C16 U9119.MME.106CCC7-V16-C16 16 16 1 20 Server 
U9119.MME.106CCC7-V1-C20 U9119.MME.106CCC7-V1-C20 1 20 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 16 16 1eU9119.MME.106CCC7-V1-C20 1.410669 /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso /var/lib/pvm/fileio/vios_install_1640E_61k/dvdimage.v2.iso 6d091de6-a9f1-3d1d-86d0-9bb017358784 File lun1 vios_1640E_61k_install_v2 30000014-lun1 3C635443-E816-4AC0-B90A-65937AB2B9FC VirtualIOServer 2015-03-26T08:57:58.767Z IBM Power Systems Management Console 608448218 3C635443-E816-4AC0-B90A-65937AB2B9FC 0 127 POWER7 false normal null, 100 64 3 Client 100 CE204D01F1B5 4094 1 16 Client 100 16 1 64 4096 0.0 6 4096 4096 AUTO 0.0 6 4096 4096 0 4096 false 0 false vios_msp_1 false 0.5 2 0.5 2 0.5 2 0 64 uncapped false uncapped false uncapped 2 0.5 0.5 0.5 0 64 2 2 not activated Virtual IO Server 3C635443-E816-4AC0-B90A-65937AB2B9FC default inactive 192.168.128.2 false 00000000 time_stamp=01/10/2017 22:17:12,refcode=00000000 false false Normal 0 true false false true false 7D26B67A-F036-43BC-85D9-34CDF6E1AD17 VirtualIOServer 2015-03-26T08:57:58.767Z IBM Power Systems Management Console 608448218 7D26B67A-F036-43BC-85D9-34CDF6E1AD17 0 127 POWER7 false normal 0.0.0.0.0.0 101 64 16 Client 101 17 1 64 4096 0.0 6 4096 4096 AUTO 0.0 6 4096 4096 0 4096 false 0 false vios_msp_2 false 0.5 2 0.5 2 0.5 2 0 64 uncapped false uncapped false uncapped 2 0.5 0.5 0.5 0 64 2 2 not activated Virtual IO Server 7D26B67A-F036-43BC-85D9-34CDF6E1AD17 default inactive false 00000000 time_stamp=01/11/2017 19:54:29,refcode=00000000 false false System_Management_Services 0 true false false true false END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/ssp.txt0000664000175000017500000001115513571367171020744 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh ssp.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'SharedStoragePool/e357a79a-7a3d-35b6-8405-55ab6a2d0de7'} END OF SECTION} HEADERS{ {'content-length': '3571', 'content-type': 'application/atom+xml', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000PVgSCJDHjFlKfpACJQ2Ny5Y:c8963131-fc5d-48ff-a2f4-346b019f3f2c; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_2_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 06 Feb 2015 04:43:12 GMT', 'x-transaction-id': 'XT10009019', 'etag': '-1833924453', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Fri, 06 Feb 2015 04:43:12 GMT', 'x-transactionrecord-uuid': 'd718eebb-de51-4777-a665-bc75c71e2797'} END OF SECTION} BODY{ e357a79a-7a3d-35b6-8405-55ab6a2d0de7 SharedStoragePool 2015-02-06T04:43:12.194Z IBM Power Systems Management Console -1833924453 e357a79a-7a3d-35b6-8405-55ab6a2d0de7 1423194349238 true 27cfc907d2abf511e4b2d540f2e95daf301a02b0904778d755df5a46fe25e500d8 1 VirtualIO_Disk true neolu1 false false MPIO IBM 2076 FC Disk 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAwMw== 51200 hdisk3 active true 49.88 48.98 1.234567 1 35% neossp1 24cfc907d2abf511e4b2d540f2e95daf30000000000972FB370000000054D14EB8 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_feed_multi.txt0000664000175000017500000250736013571367171024314 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE #################################################### INFO{ {'comment': 'Used for multiple VIO Testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/e7344c5b-79b5-3e73-8f64-94821424bc25/VirtualIOServer'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 349eae6a-80eb-32ce-a8a5-ee4dfed459d0 2015-03-26T08:57:47.566Z IBM Power Systems Management Console 1300C76F-9814-4A4D-B1F0-5B69352A7DEA VirtualIOServer 2015-03-26T08:57:58.480Z IBM Power Systems Management Console -1287056625 1300C76F-9814-4A4D-B1F0-5B69352A7DEA 1427357798253 false 191 0 POWER7 On false false false false false false normal 21EF9FB2 VIOS 2.2.3.1 true true true true true 2 400 false EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA 1808 512 1808 59187 6562 2 6562 4319 false false false false false false false false false false 553714209 EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA-P1-C36-L1 U78AF.001.WZS04LA-P1-C36-L1 C36 1808 553714209 U78AF.001.WZS04LA-P1-C36-L1 C36 false EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA 1808 512 1808 59187 6562 2 6562 4319 false false false false false false false false false false 553714224 EN4054 4-port 10Gb Ethernet Adapter U78AF.001.WZS04LA-P1-C36-L2 U78AF.001.WZS04LA-P1-C36-L2 C36 1808 553714224 U78AF.001.WZS04LA-P1-C36-L2 C36 false FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA 57856 3076 57856 57858 4319 16 4319 4319 false false false false false false false false false false 553714211 
FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA-P1-C37-L1 U78AF.001.WZS04LA-P1-C37-L1 C37 U78AF.001.WZS04LA-P1-C37-L1-T2 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C37-L1-T2-W50050768022121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwNA== false 102400 hdisk0 active 332136005076D02810187E00000000000000404214503IBMfcp true fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 U78AF.001.WZS04LA-P1-C37-L1-T1 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C37-L1-T2-W50050768022121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwNA== false 102400 hdisk0 active 332136005076D02810187E00000000000000404214503IBMfcp true fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 553714211 U78AF.001.WZS04LA-P1-C37-L1 C37 400 false false 0 24576 0.0 7 false 24576 24576 false 0 0.0 7 0 0 24576 24576 0 24576 false true false false 0 24576 24576 false nimbus-ch03-p2-vios2 false 1 2 1 2 1 2 0 capped false capped false 2 1 1 1 0 2 2 1 running Virtual IO Server 1300C76F-9814-4A4D-B1F0-5B69352A7DEA default 0 0 active 9.1.2.5 196281908884992 false true true vopt_32ab166a7a3c420895ffa8ce23c1c08c 0evopt_32ab166a7a3c420895ffa8ce23c1c08c rw 0.000000 vopt_783ab44695b84fb6b900742e4e832362 0evopt_783ab44695b84fb6b900742e4e832362 rw 0.000000 vopt_82eabf37001e4df8b379338c9225710b 0evopt_82eabf37001e4df8b379338c9225710b rw 0.000000 vopt_89eabc793ad048e98740af17d025d480 0evopt_89eabc793ad048e98740af17d025d480 rw 0.000000 vopt_9a6b287f4e294268a5fd99d89722e78b 0evopt_9a6b287f4e294268a5fd99d89722e78b rw 0.000000 vopt_d77cf4160f044c108f6a8d111c677990 0evopt_d77cf4160f044c108f6a8d111c677990 rw 0.000000 vopt_f54cb325a723479dad385b94d0105a6c 0evopt_f54cb325a723479dad385b94d0105a6c rw 0.000000 VMLibrary 1 true MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C37-L1-T2-W50050768022121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwNA== false 102400 hdisk0 active 
332136005076D02810187E00000000000000404214503IBMfcp true ent5 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent0 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L1-T1 U78AF.001.WZS04LA-P1-C36-L1-T1 13U78AF.001.WZS04LA-P1-C36-L1-T1 auto ent6 false 1 disabled 8192 true U7895.43X.21EF9FB-V2-C2 U7895.43X.21EF9FB-V2-C2 true true 2 ALL B28471747602 1 false false 0 ent4 2 U7895.43X.21EF9FB-V2-C4 U7895.43X.21EF9FB-V2-C4 false true 4 ALL B28471747604 4093 false 2134 2173 true 0 ent8 2 en6 Inactive 107f291dfe86e4fb5c true U7895.43X.21EF9FB-V2-C2 U7895.43X.21EF9FB-V2-C2 true true 2 ALL B28471747602 1 false false 0 ent4 2 U7895.43X.21EF9FB-V2-C4 U7895.43X.21EF9FB-V2-C4 false true 4 ALL B28471747604 4093 false 2134 2173 true 0 ent8 2 true Client U7895.43X.21EF9FB-V63-C3 U7895.43X.21EF9FB-V63-C3 10 false true 3 2 94 c05076079cff0e56 c05076079cff0e57 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C94 U7895.43X.21EF9FB-V2-C94 2 false true 94 vfchost60 10 3 1dU7895.43X.21EF9FB-V2-C94 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V66-C4 U7895.43X.21EF9FB-V66-C4 10 false true 4 2 93 c05076079cff0e68 c05076079cff0e69 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C93 U7895.43X.21EF9FB-V2-C93 2 false true 93 vfchost63 10 4 1dU7895.43X.21EF9FB-V2-C93 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V62-C4 U7895.43X.21EF9FB-V62-C4 10 false true 4 2 92 c05076079cff0e4c c05076079cff0e4d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C92 U7895.43X.21EF9FB-V2-C92 2 false true 92 vfchost59 10 4 1dU7895.43X.21EF9FB-V2-C92 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client 
U7895.43X.21EF9FB-V52-C4 U7895.43X.21EF9FB-V52-C4 52 false true 4 2 88 c05076079cff0e44 c05076079cff0e45 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C88 U7895.43X.21EF9FB-V2-C88 2 false true 88 vfchost53 52 4 1dU7895.43X.21EF9FB-V2-C88 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V61-C3 U7895.43X.21EF9FB-V61-C3 61 false true 3 2 85 c05076079cff08da c05076079cff08db U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C85 U7895.43X.21EF9FB-V2-C85 2 false true 85 vfchost49 61 3 1dU7895.43X.21EF9FB-V2-C85 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V54-C3 U7895.43X.21EF9FB-V54-C3 54 false true 3 2 75 c05076079cff0922 c05076079cff0923 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C75 U7895.43X.21EF9FB-V2-C75 2 false true 75 vfchost51 54 3 1dU7895.43X.21EF9FB-V2-C75 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V47-C4 U7895.43X.21EF9FB-V47-C4 47 false true 4 2 73 c05076079cff0858 c05076079cff0859 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C73 U7895.43X.21EF9FB-V2-C73 2 false true 73 vfchost45 47 4 1dU7895.43X.21EF9FB-V2-C73 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V43-C3 U7895.43X.21EF9FB-V43-C3 43 false true 3 2 70 c05076079cff0f9a c05076079cff0f9b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C70 U7895.43X.21EF9FB-V2-C70 2 false true 70 vfchost40 43 3 1dU7895.43X.21EF9FB-V2-C70 fcs1 
U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V32-C3 U7895.43X.21EF9FB-V32-C3 32 false true 3 2 68 c05076079cff0f96 c05076079cff0f97 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C68 U7895.43X.21EF9FB-V2-C68 2 false true 68 vfchost29 32 3 1dU7895.43X.21EF9FB-V2-C68 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V42-C4 U7895.43X.21EF9FB-V42-C4 42 false false 4 2 67 c05076079cff0f88 c05076079cff0f89 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C67 U7895.43X.21EF9FB-V2-C67 2 false true 67 vfchost39 42 4 1dU7895.43X.21EF9FB-V2-C67 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V44-C3 U7895.43X.21EF9FB-V44-C3 44 false true 3 2 66 c05076079cff0e0e c05076079cff0e0f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C66 U7895.43X.21EF9FB-V2-C66 2 false true 66 vfchost41 44 3 1dU7895.43X.21EF9FB-V2-C66 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V27-C3 U7895.43X.21EF9FB-V27-C3 27 false true 3 2 65 c05076079cff0e0a c05076079cff0e0b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C65 U7895.43X.21EF9FB-V2-C65 2 false true 65 vfchost20 27 3 1dU7895.43X.21EF9FB-V2-C65 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V40-C3 U7895.43X.21EF9FB-V40-C3 40 false true 3 2 64 c05076079cff0f76 c05076079cff0f77 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C64 
U7895.43X.21EF9FB-V2-C64 2 false true 64 vfchost37 40 3 1dU7895.43X.21EF9FB-V2-C64 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C63 U7895.43X.21EF9FB-V2-C63 2 false true 63 vfchost34 37 3 1dU7895.43X.21EF9FB-V2-C63 Client U7895.43X.21EF9FB-V33-C4 U7895.43X.21EF9FB-V33-C4 33 false true 4 2 62 c05076079cff0f38 c05076079cff0f39 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C62 U7895.43X.21EF9FB-V2-C62 2 false true 62 vfchost30 33 4 1dU7895.43X.21EF9FB-V2-C62 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V41-C4 U7895.43X.21EF9FB-V41-C4 41 false true 4 2 61 c05076079cff0df4 c05076079cff0df5 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C61 U7895.43X.21EF9FB-V2-C61 2 false true 61 vfchost38 41 4 1dU7895.43X.21EF9FB-V2-C61 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V51-C3 U7895.43X.21EF9FB-V51-C3 51 false true 3 2 58 c05076079cff0cf2 c05076079cff0cf3 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C58 U7895.43X.21EF9FB-V2-C58 2 false true 58 vfchost48 51 3 1dU7895.43X.21EF9FB-V2-C58 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V4-C4 U7895.43X.21EF9FB-V4-C4 4 false true 4 2 57 c05076079cff0f74 c05076079cff0f75 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C57 U7895.43X.21EF9FB-V2-C57 2 false true 57 vfchost1 4 4 1dU7895.43X.21EF9FB-V2-C57 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V39-C3 
U7895.43X.21EF9FB-V39-C3 39 false false 3 2 56 c05076079cff0f6e c05076079cff0f6f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C56 U7895.43X.21EF9FB-V2-C56 2 false true 56 vfchost36 39 3 1dU7895.43X.21EF9FB-V2-C56 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C55 U7895.43X.21EF9FB-V2-C55 2 false true 55 vfchost35 38 4 1dU7895.43X.21EF9FB-V2-C55 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V36-C3 U7895.43X.21EF9FB-V36-C3 36 false false 3 2 54 c05076079cff0f26 c05076079cff0f27 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C54 U7895.43X.21EF9FB-V2-C54 2 false true 54 vfchost33 36 3 1dU7895.43X.21EF9FB-V2-C54 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V14-C4 U7895.43X.21EF9FB-V14-C4 14 false true 4 2 53 c05076079cff0eb0 c05076079cff0eb1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C53 U7895.43X.21EF9FB-V2-C53 2 false true 53 vfchost21 14 4 1dU7895.43X.21EF9FB-V2-C53 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V85-C3 U7895.43X.21EF9FB-V85-C3 85 false true 3 2 52 c05076079cff0d3e c05076079cff0d3f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C52 U7895.43X.21EF9FB-V2-C52 2 false true 52 vfchost82 85 3 1dU7895.43X.21EF9FB-V2-C52 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V35-C4 U7895.43X.21EF9FB-V35-C4 35 
false true 4 2 50 c05076079cff0ddc c05076079cff0ddd U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C50 U7895.43X.21EF9FB-V2-C50 2 false true 50 vfchost31 35 4 1dU7895.43X.21EF9FB-V2-C50 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V46-C4 U7895.43X.21EF9FB-V46-C4 46 false true 4 2 140 c05076079cff0bfc c05076079cff0bfd U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C140 U7895.43X.21EF9FB-V2-C140 2 false true 140 vfchost43 46 4 1dU7895.43X.21EF9FB-V2-C140 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V7-C3 U7895.43X.21EF9FB-V7-C3 7 false true 3 2 45 c05076079cff0f92 c05076079cff0f93 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C45 U7895.43X.21EF9FB-V2-C45 2 false true 45 vfchost5 7 3 1dU7895.43X.21EF9FB-V2-C45 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V29-C3 U7895.43X.21EF9FB-V29-C3 29 false true 3 2 44 c05076079cff0f4e c05076079cff0f4f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C44 U7895.43X.21EF9FB-V2-C44 2 false true 44 vfchost25 29 3 1dU7895.43X.21EF9FB-V2-C44 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V19-C4 U7895.43X.21EF9FB-V19-C4 19 false true 4 2 43 c05076079cff0ed8 c05076079cff0ed9 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C43 U7895.43X.21EF9FB-V2-C43 2 false true 43 vfchost17 19 4 1dU7895.43X.21EF9FB-V2-C43 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 
10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V25-C4 U7895.43X.21EF9FB-V25-C4 25 false false 4 2 42 c05076079cff0f44 c05076079cff0f45 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C42 U7895.43X.21EF9FB-V2-C42 2 false true 42 vfchost23 25 4 1dU7895.43X.21EF9FB-V2-C42 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V73-C4 U7895.43X.21EF9FB-V73-C4 73 false true 4 2 135 c05076079cff0d80 c05076079cff0d81 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C135 U7895.43X.21EF9FB-V2-C135 2 false true 135 vfchost70 73 4 1dU7895.43X.21EF9FB-V2-C135 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V20-C4 U7895.43X.21EF9FB-V20-C4 20 false true 4 2 40 c05076079cff0f84 c05076079cff0f85 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C40 U7895.43X.21EF9FB-V2-C40 2 false true 40 vfchost13 20 4 1dU7895.43X.21EF9FB-V2-C40 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V18-C3 U7895.43X.21EF9FB-V18-C3 18 false true 3 2 37 c05076079cff00d8 c05076079cff00d9 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C37 U7895.43X.21EF9FB-V2-C37 2 false true 37 vfchost22 18 3 1dU7895.43X.21EF9FB-V2-C37 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V23-C3 U7895.43X.21EF9FB-V23-C3 23 false false 3 2 36 c05076079cff0f7e c05076079cff0f7f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C36 U7895.43X.21EF9FB-V2-C36 2 false true 36 vfchost15 23 3 
1dU7895.43X.21EF9FB-V2-C36 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V75-C4 U7895.43X.21EF9FB-V75-C4 75 false true 4 2 130 c05076079cff0d30 c05076079cff0d31 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C130 U7895.43X.21EF9FB-V2-C130 2 false true 130 vfchost72 75 4 1dU7895.43X.21EF9FB-V2-C130 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V24-C3 U7895.43X.21EF9FB-V24-C3 24 false true 3 2 35 c05076079cff0f32 c05076079cff0f33 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C35 U7895.43X.21EF9FB-V2-C35 2 false true 35 vfchost18 24 3 1dU7895.43X.21EF9FB-V2-C35 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V5-C4 U7895.43X.21EF9FB-V5-C4 5 false true 4 2 34 c05076079cff0f7c c05076079cff0f7d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C34 U7895.43X.21EF9FB-V2-C34 2 false true 34 vfchost2 5 4 1dU7895.43X.21EF9FB-V2-C34 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V22-C3 U7895.43X.21EF9FB-V22-C3 22 false true 3 2 32 c05076079cff0dc6 c05076079cff0dc7 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C32 U7895.43X.21EF9FB-V2-C32 2 false true 32 vfchost19 22 3 1dU7895.43X.21EF9FB-V2-C32 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V21-C3 U7895.43X.21EF9FB-V21-C3 21 false false 3 2 31 c05076079cff0f2e c05076079cff0f2f U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server 
U7895.43X.21EF9FB-V2-C31 U7895.43X.21EF9FB-V2-C31 2 false true 31 vfchost16 21 3 1dU7895.43X.21EF9FB-V2-C31 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V83-C3 U7895.43X.21EF9FB-V83-C3 83 false true 3 2 125 c05076079cff0b82 c05076079cff0b83 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C125 U7895.43X.21EF9FB-V2-C125 2 false true 125 vfchost80 83 3 1dU7895.43X.21EF9FB-V2-C125 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V31-C3 U7895.43X.21EF9FB-V31-C3 31 false true 3 2 30 c05076079cff0ea6 c05076079cff0ea7 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C30 U7895.43X.21EF9FB-V2-C30 2 false true 30 vfchost27 31 3 1dU7895.43X.21EF9FB-V2-C30 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V26-C3 U7895.43X.21EF9FB-V26-C3 26 false true 3 2 29 c05076079cff0f1a c05076079cff0f1b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C29 U7895.43X.21EF9FB-V2-C29 2 false true 29 vfchost24 26 3 1dU7895.43X.21EF9FB-V2-C29 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V80-C4 U7895.43X.21EF9FB-V80-C4 80 false true 4 2 123 c05076079cff0838 c05076079cff0839 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C123 U7895.43X.21EF9FB-V2-C123 2 false true 123 vfchost77 80 4 1dU7895.43X.21EF9FB-V2-C123 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V34-C3 U7895.43X.21EF9FB-V34-C3 34 false true 3 2 26 c05076079cff0f66 c05076079cff0f67 
U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C26 U7895.43X.21EF9FB-V2-C26 2 false true 26 vfchost32 34 3 1dU7895.43X.21EF9FB-V2-C26 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V11-C3 U7895.43X.21EF9FB-V11-C3 11 false false 3 2 25 c05076079cff04aa c05076079cff04ab U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C25 U7895.43X.21EF9FB-V2-C25 2 false true 25 vfchost14 11 3 1dU7895.43X.21EF9FB-V2-C25 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V10-C4 U7895.43X.21EF9FB-V10-C4 10 false true 4 2 24 c05076079cff0d90 c05076079cff0d91 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C24 U7895.43X.21EF9FB-V2-C24 2 false true 24 vfchost7 10 4 1dU7895.43X.21EF9FB-V2-C24 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V30-C3 U7895.43X.21EF9FB-V30-C3 30 false true 3 2 22 c05076079cff04fe c05076079cff04ff U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C22 U7895.43X.21EF9FB-V2-C22 2 false true 22 vfchost28 30 3 1dU7895.43X.21EF9FB-V2-C22 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V15-C4 U7895.43X.21EF9FB-V15-C4 15 false false 4 2 20 c05076079cff0fa0 c05076079cff0fa1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C20 U7895.43X.21EF9FB-V2-C20 2 false true 20 vfchost10 15 4 1dU7895.43X.21EF9FB-V2-C20 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V9-C4 
U7895.43X.21EF9FB-V9-C4 9 false true 4 2 19 c05076079cff0db8 c05076079cff0db9 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C19 U7895.43X.21EF9FB-V2-C19 2 false true 19 vfchost8 9 4 1dU7895.43X.21EF9FB-V2-C19 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V74-C4 U7895.43X.21EF9FB-V74-C4 74 false true 4 2 113 c05076079cff0e84 c05076079cff0e85 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C113 U7895.43X.21EF9FB-V2-C113 2 false true 113 vfchost71 74 4 1dU7895.43X.21EF9FB-V2-C113 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V17-C4 U7895.43X.21EF9FB-V17-C4 17 false false 4 2 18 c05076079cff0f3c c05076079cff0f3d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C18 U7895.43X.21EF9FB-V2-C18 2 false true 18 vfchost12 17 4 1dU7895.43X.21EF9FB-V2-C18 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V13-C4 U7895.43X.21EF9FB-V13-C4 13 false true 4 2 16 c05076079cff0d7c c05076079cff0d7d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C16 U7895.43X.21EF9FB-V2-C16 2 false true 16 vfchost9 13 4 1dU7895.43X.21EF9FB-V2-C16 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V6-C4 U7895.43X.21EF9FB-V6-C4 6 false true 4 2 14 c05076079cff0f90 c05076079cff0f91 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C14 U7895.43X.21EF9FB-V2-C14 2 false true 14 vfchost4 6 4 1dU7895.43X.21EF9FB-V2-C14 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 
1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V68-C4 U7895.43X.21EF9FB-V68-C4 68 false true 4 2 108 c05076079cff0c6c c05076079cff0c6d U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C108 U7895.43X.21EF9FB-V2-C108 2 false true 108 vfchost65 68 4 1dU7895.43X.21EF9FB-V2-C108 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V8-C3 U7895.43X.21EF9FB-V8-C3 8 false true 3 2 12 c05076079cff0f56 c05076079cff0f57 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C12 U7895.43X.21EF9FB-V2-C12 2 false true 12 vfchost6 8 3 1dU7895.43X.21EF9FB-V2-C12 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V3-C4 U7895.43X.21EF9FB-V3-C4 3 false true 4 2 11 c05076079cff07bc c05076079cff07bd U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C11 U7895.43X.21EF9FB-V2-C11 2 false true 11 vfchost0 3 4 1dU7895.43X.21EF9FB-V2-C11 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V12-C3 U7895.43X.21EF9FB-V12-C3 12 false true 3 2 9 c05076079cff0f4a c05076079cff0f4b U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Server U7895.43X.21EF9FB-V2-C9 U7895.43X.21EF9FB-V2-C9 2 false true 9 vfchost3 12 3 1dU7895.43X.21EF9FB-V2-C9 fcs1 U78AF.001.WZS04LA-P1-C37-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C37-L1-T2 10000090FA5371F2 26 64 Client U7895.43X.21EF9FB-V53-C3 U7895.43X.21EF9FB-V53-C3 53 false false 3 2 100 c05076079cff045e c05076079cff045f U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C100 U7895.43X.21EF9FB-V2-C100 2 false true 100 
vfchost50 53 3 1dU7895.43X.21EF9FB-V2-C100 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V53-C3 U7895.43X.21EF9FB-V53-C3 100 false false 3 2 100 c05076079cff045e c05076079cff045f U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Server U7895.43X.21EF9FB-V2-C100 U7895.43X.21EF9FB-V2-C100 2 false true 100 vfchost50 53 3 1dU7895.43X.21EF9FB-V2-C100 fcs0 U78AF.001.WZS04LA-P1-C37-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C37-L1-T1 10000090FA5371F1 58 64 Client U7895.43X.21EF9FB-V42-C2 U7895.43X.21EF9FB-V42-C2 42 false false 2 2 46 U7895.43X.21EF9FB-V2-C46 Server U7895.43X.21EF9FB-V2-C46 U7895.43X.21EF9FB-V2-C46 2 false true 46 vhost19 42 2 U7895.43X.21EF9FB-V42-C2 1eU7895.43X.21EF9FB-V2-C46 Client U7895.43X.21EF9FB-V46-C2 U7895.43X.21EF9FB-V46-C2 46 false true 2 2 139 U7895.43X.21EF9FB-V2-C139 Server U7895.43X.21EF9FB-V2-C139 U7895.43X.21EF9FB-V2-C139 2 false true 139 vhost51 46 2 U7895.43X.21EF9FB-V46-C2 1eU7895.43X.21EF9FB-V2-C139 Client U7895.43X.21EF9FB-V62-C2 U7895.43X.21EF9FB-V62-C2 62 false true 2 2 91 U7895.43X.21EF9FB-V2-C91 Server U7895.43X.21EF9FB-V2-C91 U7895.43X.21EF9FB-V2-C91 2 false true 91 vhost31 62 2 U7895.43X.21EF9FB-V62-C2 1eU7895.43X.21EF9FB-V2-C91 Client U7895.43X.21EF9FB-V19-C2 U7895.43X.21EF9FB-V19-C2 19 false true 2 2 41 U7895.43X.21EF9FB-V2-C41 Server U7895.43X.21EF9FB-V2-C41 U7895.43X.21EF9FB-V2-C41 2 false true 41 vhost16 19 2 U7895.43X.21EF9FB-V19-C2 1eU7895.43X.21EF9FB-V2-C41 Client U7895.43X.21EF9FB-V75-C2 U7895.43X.21EF9FB-V75-C2 75 false true 2 2 87 U7895.43X.21EF9FB-V2-C87 Server U7895.43X.21EF9FB-V2-C87 U7895.43X.21EF9FB-V2-C87 2 false true 87 vhost40 75 2 U7895.43X.21EF9FB-V75-C2 1eU7895.43X.21EF9FB-V2-C87 Client U7895.43X.21EF9FB-V33-C2 U7895.43X.21EF9FB-V33-C2 33 false true 2 2 39 U7895.43X.21EF9FB-V2-C39 Server U7895.43X.21EF9FB-V2-C39 U7895.43X.21EF9FB-V2-C39 2 false true 39 vhost3 33 2 U7895.43X.21EF9FB-V33-C2 
1eU7895.43X.21EF9FB-V2-C39 Client U7895.43X.21EF9FB-V14-C3 U7895.43X.21EF9FB-V14-C3 14 false true 3 2 38 U7895.43X.21EF9FB-V2-C38 Server U7895.43X.21EF9FB-V2-C38 U7895.43X.21EF9FB-V2-C38 2 false true 38 vhost10 14 3 U7895.43X.21EF9FB-V14-C3 1eU7895.43X.21EF9FB-V2-C38 Client U7895.43X.21EF9FB-V66-C2 U7895.43X.21EF9FB-V66-C2 66 false true 2 2 81 U7895.43X.21EF9FB-V2-C81 Server U7895.43X.21EF9FB-V2-C81 U7895.43X.21EF9FB-V2-C81 2 false true 81 vhost23 66 2 U7895.43X.21EF9FB-V66-C2 1eU7895.43X.21EF9FB-V2-C81 Client U7895.43X.21EF9FB-V5-C2 U7895.43X.21EF9FB-V5-C2 5 false true 2 2 33 U7895.43X.21EF9FB-V2-C33 Server U7895.43X.21EF9FB-V2-C33 U7895.43X.21EF9FB-V2-C33 2 false true 33 vhost7 5 2 U7895.43X.21EF9FB-V5-C2 1eU7895.43X.21EF9FB-V2-C33 Client U7895.43X.21EF9FB-V52-C3 U7895.43X.21EF9FB-V52-C3 52 false true 3 2 77 U7895.43X.21EF9FB-V2-C77 Server U7895.43X.21EF9FB-V2-C77 U7895.43X.21EF9FB-V2-C77 2 false true 77 vhost26 52 3 U7895.43X.21EF9FB-V52-C3 1eU7895.43X.21EF9FB-V2-C77 Client U7895.43X.21EF9FB-V80-C2 U7895.43X.21EF9FB-V80-C2 80 false true 2 2 122 U7895.43X.21EF9FB-V2-C122 Server U7895.43X.21EF9FB-V2-C122 U7895.43X.21EF9FB-V2-C122 2 false true 122 vhost38 80 2 U7895.43X.21EF9FB-V80-C2 1eU7895.43X.21EF9FB-V2-C122 Client U7895.43X.21EF9FB-V18-C2 U7895.43X.21EF9FB-V18-C2 18 false true 2 2 28 U7895.43X.21EF9FB-V2-C28 Server U7895.43X.21EF9FB-V2-C28 U7895.43X.21EF9FB-V2-C28 2 false true 28 vhost5 18 2 U7895.43X.21EF9FB-V18-C2 1eU7895.43X.21EF9FB-V2-C28 Client U7895.43X.21EF9FB-V25-C2 U7895.43X.21EF9FB-V25-C2 25 false false 2 2 27 U7895.43X.21EF9FB-V2-C27 Server U7895.43X.21EF9FB-V2-C27 U7895.43X.21EF9FB-V2-C27 2 false true 27 vhost11 25 2 U7895.43X.21EF9FB-V25-C2 1eU7895.43X.21EF9FB-V2-C27 Client U7895.43X.21EF9FB-V73-C2 U7895.43X.21EF9FB-V73-C2 73 false true 2 2 71 U7895.43X.21EF9FB-V2-C71 Server U7895.43X.21EF9FB-V2-C71 U7895.43X.21EF9FB-V2-C71 2 false true 71 vhost47 73 2 U7895.43X.21EF9FB-V73-C2 1eU7895.43X.21EF9FB-V2-C71 Client U7895.43X.21EF9FB-V74-C2 
U7895.43X.21EF9FB-V74-C2 74 false true 2 2 112 U7895.43X.21EF9FB-V2-C112 Server U7895.43X.21EF9FB-V2-C112 U7895.43X.21EF9FB-V2-C112 2 false true 112 vhost35 74 2 U7895.43X.21EF9FB-V74-C2 1eU7895.43X.21EF9FB-V2-C112 Client U7895.43X.21EF9FB-V17-C2 U7895.43X.21EF9FB-V17-C2 17 false false 2 2 17 U7895.43X.21EF9FB-V2-C17 Server U7895.43X.21EF9FB-V2-C17 U7895.43X.21EF9FB-V2-C17 2 false true 17 vhost4 17 2 U7895.43X.21EF9FB-V17-C2 1eU7895.43X.21EF9FB-V2-C17 Client U7895.43X.21EF9FB-V10-C2 U7895.43X.21EF9FB-V10-C2 10 false true 2 2 15 U7895.43X.21EF9FB-V2-C15 Server U7895.43X.21EF9FB-V2-C15 U7895.43X.21EF9FB-V2-C15 2 false true 15 vhost6 10 2 U7895.43X.21EF9FB-V10-C2 1eU7895.43X.21EF9FB-V2-C15 Client U7895.43X.21EF9FB-V47-C2 U7895.43X.21EF9FB-V47-C2 47 false true 2 2 60 U7895.43X.21EF9FB-V2-C60 Server U7895.43X.21EF9FB-V2-C60 U7895.43X.21EF9FB-V2-C60 2 false true 60 vhost15 47 2 U7895.43X.21EF9FB-V47-C2 1eU7895.43X.21EF9FB-V2-C60 Client U7895.43X.21EF9FB-V6-C3 U7895.43X.21EF9FB-V6-C3 6 false true 3 2 13 U7895.43X.21EF9FB-V2-C13 Server U7895.43X.21EF9FB-V2-C13 U7895.43X.21EF9FB-V2-C13 2 false true 13 vhost20 6 3 U7895.43X.21EF9FB-V6-C3 1eU7895.43X.21EF9FB-V2-C13 Client U7895.43X.21EF9FB-V41-C3 U7895.43X.21EF9FB-V41-C3 41 false true 3 2 59 U7895.43X.21EF9FB-V2-C59 Server U7895.43X.21EF9FB-V2-C59 U7895.43X.21EF9FB-V2-C59 2 false true 59 vhost17 41 3 U7895.43X.21EF9FB-V41-C3 1eU7895.43X.21EF9FB-V2-C59 Client U7895.43X.21EF9FB-V68-C2 U7895.43X.21EF9FB-V68-C2 68 false true 2 2 105 U7895.43X.21EF9FB-V2-C105 Server U7895.43X.21EF9FB-V2-C105 U7895.43X.21EF9FB-V2-C105 2 false true 105 vhost27 68 2 U7895.43X.21EF9FB-V68-C2 1eU7895.43X.21EF9FB-V2-C105 Client U7895.43X.21EF9FB-V13-C2 U7895.43X.21EF9FB-V13-C2 13 false true 2 2 10 U7895.43X.21EF9FB-V2-C10 Server U7895.43X.21EF9FB-V2-C10 U7895.43X.21EF9FB-V2-C10 2 false true 10 vhost1 13 2 U7895.43X.21EF9FB-V13-C2 1eU7895.43X.21EF9FB-V2-C10 Client U7895.43X.21EF9FB-V9-C2 U7895.43X.21EF9FB-V9-C2 9 false true 2 2 8 U7895.43X.21EF9FB-V2-C8 
Server U7895.43X.21EF9FB-V2-C8 U7895.43X.21EF9FB-V2-C8 2 false true 8 vhost0 9 2 U7895.43X.21EF9FB-V9-C2 1eU7895.43X.21EF9FB-V2-C8 Client U7895.43X.21EF9FB-V20-C2 U7895.43X.21EF9FB-V20-C2 20 false true 2 2 7 U7895.43X.21EF9FB-V2-C7 Server U7895.43X.21EF9FB-V2-C7 U7895.43X.21EF9FB-V2-C7 2 false true 7 vhost9 20 2 U7895.43X.21EF9FB-V20-C2 1eU7895.43X.21EF9FB-V2-C7 Client U7895.43X.21EF9FB-V4-C2 U7895.43X.21EF9FB-V4-C2 4 false true 2 2 6 U7895.43X.21EF9FB-V2-C6 Server U7895.43X.21EF9FB-V2-C6 U7895.43X.21EF9FB-V2-C6 2 false true 6 vhost18 4 2 U7895.43X.21EF9FB-V4-C2 1eU7895.43X.21EF9FB-V2-C6 Client U7895.43X.21EF9FB-V53-C2 U7895.43X.21EF9FB-V53-C2 53 false false 2 2 99 U7895.43X.21EF9FB-V2-C99 Server U7895.43X.21EF9FB-V2-C99 U7895.43X.21EF9FB-V2-C99 2 false true 99 vhost42 53 2 U7895.43X.21EF9FB-V53-C2 1eU7895.43X.21EF9FB-V2-C99 Client U7895.43X.21EF9FB-V3-C2 U7895.43X.21EF9FB-V3-C2 3 false true 2 2 5 U7895.43X.21EF9FB-V2-C5 Server U7895.43X.21EF9FB-V2-C5 U7895.43X.21EF9FB-V2-C5 2 false true 5 vhost2 3 2 U7895.43X.21EF9FB-V3-C2 1eU7895.43X.21EF9FB-V2-C5 Server U7895.43X.21EF9FB-V2-C51 U7895.43X.21EF9FB-V2-C51 2 false true 51 vhost13 38 2 1eU7895.43X.21EF9FB-V2-C51 Client U7895.43X.21EF9FB-V35-C2 U7895.43X.21EF9FB-V35-C2 35 false true 2 2 49 U7895.43X.21EF9FB-V2-C49 Server U7895.43X.21EF9FB-V2-C49 U7895.43X.21EF9FB-V2-C49 2 false true 49 vhost14 35 2 U7895.43X.21EF9FB-V35-C2 1eU7895.43X.21EF9FB-V2-C49 Client U7895.43X.21EF9FB-V37-C2 U7895.43X.21EF9FB-V37-C2 37 false false 2 2 21 U7895.43X.21EF9FB-V2-C21 Server U7895.43X.21EF9FB-V2-C21 U7895.43X.21EF9FB-V2-C21 2 false true 21 vhost8 37 2 U7895.43X.21EF9FB-V37-C2 1eU7895.43X.21EF9FB-V2-C21 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L2-T2 U78AF.001.WZS04LA-P1-C36-L2-T2 13U78AF.001.WZS04LA-P1-C36-L2-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L1-T2 U78AF.001.WZS04LA-P1-C36-L1-T2 
13U78AF.001.WZS04LA-P1-C36-L1-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C36-L2-T1 U78AF.001.WZS04LA-P1-C36-L2-T1 13U78AF.001.WZS04LA-P1-C36-L2-T1 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 U78AF.001.WZS04LA-P1-C36-L2-T2 en3 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 U78AF.001.WZS04LA-P1-C36-L1-T2 en1 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 U78AF.001.WZS04LA-P1-C36-L2-T1 en2 9.1.2.5 255.255.255.0 Active 7DBBE705-E4C4-4458-8223-3EBE07015CA9 VirtualIOServer 2015-03-26T08:57:58.767Z IBM Power Systems Management Console 608448218 7DBBE705-E4C4-4458-8223-3EBE07015CA9 1427360278204 false 191 0 POWER7 On false false false false false false normal 21EF9FB1 VIOS 2.2.3.1 true true true true true 1 400 false Ethernet controller U78AF.001.WZS04LA 1808 512 1808 59187 6562 3 6562 4319 false false false false false false false false false false 553714177 Ethernet controller U78AF.001.WZS04LA-P1-C34-L1 U78AF.001.WZS04LA-P1-C34-L1 C34 1808 553714177 U78AF.001.WZS04LA-P1-C34-L1 C34 false Ethernet controller U78AF.001.WZS04LA 1808 512 1808 59187 6562 3 6562 4319 false false false false false false false false false false 553714192 Ethernet controller U78AF.001.WZS04LA-P1-C34-L2 U78AF.001.WZS04LA-P1-C34-L2 C34 1808 553714192 U78AF.001.WZS04LA-P1-C34-L2 C34 false FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA 57856 3076 57856 57858 4319 16 4319 4319 false false false false false false false false false false 553714179 FC5052 2-port 16Gb FC Adapter U78AF.001.WZS04LA-P1-C35-L1 U78AF.001.WZS04LA-P1-C35-L1 C35 U78AF.001.WZS04LA-P1-C35-L1-T2 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C35-L1-T1-W50050768021121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwMw== false 102400 hdisk0 active 332136005076D02810187E00000000000000304214503IBMfcp true fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 
U78AF.001.WZS04LA-P1-C35-L1-T1 MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C35-L1-T1-W50050768021121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwMw== false 102400 hdisk0 active 332136005076D02810187E00000000000000304214503IBMfcp true fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 553714179 U78AF.001.WZS04LA-P1-C35-L1 C35 400 false false 0 24576 0.0 7 false 24576 24576 false 0 0.0 7 0 0 24576 24576 0 24576 false true false false 0 24576 24576 false nimbus-ch03-p2-vios1 false 1 2 1 2 1 2 0 capped false capped false 2 1 1 1 0 2 2 1 running Virtual IO Server 7DBBE705-E4C4-4458-8223-3EBE07015CA9 default 0 0 active 9.1.2.4 196281927536384 false true true vopt_4556d33e7f404e72b5fccc126e2038d9 0evopt_4556d33e7f404e72b5fccc126e2038d9 rw 0.000000 vopt_4bd422fb29d24b369cd93d674606d9ee 0evopt_4bd422fb29d24b369cd93d674606d9ee rw 0.000000 vopt_56c31f83256f49b8ab71810dd3bcf115 0evopt_56c31f83256f49b8ab71810dd3bcf115 rw 0.000000 vopt_8228f1bb5b6941ce8cf7947bdbbd4123 0evopt_8228f1bb5b6941ce8cf7947bdbbd4123 rw 0.000000 VMLibrary 7 true MPIO IBM 2076 FC Disk U78AF.001.WZS04LA-P1-C35-L1-T1-W50050768021121CA-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2RDAyODEwMTg3RTAwMDAwMDAwMDAwMDAwMw== false 102400 hdisk0 active 332136005076D02810187E00000000000000304214503IBMfcp true ent5 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent0 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L1-T1 U78AF.001.WZS04LA-P1-C34-L1-T1 13U78AF.001.WZS04LA-P1-C34-L1-T1 auto ent6 false 1 disabled 8192 true U7895.43X.21EF9FB-V1-C2 U7895.43X.21EF9FB-V1-C2 true true 2 ALL B28472910F02 1 false false 0 ent4 1 U7895.43X.21EF9FB-V1-C4 U7895.43X.21EF9FB-V1-C4 false true 4 ALL B28472910F04 4093 false 2134 2173 true 0 ent8 1 en6 Inactive 10beee7d111da8da1f true U7895.43X.21EF9FB-V1-C2 U7895.43X.21EF9FB-V1-C2 true true 2 ALL B28472910F02 1 false false 0 ent4 1 U7895.43X.21EF9FB-V1-C4 U7895.43X.21EF9FB-V1-C4 false true 4 ALL B28472910F04 4093 false 2134 
2173 true 0 ent8 1 true Client U7895.43X.21EF9FB-V63-C4 U7895.43X.21EF9FB-V63-C4 63 false true 4 1 94 c05076079cff0e58 c05076079cff0e59 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C94 U7895.43X.21EF9FB-V1-C94 1 false true 94 vfchost58 63 4 1dU7895.43X.21EF9FB-V1-C94 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V62-C3 U7895.43X.21EF9FB-V62-C3 62 false true 3 1 92 c05076079cff0e4a c05076079cff0e4b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C92 U7895.43X.21EF9FB-V1-C92 1 false true 92 vfchost57 62 3 1dU7895.43X.21EF9FB-V1-C92 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V61-C4 U7895.43X.21EF9FB-V61-C4 61 false true 4 1 91 c05076079cff08dc c05076079cff08dd U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C91 U7895.43X.21EF9FB-V1-C91 1 false true 91 vfchost49 61 4 1dU7895.43X.21EF9FB-V1-C91 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V68-C3 U7895.43X.21EF9FB-V68-C3 68 false true 3 1 85 c05076079cff0c6a c05076079cff0c6b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C85 U7895.43X.21EF9FB-V1-C85 1 false true 85 vfchost63 68 3 1dU7895.43X.21EF9FB-V1-C85 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V52-C2 U7895.43X.21EF9FB-V52-C2 52 false true 2 1 83 c05076079cff0e42 c05076079cff0e43 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C83 U7895.43X.21EF9FB-V1-C83 1 false true 83 vfchost52 52 2 
1dU7895.43X.21EF9FB-V1-C83 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V31-C4 U7895.43X.21EF9FB-V31-C4 31 false true 4 1 81 c05076079cff0ea8 c05076079cff0ea9 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C81 U7895.43X.21EF9FB-V1-C81 1 false true 81 vfchost27 31 4 1dU7895.43X.21EF9FB-V1-C81 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V44-C4 U7895.43X.21EF9FB-V44-C4 44 false true 4 1 80 c05076079cff0e10 c05076079cff0e11 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C80 U7895.43X.21EF9FB-V1-C80 1 false true 80 vfchost41 44 4 1dU7895.43X.21EF9FB-V1-C80 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V47-C3 U7895.43X.21EF9FB-V47-C3 47 false true 3 1 77 c05076079cff0856 c05076079cff0857 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C77 U7895.43X.21EF9FB-V1-C77 1 false true 77 vfchost44 47 3 1dU7895.43X.21EF9FB-V1-C77 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V41-C2 U7895.43X.21EF9FB-V41-C2 41 false true 2 1 74 c05076079cff0df2 c05076079cff0df3 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C74 U7895.43X.21EF9FB-V1-C74 1 false true 74 vfchost38 41 2 1dU7895.43X.21EF9FB-V1-C74 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V51-C4 U7895.43X.21EF9FB-V51-C4 51 false true 4 1 73 c05076079cff0cf4 c05076079cff0cf5 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server 
U7895.43X.21EF9FB-V1-C73 U7895.43X.21EF9FB-V1-C73 1 false true 73 vfchost48 51 4 1dU7895.43X.21EF9FB-V1-C73 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V46-C3 U7895.43X.21EF9FB-V46-C3 46 false true 3 1 72 c05076079cff0bfa c05076079cff0bfb U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C72 U7895.43X.21EF9FB-V1-C72 1 false true 72 vfchost43 46 3 1dU7895.43X.21EF9FB-V1-C72 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V43-C4 U7895.43X.21EF9FB-V43-C4 43 false true 4 1 67 c05076079cff0f9c c05076079cff0f9d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C67 U7895.43X.21EF9FB-V1-C67 1 false true 67 vfchost40 43 4 1dU7895.43X.21EF9FB-V1-C67 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V42-C3 U7895.43X.21EF9FB-V42-C3 42 false false 3 1 66 c05076079cff0f86 c05076079cff0f87 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C66 U7895.43X.21EF9FB-V1-C66 1 false true 66 vfchost39 42 3 1dU7895.43X.21EF9FB-V1-C66 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V23-C4 U7895.43X.21EF9FB-V23-C4 23 false false 4 1 65 c05076079cff0f80 c05076079cff0f81 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C65 U7895.43X.21EF9FB-V1-C65 1 false true 65 vfchost17 23 4 1dU7895.43X.21EF9FB-V1-C65 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V40-C4 U7895.43X.21EF9FB-V40-C4 40 false true 4 1 63 c05076079cff0f78 c05076079cff0f79 
U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C63 U7895.43X.21EF9FB-V1-C63 1 false true 63 vfchost37 40 4 1dU7895.43X.21EF9FB-V1-C63 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V30-C4 U7895.43X.21EF9FB-V30-C4 30 false true 4 1 62 c05076079cff0500 c05076079cff0501 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C62 U7895.43X.21EF9FB-V1-C62 1 false true 62 vfchost24 30 4 1dU7895.43X.21EF9FB-V1-C62 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V39-C4 U7895.43X.21EF9FB-V39-C4 39 false false 4 1 61 c05076079cff0f70 c05076079cff0f71 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C61 U7895.43X.21EF9FB-V1-C61 1 false true 61 vfchost36 39 4 1dU7895.43X.21EF9FB-V1-C61 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C59 U7895.43X.21EF9FB-V1-C59 1 false true 59 vfchost35 38 3 1dU7895.43X.21EF9FB-V1-C59 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V34-C4 U7895.43X.21EF9FB-V34-C4 34 false true 4 1 58 c05076079cff0f68 c05076079cff0f69 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C58 U7895.43X.21EF9FB-V1-C58 1 false true 58 vfchost32 34 4 1dU7895.43X.21EF9FB-V1-C58 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V8-C4 U7895.43X.21EF9FB-V8-C4 8 false true 4 1 57 c05076079cff0f58 c05076079cff0f59 U78AF.001.WZS04LA-P1-C35-L1-T1 
fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C57 U7895.43X.21EF9FB-V1-C57 1 false true 57 vfchost6 8 4 1dU7895.43X.21EF9FB-V1-C57 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V29-C4 U7895.43X.21EF9FB-V29-C4 29 false true 4 1 56 c05076079cff0f50 c05076079cff0f51 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C56 U7895.43X.21EF9FB-V1-C56 1 false true 56 vfchost28 29 4 1dU7895.43X.21EF9FB-V1-C56 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V73-C3 U7895.43X.21EF9FB-V73-C3 73 false true 3 1 55 c05076079cff0d7e c05076079cff0d7f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C55 U7895.43X.21EF9FB-V1-C55 1 false true 55 vfchost68 73 3 1dU7895.43X.21EF9FB-V1-C55 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V35-C3 U7895.43X.21EF9FB-V35-C3 35 false true 3 1 49 c05076079cff0dda c05076079cff0ddb U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C49 U7895.43X.21EF9FB-V1-C49 1 false true 49 vfchost31 35 3 1dU7895.43X.21EF9FB-V1-C49 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V33-C3 U7895.43X.21EF9FB-V33-C3 33 false true 3 1 48 c05076079cff0f36 c05076079cff0f37 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C48 U7895.43X.21EF9FB-V1-C48 1 false true 48 vfchost30 33 3 1dU7895.43X.21EF9FB-V1-C48 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V36-C4 U7895.43X.21EF9FB-V36-C4 36 
false false 4 1 47 c05076079cff0f28 c05076079cff0f29 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C47 U7895.43X.21EF9FB-V1-C47 1 false true 47 vfchost33 36 4 1dU7895.43X.21EF9FB-V1-C47 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V11-C4 U7895.43X.21EF9FB-V11-C4 11 false false 4 1 46 c05076079cff04ac c05076079cff04ad U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C46 U7895.43X.21EF9FB-V1-C46 1 false true 46 vfchost14 11 4 1dU7895.43X.21EF9FB-V1-C46 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V12-C4 U7895.43X.21EF9FB-V12-C4 12 false true 4 1 45 c05076079cff0f4c c05076079cff0f4d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C45 U7895.43X.21EF9FB-V1-C45 1 false true 45 vfchost3 12 4 1dU7895.43X.21EF9FB-V1-C45 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V26-C4 U7895.43X.21EF9FB-V26-C4 26 false true 4 1 42 c05076079cff0f1c c05076079cff0f1d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C42 U7895.43X.21EF9FB-V1-C42 1 false true 42 vfchost25 26 4 1dU7895.43X.21EF9FB-V1-C42 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V32-C4 U7895.43X.21EF9FB-V32-C4 32 false true 4 1 41 c05076079cff0f98 c05076079cff0f99 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C41 U7895.43X.21EF9FB-V1-C41 1 false true 41 vfchost29 32 4 1dU7895.43X.21EF9FB-V1-C41 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 
10000090FA537209 26 64 Client U7895.43X.21EF9FB-V24-C4 U7895.43X.21EF9FB-V24-C4 24 false true 4 1 39 c05076079cff0f34 c05076079cff0f35 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C39 U7895.43X.21EF9FB-V1-C39 1 false true 39 vfchost18 24 4 1dU7895.43X.21EF9FB-V1-C39 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V22-C4 U7895.43X.21EF9FB-V22-C4 22 false true 4 1 36 c05076079cff0dc8 c05076079cff0dc9 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C36 U7895.43X.21EF9FB-V1-C36 1 false true 36 vfchost19 22 4 1dU7895.43X.21EF9FB-V1-C36 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V27-C4 U7895.43X.21EF9FB-V27-C4 27 false true 4 1 35 c05076079cff0e0c c05076079cff0e0d U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C35 U7895.43X.21EF9FB-V1-C35 1 false true 35 vfchost23 27 4 1dU7895.43X.21EF9FB-V1-C35 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V25-C3 U7895.43X.21EF9FB-V25-C3 25 false false 3 1 33 c05076079cff0f42 c05076079cff0f43 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C33 U7895.43X.21EF9FB-V1-C33 1 false true 33 vfchost20 25 3 1dU7895.43X.21EF9FB-V1-C33 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V18-C4 U7895.43X.21EF9FB-V18-C4 18 false true 4 1 32 c05076079cff00da c05076079cff00db U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C32 U7895.43X.21EF9FB-V1-C32 1 false true 32 vfchost22 18 4 
1dU7895.43X.21EF9FB-V1-C32 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V17-C3 U7895.43X.21EF9FB-V17-C3 17 false false 3 1 31 c05076079cff0f3a c05076079cff0f3b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C31 U7895.43X.21EF9FB-V1-C31 1 false true 31 vfchost12 17 3 1dU7895.43X.21EF9FB-V1-C31 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V20-C3 U7895.43X.21EF9FB-V20-C3 20 false true 3 1 30 c05076079cff0f82 c05076079cff0f83 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C30 U7895.43X.21EF9FB-V1-C30 1 false true 30 vfchost13 20 3 1dU7895.43X.21EF9FB-V1-C30 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V14-C2 U7895.43X.21EF9FB-V14-C2 14 false true 2 1 29 c05076079cff0eae c05076079cff0eaf U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C29 U7895.43X.21EF9FB-V1-C29 1 false true 29 vfchost21 14 2 1dU7895.43X.21EF9FB-V1-C29 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V10-C3 U7895.43X.21EF9FB-V10-C3 10 false true 3 1 27 c05076079cff0d8e c05076079cff0d8f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C27 U7895.43X.21EF9FB-V1-C27 1 false true 27 vfchost7 10 3 1dU7895.43X.21EF9FB-V1-C27 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V21-C4 U7895.43X.21EF9FB-V21-C4 21 false false 4 1 25 c05076079cff0f30 c05076079cff0f31 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server 
U7895.43X.21EF9FB-V1-C25 U7895.43X.21EF9FB-V1-C25 1 false true 25 vfchost15 21 4 1dU7895.43X.21EF9FB-V1-C25 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V85-C4 U7895.43X.21EF9FB-V85-C4 85 false true 4 1 119 c05076079cff0d40 c05076079cff0d41 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C119 U7895.43X.21EF9FB-V1-C119 1 false true 119 vfchost77 85 4 1dU7895.43X.21EF9FB-V1-C119 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V80-C3 U7895.43X.21EF9FB-V80-C3 80 false true 3 1 118 c05076079cff0836 c05076079cff0837 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C118 U7895.43X.21EF9FB-V1-C118 1 false true 118 vfchost75 80 3 1dU7895.43X.21EF9FB-V1-C118 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V19-C3 U7895.43X.21EF9FB-V19-C3 19 false true 3 1 23 c05076079cff0ed6 c05076079cff0ed7 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C23 U7895.43X.21EF9FB-V1-C23 1 false true 23 vfchost16 19 3 1dU7895.43X.21EF9FB-V1-C23 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V83-C4 U7895.43X.21EF9FB-V83-C4 83 false true 4 1 116 c05076079cff0b84 c05076079cff0b85 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C116 U7895.43X.21EF9FB-V1-C116 1 false true 116 vfchost78 83 4 1dU7895.43X.21EF9FB-V1-C116 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V15-C3 U7895.43X.21EF9FB-V15-C3 15 false false 3 1 17 c05076079cff0f9e c05076079cff0f9f 
U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C17 U7895.43X.21EF9FB-V1-C17 1 false true 17 vfchost10 15 3 1dU7895.43X.21EF9FB-V1-C17 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V9-C3 U7895.43X.21EF9FB-V9-C3 9 false true 3 1 16 c05076079cff0db6 c05076079cff0db7 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C16 U7895.43X.21EF9FB-V1-C16 1 false true 16 vfchost8 9 3 1dU7895.43X.21EF9FB-V1-C16 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V13-C3 U7895.43X.21EF9FB-V13-C3 13 false true 3 1 15 c05076079cff0d7a c05076079cff0d7b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C15 U7895.43X.21EF9FB-V1-C15 1 false true 15 vfchost9 13 3 1dU7895.43X.21EF9FB-V1-C15 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V74-C3 U7895.43X.21EF9FB-V74-C3 74 false true 3 1 109 c05076079cff0e82 c05076079cff0e83 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C109 U7895.43X.21EF9FB-V1-C109 1 false true 109 vfchost69 74 3 1dU7895.43X.21EF9FB-V1-C109 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V54-C4 U7895.43X.21EF9FB-V54-C4 54 false true 4 1 107 c05076079cff0924 c05076079cff0925 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C107 U7895.43X.21EF9FB-V1-C107 1 false true 107 vfchost50 54 4 1dU7895.43X.21EF9FB-V1-C107 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client 
U7895.43X.21EF9FB-V7-C4 U7895.43X.21EF9FB-V7-C4 7 false true 4 1 12 c05076079cff0f94 c05076079cff0f95 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C12 U7895.43X.21EF9FB-V1-C12 1 false true 12 vfchost5 7 4 1dU7895.43X.21EF9FB-V1-C12 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V6-C2 U7895.43X.21EF9FB-V6-C2 6 false true 2 1 9 c05076079cff0f8e c05076079cff0f8f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C9 U7895.43X.21EF9FB-V1-C9 1 false true 9 vfchost4 6 2 1dU7895.43X.21EF9FB-V1-C9 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V75-C3 U7895.43X.21EF9FB-V75-C3 75 false true 3 1 102 c05076079cff0d2e c05076079cff0d2f U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C102 U7895.43X.21EF9FB-V1-C102 1 false true 102 vfchost70 75 3 1dU7895.43X.21EF9FB-V1-C102 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V5-C3 U7895.43X.21EF9FB-V5-C3 5 false true 3 1 7 c05076079cff0f7a c05076079cff0f7b U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C7 U7895.43X.21EF9FB-V1-C7 1 false true 7 vfchost2 5 3 1dU7895.43X.21EF9FB-V1-C7 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V4-C3 U7895.43X.21EF9FB-V4-C3 4 false true 3 1 6 c05076079cff0f72 c05076079cff0f73 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C6 U7895.43X.21EF9FB-V1-C6 1 false true 6 vfchost1 4 3 1dU7895.43X.21EF9FB-V1-C6 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 
1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V66-C3 U7895.43X.21EF9FB-V66-C3 66 false true 3 1 100 c05076079cff0e66 c05076079cff0e67 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Server U7895.43X.21EF9FB-V1-C100 U7895.43X.21EF9FB-V1-C100 1 false true 100 vfchost61 66 3 1dU7895.43X.21EF9FB-V1-C100 fcs0 U78AF.001.WZS04LA-P1-C35-L1-T1 fcs0 1aU78AF.001.WZS04LA-P1-C35-L1-T1 10000090FA537209 26 64 Client U7895.43X.21EF9FB-V3-C3 U7895.43X.21EF9FB-V3-C3 3 false true 3 1 5 c05076079cff07ba c05076079cff07bb U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C5 U7895.43X.21EF9FB-V1-C5 1 false true 5 vfchost0 3 3 1dU7895.43X.21EF9FB-V1-C5 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Server U7895.43X.21EF9FB-V1-C5 U7895.43X.21EF9FB-V1-C5 1 false true 5 vfchost0 3 3 1dU7895.43X.21EF9FB-V1-C5 fcs1 U78AF.001.WZS04LA-P1-C35-L1-T2 fcs1 1aU78AF.001.WZS04LA-P1-C35-L1-T2 10000090FA53720A 58 64 Client U7895.43X.21EF9FB-V43-C2 U7895.43X.21EF9FB-V43-C2 43 false true 2 1 44 U7895.43X.21EF9FB-V1-C44 Server U7895.43X.21EF9FB-V1-C44 U7895.43X.21EF9FB-V1-C44 1 false true 44 vhost8 43 2 U7895.43X.21EF9FB-V43-C2 1eU7895.43X.21EF9FB-V1-C44 Client U7895.43X.21EF9FB-V36-C2 U7895.43X.21EF9FB-V36-C2 36 false false 2 1 43 U7895.43X.21EF9FB-V1-C43 Server U7895.43X.21EF9FB-V1-C43 U7895.43X.21EF9FB-V1-C43 1 false true 43 vhost0 36 2 U7895.43X.21EF9FB-V36-C2 1eU7895.43X.21EF9FB-V1-C43 Client U7895.43X.21EF9FB-V54-C2 U7895.43X.21EF9FB-V54-C2 54 false true 2 1 89 U7895.43X.21EF9FB-V1-C89 Server U7895.43X.21EF9FB-V1-C89 U7895.43X.21EF9FB-V1-C89 1 false true 89 vhost37 54 2 U7895.43X.21EF9FB-V54-C2 1eU7895.43X.21EF9FB-V1-C89 Client U7895.43X.21EF9FB-V30-C2 U7895.43X.21EF9FB-V30-C2 30 false true 2 1 40 
U7895.43X.21EF9FB-V1-C40 Server U7895.43X.21EF9FB-V1-C40 U7895.43X.21EF9FB-V1-C40 1 false true 40 vhost26 30 2 U7895.43X.21EF9FB-V30-C2 1eU7895.43X.21EF9FB-V1-C40 Client U7895.43X.21EF9FB-V63-C2 U7895.43X.21EF9FB-V63-C2 63 false true 2 1 86 U7895.43X.21EF9FB-V1-C86 Server U7895.43X.21EF9FB-V1-C86 U7895.43X.21EF9FB-V1-C86 1 false true 86 vhost9 63 2 U7895.43X.21EF9FB-V63-C2 1eU7895.43X.21EF9FB-V1-C86 Client U7895.43X.21EF9FB-V44-C2 U7895.43X.21EF9FB-V44-C2 44 false true 2 1 38 U7895.43X.21EF9FB-V1-C38 Server U7895.43X.21EF9FB-V1-C38 U7895.43X.21EF9FB-V1-C38 1 false true 38 vhost13 44 2 U7895.43X.21EF9FB-V44-C2 1eU7895.43X.21EF9FB-V1-C38 Client U7895.43X.21EF9FB-V26-C2 U7895.43X.21EF9FB-V26-C2 26 false true 2 1 37 U7895.43X.21EF9FB-V1-C37 Server U7895.43X.21EF9FB-V1-C37 U7895.43X.21EF9FB-V1-C37 1 false true 37 vhost6 26 2 U7895.43X.21EF9FB-V26-C2 1eU7895.43X.21EF9FB-V1-C37 Client U7895.43X.21EF9FB-V22-C2 U7895.43X.21EF9FB-V22-C2 22 false true 2 1 34 U7895.43X.21EF9FB-V1-C34 Server U7895.43X.21EF9FB-V1-C34 U7895.43X.21EF9FB-V1-C34 1 false true 34 vhost10 22 2 U7895.43X.21EF9FB-V22-C2 1eU7895.43X.21EF9FB-V1-C34 Client U7895.43X.21EF9FB-V61-C2 U7895.43X.21EF9FB-V61-C2 61 false true 2 1 78 U7895.43X.21EF9FB-V1-C78 Server U7895.43X.21EF9FB-V1-C78 U7895.43X.21EF9FB-V1-C78 1 false true 78 vhost29 61 2 U7895.43X.21EF9FB-V61-C2 1eU7895.43X.21EF9FB-V1-C78 Client U7895.43X.21EF9FB-V24-C2 U7895.43X.21EF9FB-V24-C2 24 false true 2 1 28 U7895.43X.21EF9FB-V1-C28 Server U7895.43X.21EF9FB-V1-C28 U7895.43X.21EF9FB-V1-C28 1 false true 28 vhost7 24 2 U7895.43X.21EF9FB-V24-C2 1eU7895.43X.21EF9FB-V1-C28 Client U7895.43X.21EF9FB-V51-C2 U7895.43X.21EF9FB-V51-C2 51 false true 2 1 71 U7895.43X.21EF9FB-V1-C71 Server U7895.43X.21EF9FB-V1-C71 U7895.43X.21EF9FB-V1-C71 1 false true 71 vhost18 51 2 U7895.43X.21EF9FB-V51-C2 1eU7895.43X.21EF9FB-V1-C71 Client U7895.43X.21EF9FB-V8-C2 U7895.43X.21EF9FB-V8-C2 8 false true 2 1 24 U7895.43X.21EF9FB-V1-C24 Server U7895.43X.21EF9FB-V1-C24 
U7895.43X.21EF9FB-V1-C24 1 false true 24 vhost17 8 2 U7895.43X.21EF9FB-V8-C2 1eU7895.43X.21EF9FB-V1-C24 Client U7895.43X.21EF9FB-V21-C2 U7895.43X.21EF9FB-V21-C2 21 false false 2 1 22 U7895.43X.21EF9FB-V1-C22 Server U7895.43X.21EF9FB-V1-C22 U7895.43X.21EF9FB-V1-C22 1 false true 22 vhost5 21 2 U7895.43X.21EF9FB-V21-C2 1eU7895.43X.21EF9FB-V1-C22 Client U7895.43X.21EF9FB-V83-C2 U7895.43X.21EF9FB-V83-C2 83 false true 2 1 115 U7895.43X.21EF9FB-V1-C115 Server U7895.43X.21EF9FB-V1-C115 U7895.43X.21EF9FB-V1-C115 1 false true 115 vhost33 83 2 U7895.43X.21EF9FB-V83-C2 1eU7895.43X.21EF9FB-V1-C115 Client U7895.43X.21EF9FB-V11-C2 U7895.43X.21EF9FB-V11-C2 11 false false 2 1 21 U7895.43X.21EF9FB-V1-C21 Server U7895.43X.21EF9FB-V1-C21 U7895.43X.21EF9FB-V1-C21 1 false true 21 vhost12 11 2 U7895.43X.21EF9FB-V11-C2 1eU7895.43X.21EF9FB-V1-C21 Client U7895.43X.21EF9FB-V27-C2 U7895.43X.21EF9FB-V27-C2 27 false true 2 1 20 U7895.43X.21EF9FB-V1-C20 Server U7895.43X.21EF9FB-V1-C20 U7895.43X.21EF9FB-V1-C20 1 false true 20 vhost4 27 2 U7895.43X.21EF9FB-V27-C2 1eU7895.43X.21EF9FB-V1-C20 Client U7895.43X.21EF9FB-V16-C2 U7895.43X.21EF9FB-V16-C2 16 false false 2 1 19 U7895.43X.21EF9FB-V1-C19 Server U7895.43X.21EF9FB-V1-C19 U7895.43X.21EF9FB-V1-C19 1 false true 19 vhost22 16 2 U7895.43X.21EF9FB-V16-C2 1eU7895.43X.21EF9FB-V1-C19 Client U7895.43X.21EF9FB-V12-C2 U7895.43X.21EF9FB-V12-C2 12 false true 2 1 18 U7895.43X.21EF9FB-V1-C18 Server U7895.43X.21EF9FB-V1-C18 U7895.43X.21EF9FB-V1-C18 1 false true 18 vhost15 12 2 U7895.43X.21EF9FB-V12-C2 1eU7895.43X.21EF9FB-V1-C18 Client U7895.43X.21EF9FB-V85-C2 U7895.43X.21EF9FB-V85-C2 85 false true 2 1 111 U7895.43X.21EF9FB-V1-C111 Server U7895.43X.21EF9FB-V1-C111 U7895.43X.21EF9FB-V1-C111 1 false true 111 vhost30 85 2 U7895.43X.21EF9FB-V85-C2 1eU7895.43X.21EF9FB-V1-C111 Client U7895.43X.21EF9FB-V15-C2 U7895.43X.21EF9FB-V15-C2 15 false false 2 1 14 U7895.43X.21EF9FB-V1-C14 Server U7895.43X.21EF9FB-V1-C14 U7895.43X.21EF9FB-V1-C14 1 false true 14 vhost11 15 2 
U7895.43X.21EF9FB-V15-C2 1eU7895.43X.21EF9FB-V1-C14 Client U7895.43X.21EF9FB-V39-C2 U7895.43X.21EF9FB-V39-C2 39 false false 2 1 60 U7895.43X.21EF9FB-V1-C60 Server U7895.43X.21EF9FB-V1-C60 U7895.43X.21EF9FB-V1-C60 1 false true 60 vhost20 39 2 U7895.43X.21EF9FB-V39-C2 1eU7895.43X.21EF9FB-V1-C60 Client U7895.43X.21EF9FB-V32-C2 U7895.43X.21EF9FB-V32-C2 32 false true 2 1 13 U7895.43X.21EF9FB-V1-C13 Server U7895.43X.21EF9FB-V1-C13 U7895.43X.21EF9FB-V1-C13 1 false true 13 vhost2 32 2 U7895.43X.21EF9FB-V32-C2 1eU7895.43X.21EF9FB-V1-C13 Client U7895.43X.21EF9FB-V40-C2 U7895.43X.21EF9FB-V40-C2 40 false true 2 1 11 U7895.43X.21EF9FB-V1-C11 Server U7895.43X.21EF9FB-V1-C11 U7895.43X.21EF9FB-V1-C11 1 false true 11 vhost3 40 2 U7895.43X.21EF9FB-V40-C2 1eU7895.43X.21EF9FB-V1-C11 Client U7895.43X.21EF9FB-V7-C2 U7895.43X.21EF9FB-V7-C2 7 false true 2 1 10 U7895.43X.21EF9FB-V1-C10 Server U7895.43X.21EF9FB-V1-C10 U7895.43X.21EF9FB-V1-C10 1 false true 10 vhost1 7 2 U7895.43X.21EF9FB-V7-C2 1eU7895.43X.21EF9FB-V1-C10 Client U7895.43X.21EF9FB-V23-C2 U7895.43X.21EF9FB-V23-C2 23 false false 2 1 8 U7895.43X.21EF9FB-V1-C8 Server U7895.43X.21EF9FB-V1-C8 U7895.43X.21EF9FB-V1-C8 1 false true 8 vhost21 23 2 U7895.43X.21EF9FB-V23-C2 1eU7895.43X.21EF9FB-V1-C8 Client U7895.43X.21EF9FB-V29-C2 U7895.43X.21EF9FB-V29-C2 29 false true 2 1 54 U7895.43X.21EF9FB-V1-C54 Server U7895.43X.21EF9FB-V1-C54 U7895.43X.21EF9FB-V1-C54 1 false true 54 vhost16 29 2 U7895.43X.21EF9FB-V29-C2 1eU7895.43X.21EF9FB-V1-C54 Client U7895.43X.21EF9FB-V31-C2 U7895.43X.21EF9FB-V31-C2 31 false true 2 1 53 U7895.43X.21EF9FB-V1-C53 Server U7895.43X.21EF9FB-V1-C53 U7895.43X.21EF9FB-V1-C53 1 false true 53 vhost34 31 2 U7895.43X.21EF9FB-V31-C2 1eU7895.43X.21EF9FB-V1-C53 Client U7895.43X.21EF9FB-V34-C2 U7895.43X.21EF9FB-V34-C2 34 false true 2 1 50 U7895.43X.21EF9FB-V1-C50 Server U7895.43X.21EF9FB-V1-C50 U7895.43X.21EF9FB-V1-C50 1 false true 50 vhost19 34 2 U7895.43X.21EF9FB-V34-C2 1eU7895.43X.21EF9FB-V1-C50 1 10GbE 4-port Mezzanine 
Adapter (a2191007df1033e7) ent3 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L2-T2 U78AF.001.WZS04LA-P1-C34-L2-T2 13U78AF.001.WZS04LA-P1-C34-L2-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L1-T2 U78AF.001.WZS04LA-P1-C34-L1-T2 13U78AF.001.WZS04LA-P1-C34-L1-T2 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 physicalEthernetAdpter U78AF.001.WZS04LA-P1-C34-L2-T1 U78AF.001.WZS04LA-P1-C34-L2-T1 13U78AF.001.WZS04LA-P1-C34-L2-T1 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent3 U78AF.001.WZS04LA-P1-C34-L2-T2 en3 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent1 U78AF.001.WZS04LA-P1-C34-L1-T2 en1 Inactive 1 10GbE 4-port Mezzanine Adapter (a2191007df1033e7) ent2 U78AF.001.WZS04LA-P1-C34-L2-T1 en2 9.1.2.4 255.255.255.0 Active END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/cna_feed1.txt0000664000175000017500000002372713571367171021754 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh cna1.txt # #################################################### INFO{ {'comment': None, 'path': 'VirtualIOServer/6542B241-3BB1-4CA8-8252-2654A048D528/ClientNetworkAdapter', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'Content-Length': '8022', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000rgIohOYesLlfdlbKTw6I52v:a17cec82-c18b-4c87-8be7-df4ca789296a; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Mon, 09 May 2016 22:41:43 GMT', 'X-Transaction-ID': 'XT11313049', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Mon, 09 May 2016 22:41:44 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': '8dd46b82-8c1f-4bc3-a06a-e41c94278ad6', 'ETag': '1096161669'} END OF SECTION} BODY{ 8ac782d6-1614-3b73-ac3b-a89a6b6b8e97 2016-05-09T18:41:43.994-04:00 IBM Power Systems Management Console 6c9eccf7-a803-3248-b0be-4576b277aa62 ClientNetworkAdapter 2016-05-09T18:41:44.003-04:00 IBM Power Systems Management Console -229464051 6c9eccf7-a803-3248-b0be-4576b277aa62 0 U8247.42L.2120FBA-V2-C3 U8247.42L.2120FBA-V2-C3 2 3 true 5E372CFD9E6D 1 2227 true 0 1 1500 br-int iface-id=994580c7-df12-4865-b066-6e5587475b7c,iface-status=active,attached-mac=fa:6c:c2:55:0a:20,vm-uuid=8702dd36-e405-407f-bd75-224babc04da5 ec77a416-5f24-37de-b7d3-71a88e804045 ClientNetworkAdapter 2016-05-09T18:41:44.004-04:00 IBM Power Systems Management Console -1027154876 ec77a416-5f24-37de-b7d3-71a88e804045 0 U8247.42L.2120FBA-V2-C4 U8247.42L.2120FBA-V2-C4 2 4 true 2A2E57A4DE9C 4094 2207 2210 true 0 1 1500 br-int iface-id=f35362c3-5213-4398-b1d8-c06dce3a50ca,iface-status=active,attached-mac=fa:a7:49:c0:1a:20,vm-uuid=abf12ade-9743-4a5a-af5e-1bc88d505886 849e6a67-0e9d-3b6e-abbf-52fd3eb3b0a9 ClientNetworkAdapter 2016-05-09T18:41:44.005-04:00 IBM 
Power Systems Management Console 49815581 849e6a67-0e9d-3b6e-abbf-52fd3eb3b0a9 0 U8247.42L.2120FBA-V2-C6 U8247.42L.2120FBA-V2-C6 2 6 true 3AEAC528A7E3 4094 true 1 1500 br-int iface-id=ba9d8ec3-64b2-47fe-9f50-e12ba373814c,iface-status=active,attached-mac=fa:e6:c8:3f:80:20,vm-uuid=64443c49-920d-47d7-9b78-1216845c51f5 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/token_file0000664000175000017500000000002513571367171021432 0ustar neoneo00000000000000file-based-auth-tokenpypowervm-1.1.24/pypowervm/tests/data/fake_volume_group2.txt0000664000175000017500000007762613571367171023751 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. #################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_volume_group.txt # #################################################### INFO{ {'comment': 'Used for testing test_volume_group.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/14B854F7-42CE-4FF0-BD57-1D117054E701/VolumeGroup'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 8070198f-b358-3b26-a144-1b268cf6f8d4 2015-01-22T04:15:33.346Z IBM Power Systems Management Console 1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f VolumeGroup 2015-01-22T04:15:33.810Z IBM Power Systems Management Console -762288609 1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f 1421883668662 1033 1033 1064 rootvg 00f8d6de00004b000000014a53505d2f 256 SAS RAID 0 Disk Array U78C9.001.WZS0095-P1-C14-R1-L205D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDIw false 1089592 hdisk0 active 391BIBMIPR-0 5D8283000000002010IPR-0 5D82830003IBMsas false 0400f8d6de00004b000000014a53505d2f b6bdbf1f-eddf-3c81-8801-9859eb6fedcb VolumeGroup 2015-01-22T04:15:33.816Z IBM Power Systems Management Console 71932090 b6bdbf1f-eddf-3c81-8801-9859eb6fedcb 1421883668662 997 997 1063 image_pool 00f8d6de00004b000000014a54555cd9 1024 asdcv_3bc2b715_userID_config.iso 0easdcv_3bc2b715_userID_config.iso rw 0.000000 asdf_c3f7b1e7_userID_config.iso 0easdf_c3f7b1e7_userID_config.iso rw 0.000000 blank_media1 0eblank_media1 rw 0.0977 blank_media_2 
0eblank_media_2 rw 0.0488 cirros_eea81ef4_userID_config.iso 0ecirros_eea81ef4_userID_config.iso rw 0.000000 test_aix_17cbd370_userID_config.iso 0etest_aix_17cbd370_userID_config.iso rw 0.000000 VMLibrary 11 SAS RAID 0 Disk Array U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw false 1089592 hdisk1 active 391BIBMIPR-0 5D8283000000004010IPR-0 5D82830003IBMsas false 0400f8d6de00004b000000014a54555cd9 1 None boot_b853fd66 0300f8d6de00004b000000014a54555cd9.20 1 None boot_df4029a6 0300f8d6de00004b000000014a54555cd9.21 1 None boot_e0dd0297 0300f8d6de00004b000000014a54555cd9.22 5 None boot_17cbd370 0300f8d6de00004b000000014a54555cd9.23 1 None boot_d2d886e1 0300f8d6de00004b000000014a54555cd9.24 1 None boot_947a017f 0300f8d6de00004b000000014a54555cd9.25 1 None boot_810f7730 0300f8d6de00004b000000014a54555cd9.26 1 None boot_9f5befd2 0300f8d6de00004b000000014a54555cd9.27 1 None boot_9699a0f5 0300f8d6de00004b000000014a54555cd9.28 1 None boot_d50cd8e4 0300f8d6de00004b000000014a54555cd9.29 1 None boot_540240c8 0300f8d6de00004b000000014a54555cd9.30 1 None boot_16eea13e 0300f8d6de00004b000000014a54555cd9.31 1 None boot_37e3c8d3 0300f8d6de00004b000000014a54555cd9.32 1 None boot_f07b23e2 0300f8d6de00004b000000014a54555cd9.10 1 None boot_6ec6f2cd 0300f8d6de00004b000000014a54555cd9.33 1 None boot_9306783f 0300f8d6de00004b000000014a54555cd9.11 1 None efried0.207216 0300f8d6de00004b000000014a54555cd9.12 1 None asdf 0300f8d6de00004b000000014a54555cd9.1 1 None boot_925c5fa3 0300f8d6de00004b000000014a54555cd9.13 1 None boot_a919a184 0300f8d6de00004b000000014a54555cd9.3 10 None aix_disk1 0300f8d6de00004b000000014a54555cd9.14 10 None aix_disk2 0300f8d6de00004b000000014a54555cd9.15 2 None asdf2 0300f8d6de00004b000000014a54555cd9.4 1 None boot_30a56789 0300f8d6de00004b000000014a54555cd9.5 1 None boot_26610873 0300f8d6de00004b000000014a54555cd9.16 1 None boot_42364069 0300f8d6de00004b000000014a54555cd9.6 1 None boot_c3f7b1e7 
0300f8d6de00004b000000014a54555cd9.17 1 None boot_96654a43 0300f8d6de00004b000000014a54555cd9.7 1 None boot_1ecdfefc 0300f8d6de00004b000000014a54555cd9.18 1 None boot_b2460263 0300f8d6de00004b000000014a54555cd9.19 1 None boot_bef997cd 0300f8d6de00004b000000014a54555cd9.8 1 None boot_21e1de68 0300f8d6de00004b000000014a54555cd9.9 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/pcm_pref.txt0000664000175000017500000000571113571367171021733 0ustar neoneo00000000000000#################################################### # This file was manually generated. # #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystemPcmPreference'} END OF SECTION} HEADERS{ {'content-length': '2380', 'x-powered-by': 'Servlet/3.0', 'x-hmc-schema-version': 'V1_2_0', 'last-modified': 'Wed, 29 Apr 2015 04:07:37 GMT', 'etag': '-215935973', 'date': 'Wed, 29 Apr 2015 04:07:36 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 98498bed-c78a-3a4f-b90a-4b715418fcb6 Performance and Capacity Monitoring Preferences ac7580fa-861d-4ab8-b0d1-351267684aa5 2015-04-29T03:35:07.107Z Performance and Capacity Monitoring Preferences 2015-04-29T03:35:07.107Z IBM Power Systems Management Console 98498bed-c78a-3a4f-b90a-4b715418fcb6 1430278507106 dev-system-6 8247 22L 1111111 false false false false END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/lpar_ibmi.txt0000664000175000017500000002635013571367171022100 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh lpar_ibmi.txt # #################################################### INFO{ {'comment': 'Created from query of LogicalPartition', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'LogicalPartition'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=00002EyMEecWDIzdx_K0LwQNiUO:aa95eb5b-d145-4cd8-9030-8b370106cfee; Path=/; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 16 Aug 2013 09:49:34 GMT', 'etag': '959374938', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Fri, 16 Aug 2013 09:49:40 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 9fb9e6bf-5fa9-3c64-90c1-c9ff54863ffb 2013-09-09T12:04:13.919-04:00 IBM Power Systems Management Console 17294AEF-F0DE-4644-95F1-7635D3F88130 LogicalPartition 2013-09-09T12:04:24.364-04:00 IBM Power Systems Management Console 17294AEF-F0DE-4644-95F1-7635D3F88130 1433496957875 false 127 POWER7 On false true false false false true false manual 10C4DAT4 Unknown false false false false false 4 false 8 NONE 0 NONE false 8 false false 0 0.0 6 0 0 0.0 6 0 0 0 true true false false 0 0 false powervmtest1 0 0 0 true sre idle proces true sre idle proces 0 0 0 0 true not activated OS400 17294AEF-F0DE-4644-95F1-7635D3F88130 POWER7 0 0 inactive 121611008399872 false 00000000 false true false false false Not_Migrating Invalid false false true b END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/upload_file.txt0000664000175000017500000000507513571367171022426 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh upload_file.txt # #################################################### INFO{ {'comment': 'A file get', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'File/6233b070-31cc-4b57-99bd-37f80e845de9'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 6233b070-31cc-4b57-99bd-37f80e845de9 File 2015-01-23T02:53:59.301Z IBM Power Systems Management Console 6233b070-31cc-4b57-99bd-37f80e845de9 1421385014513 boot_c3f7b1e7 1421385016137 application/octet-stream 6233b070-31cc-4b57-99bd-37f80e845de9 4550656 4550656 BROKERED_DISK_IMAGE 14B854F7-42CE-4FF0-BD57-1D117054E701 0300f8d6de00004b000000014a54555cd9.17 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_volume_group_with_vio_data.txt0000664000175000017500000001272713571367171026557 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_volume_group_with_vio_data.txt # #################################################### INFO{ {'comment': 'Used for testing test_volume_group.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/3443DB77-AED1-47ED-9AA5-3DB9C6CF7089/VolumeGroup/dc08da6c-8bff-3fa8-b0d9-9ba7405aca91'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ d5065c2c-ac43-3fa6-af32-ea84a3960291 VolumeGroup 2015-01-31T02:54:28.846Z IBM Power Systems Management Console 1127525129 d5065c2c-ac43-3fa6-af32-ea84a3960291 1422654379462 235.5 235.5 264 rootvg 00025d4a00007a000000014b368de97e 256 bg_7f81628b_thorst_config.iso 0ebg_7f81628b_thorst_config.iso rw 0.000000 VMLibrary 1 SAS RAID 0 Disk Array U78CB.001.WZS007Y-P1-C14-T1-L205DB60300-L0 NoReserve Failover 01MUlCTSAgICAgSVBSLTAgICA1REI2MDMwMDAwMDAwMDIw false 270648 hdisk0 active 391BIBMIPR-0 5DB603000000002010IPR-0 5DB6030003IBMsas false 0400025d4a00007a000000014b368de97e END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/nbbr_virtual_network.txt0000664000175000017500000004404013571367171024400 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh nbbr_virtual_network.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/VirtualNetwork'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 7de032cc-a903-3fe1-a6ff-10522eedf373 2015-02-17T22:59:32.400Z IBM Power Systems Management Console 6508ca79-c94c-3f73-8137-af2e9c669c61 VirtualNetwork 2015-02-17T22:59:32.508Z IBM Power Systems Management Console 297058899 6508ca79-c94c-3f73-8137-af2e9c669c61 1424132682998 ETHERNET0-1234 1234 0 true 36cf4d94-d682-3962-bc86-acad65af6fbf VirtualNetwork 2015-02-17T22:59:32.509Z IBM Power Systems Management Console -1187527277 36cf4d94-d682-3962-bc86-acad65af6fbf 1424132682998 VLAN2227-ETHERNET0 2227 0 false f6b2d900-fd4e-34fa-a2e8-2650392f724e VirtualNetwork 2015-02-17T22:59:32.510Z IBM Power Systems Management Console 2044974078 f6b2d900-fd4e-34fa-a2e8-2650392f724e 1424132682998 ETHERNET0-1 1 0 false 3b19f453-9cbf-303b-9d72-4162d50739dc VirtualNetwork 2015-02-17T22:59:32.511Z IBM Power Systems Management Console 2116190827 3b19f453-9cbf-303b-9d72-4162d50739dc 1424132682998 ETHERNET0-2 2 0 true 4cb636f5-7d15-3cf9-a14d-a10c138c98a1 VirtualNetwork 2015-02-17T22:59:32.512Z IBM Power Systems Management Console 1658194928 
4cb636f5-7d15-3cf9-a14d-a10c138c98a1 1424132682998 ETHERNET0-1001 1001 0 true e6c0be9f-b974-35f4-855e-2b7192034fae VirtualNetwork 2015-02-17T22:59:32.513Z IBM Power Systems Management Console 1586976810 e6c0be9f-b974-35f4-855e-2b7192034fae 1424132844969 ETHERNET0-1000 1000 0 true 4dc25253-d47c-3774-8e36-42752bf7223b VirtualNetwork 2015-02-17T22:59:32.514Z IBM Power Systems Management Console 2005776930 4dc25253-d47c-3774-8e36-42752bf7223b 1424145231000 VLAN4093-ETHERNET0 4093 0 false 9b61c184-6878-36bf-88ea-dc4a61148316 VirtualNetwork 2015-02-17T22:59:32.515Z IBM Power Systems Management Console 1438427048 9b61c184-6878-36bf-88ea-dc4a61148316 1424153353695 VLAN4094-ETHERNET0 4094 0 false a97a5706-5894-11e5-885d-feff819cdc9f VirtualNetwork 2015-02-17T22:59:32.515Z IBM Power Systems Management Console 1438427048 a97a5706-5894-11e5-885d-feff819cdc9f 1424153353695 VLAN2828-ETHERNET0 2828 0 false bed3cc0e-5894-11e5-885d-feff819cdc9f VirtualNetwork 2015-02-17T22:59:32.515Z IBM Power Systems Management Console 1438427048 bed3cc0e-5894-11e5-885d-feff819cdc9f 1424153353695 VLAN1001-ETHERNET0 1001 0 false END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/cdata.xml0000664000175000017500000001522213571367171021173 0ustar neoneo00000000000000INFO{ {'comment': 'For power on-off testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/20414ABB-D6F0-4B3D-BB46-3822240BC4E9'} END OF SECTION} HEADERS{ {'content-length': '20079', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000gMeCzUqIZcs3oxLu4apVINO:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Tue, 30 Jul 2013 14:43:59 GMT', 'content-type': 'application/xml'} END OF SECTION} BODY{ 716224d8-afc2-4bcd-84f0-763873291dd0 JobResponse 2013-08-02T07:01:44.225-04:00 IBM Power Systems Management Console 6D459A1E-D028-4F36-83FF-207F29700FB8 
1375391227297 1375441290642 1375441301998 COMPLETED_OK ActivateCurrentProfile LogicalPartition inputXML 1trueIBM6005076802810B0FD00000000000049F04214521000024FF409CD0500507680245CAC06000000000000]]> Waiting WAITING Complete COMPLETE ActivateCurrentProfile not yet started NOT STARTED ActivateCurrentProfile in progress ACTIVATECURRENTPROFILE INPROGRESS ActivateCurrentProfile Completed ACTIVATECURRENTPROFILE COMPLETED returnCode 0 inputXML 1trueIBM6005076802810B0FD00000000000049F04214521000024FF409CD0500507680245CAC06000000000000]]> END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/ltm_feed.txt0000664000175000017500000000605313571367171021717 0ustar neoneo00000000000000#################################################### # This file was manually generated. # #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'RawMetrics/LongTermMonitor'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 98498bed-c78a-3a4f-b90a-4b715418fcb6 2015-04-30T03:53:00.000Z LongTermMetrics ManagedSystem 98498bed-c78a-3a4f-b90a-4b715418fcb6 15161241-b72f-41d5-8154-557ff699fb75 2015-04-30T03:53:00.000Z LTM_8247-22L*1111111_vios_2_20150430T035300+0000.json 2015-04-30T03:53:00.000Z IBM Power Systems Management Console cf8bf632-b702-4f4f-9029-5ffc8934e886 2015-04-30T03:53:00.000Z LTM_8247-22L*1111111_phyp_20150430T035300+0000.json 2015-04-30T03:53:00.000-05:00 IBM Power Systems Management Console aa05223a-0141-467e-86a5-4d57ede44ab8 2015-04-30T03:53:00.000Z LTM_8247-22L*1111111_lpar_20150430T035300+0000.json 2015-04-30T03:53:00.000Z IBM Power Systems Management Console END OF SECTION} 
pypowervm-1.1.24/pypowervm/tests/data/shrprocpool.txt0000664000175000017500000033155513571367171022522 0ustar neoneo00000000000000INFO{ {'comment': None, 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/SharedProcessorPool'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=0000xpnw0wcrfjvFxAa4Dl1ZxW0:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 31 Jul 2013 17:26:12 GMT', 'etag': '798417269', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 31 Jul 2013 17:26:16 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 4c04f267-7490-3321-bac6-3720894b5b5e 2015-07-15T00:40:38.721Z IBM Power Systems Management Console 4b854a40-4f9a-3848-bc1f-e278b4d8ce04 SharedProcessorPool 2015-07-15T00:40:38.857Z IBM Power Systems Management Console -658595740 4b854a40-4f9a-3848-bc1f-e278b4d8ce04 1436920838754 0 0.00 DefaultPool f55a0681-aed0-321a-9df0-3968a1dfe45d SharedProcessorPool 2015-07-15T00:40:38.858Z IBM Power Systems Management Console -1688233236 f55a0681-aed0-321a-9df0-3968a1dfe45d 1436920838754 5.35 10.25 6.15 1 5.05 SharedPool01 154c50c0-04cc-3d8a-9a07-787b6f23cd6c SharedProcessorPool 2015-07-15T00:40:38.860Z IBM Power Systems Management Console -1686357706 154c50c0-04cc-3d8a-9a07-787b6f23cd6c 1436920838754 0.00 0.00 0.00 2 0.00 SharedPool02 3e07c277-47ee-3e4f-85ee-7769412c42b7 SharedProcessorPool 2015-07-15T00:40:38.861Z IBM Power Systems Management Console -1684482176 3e07c277-47ee-3e4f-85ee-7769412c42b7 1436920838755 0.00 0.00 0.00 3 0.00 SharedPool03 eb2251b7-717f-3212-92e9-8430a121fecf SharedProcessorPool 2015-07-15T00:40:38.862Z IBM Power Systems Management Console -1682606646 eb2251b7-717f-3212-92e9-8430a121fecf 1436920838755 0.00 0.00 0.00 4 0.00 SharedPool04 
af8a068d-22c6-31c0-98b5-7766d2444834 SharedProcessorPool 2015-07-15T00:40:38.863Z IBM Power Systems Management Console -1680731116 af8a068d-22c6-31c0-98b5-7766d2444834 1436920838755 0.00 0.00 0.00 5 0.00 SharedPool05 076ef7be-6c50-35f9-a6df-fd78bb898ab8 SharedProcessorPool 2015-07-15T00:40:38.864Z IBM Power Systems Management Console -1678855586 076ef7be-6c50-35f9-a6df-fd78bb898ab8 1436920838755 0.00 0.00 0.00 6 0.00 SharedPool06 1c162e7a-430b-323f-90ee-f15a392fffaf SharedProcessorPool 2015-07-15T00:40:38.866Z IBM Power Systems Management Console -1676980056 1c162e7a-430b-323f-90ee-f15a392fffaf 1436920838755 0.00 0.00 0.00 7 0.00 SharedPool07 13335c11-871e-3cb4-b749-81dafb3376e9 SharedProcessorPool 2015-07-15T00:40:38.867Z IBM Power Systems Management Console -1675104526 13335c11-871e-3cb4-b749-81dafb3376e9 1436920838755 0.00 0.00 0.00 8 0.00 SharedPool08 11bcbaef-3111-3e66-8018-ad5f43a1b483 SharedProcessorPool 2015-07-15T00:40:38.869Z IBM Power Systems Management Console -1673228996 11bcbaef-3111-3e66-8018-ad5f43a1b483 1436920838755 0.00 0.00 0.00 9 0.00 SharedPool09 47b418a1-4f1e-3afa-a785-5154ade4ad3a SharedProcessorPool 2015-07-15T00:40:38.870Z IBM Power Systems Management Console -1671324717 47b418a1-4f1e-3afa-a785-5154ade4ad3a 1436920838755 0.00 0.00 0.00 10 0.00 SharedPool10 03eda1f0-0d4b-393b-bb9e-117561f96baf SharedProcessorPool 2015-07-15T00:40:38.872Z IBM Power Systems Management Console -1669449187 03eda1f0-0d4b-393b-bb9e-117561f96baf 1436920838756 0.00 0.00 0.00 11 0.00 SharedPool11 86213f38-bc25-35d4-a2ef-5dccfd35f80d SharedProcessorPool 2015-07-15T00:40:38.873Z IBM Power Systems Management Console -1667573657 86213f38-bc25-35d4-a2ef-5dccfd35f80d 1436920838756 0.00 0.00 0.00 12 0.00 SharedPool12 b20c74cc-b847-3890-a3c3-15417726689a SharedProcessorPool 2015-07-15T00:40:38.874Z IBM Power Systems Management Console -1665698127 b20c74cc-b847-3890-a3c3-15417726689a 1436920838756 0.00 0.00 0.00 13 0.00 SharedPool13 71ce2c66-63f9-388d-b2e4-537316e82e69 
SharedProcessorPool 2015-07-15T00:40:38.875Z IBM Power Systems Management Console -1663822597 71ce2c66-63f9-388d-b2e4-537316e82e69 1436920838756 0.00 0.00 0.00 14 0.00 SharedPool14 0084044a-80dd-3930-9c84-5493e38ba8ea SharedProcessorPool 2015-07-15T00:40:38.876Z IBM Power Systems Management Console -1661947067 0084044a-80dd-3930-9c84-5493e38ba8ea 1436920838756 0.00 0.00 0.00 15 0.00 SharedPool15 cb326b7b-5363-3eb9-8392-5babba484182 SharedProcessorPool 2015-07-15T00:40:38.878Z IBM Power Systems Management Console -1660071537 cb326b7b-5363-3eb9-8392-5babba484182 1436920838756 0.00 0.00 0.00 16 0.00 SharedPool16 871719eb-ceee-3eba-ab4b-fcfbe65d0982 SharedProcessorPool 2015-07-15T00:40:38.879Z IBM Power Systems Management Console -1658196007 871719eb-ceee-3eba-ab4b-fcfbe65d0982 1436920838756 0.00 0.00 0.00 17 0.00 SharedPool17 fd463042-ce00-36a8-82d9-c9c11bfd2aea SharedProcessorPool 2015-07-15T00:40:38.880Z IBM Power Systems Management Console -1656320477 fd463042-ce00-36a8-82d9-c9c11bfd2aea 1436920838757 0.00 0.00 0.00 18 0.00 SharedPool18 a459141f-57b5-33d8-ab76-2d5f70551a48 SharedProcessorPool 2015-07-15T00:40:38.881Z IBM Power Systems Management Console -1654444947 a459141f-57b5-33d8-ab76-2d5f70551a48 1436920838757 0.00 0.00 0.00 19 0.00 SharedPool19 602ecf9c-370b-3d7a-a704-9d04f6101eb4 SharedProcessorPool 2015-07-15T00:40:38.883Z IBM Power Systems Management Console -1652540668 602ecf9c-370b-3d7a-a704-9d04f6101eb4 1436920838757 0.00 0.00 0.00 20 0.00 SharedPool20 d424938e-cab4-3bef-9eb3-b33480e4d4b5 SharedProcessorPool 2015-07-15T00:40:38.885Z IBM Power Systems Management Console -1650665138 d424938e-cab4-3bef-9eb3-b33480e4d4b5 1436920838757 0.00 0.00 0.00 21 0.00 SharedPool21 3913b557-8ae3-3563-9925-a2223203c7f1 SharedProcessorPool 2015-07-15T00:40:38.886Z IBM Power Systems Management Console -1648789608 3913b557-8ae3-3563-9925-a2223203c7f1 1436920838757 0.00 0.00 0.00 22 0.00 SharedPool22 0a31aa3b-80d3-3d97-9d45-c9fcd5330cd9 SharedProcessorPool 
2015-07-15T00:40:38.887Z IBM Power Systems Management Console -1646914078 0a31aa3b-80d3-3d97-9d45-c9fcd5330cd9 1436920838757 0.00 0.00 0.00 23 0.00 SharedPool23 b8561240-1a90-3817-93a1-bcb371c2cd60 SharedProcessorPool 2015-07-15T00:40:38.889Z IBM Power Systems Management Console -1645038548 b8561240-1a90-3817-93a1-bcb371c2cd60 1436920838757 0.00 0.00 0.00 24 0.00 SharedPool24 a1e3bd56-2334-3379-9abd-fd26f79995de SharedProcessorPool 2015-07-15T00:40:38.890Z IBM Power Systems Management Console -1643163018 a1e3bd56-2334-3379-9abd-fd26f79995de 1436920838757 0.00 0.00 0.00 25 0.00 SharedPool25 20c49eec-6837-394b-af7a-c8848f939ac4 SharedProcessorPool 2015-07-15T00:40:38.891Z IBM Power Systems Management Console -1641287488 20c49eec-6837-394b-af7a-c8848f939ac4 1436920838758 0.00 0.00 0.00 26 0.00 SharedPool26 16d26d1f-f8f3-3a10-b26e-6c23e4c81235 SharedProcessorPool 2015-07-15T00:40:38.892Z IBM Power Systems Management Console -1639411958 16d26d1f-f8f3-3a10-b26e-6c23e4c81235 1436920838758 0.00 0.00 0.00 27 0.00 SharedPool27 4c58b210-30ba-33d8-9073-dda5d9cce4ad SharedProcessorPool 2015-07-15T00:40:38.893Z IBM Power Systems Management Console -1637536428 4c58b210-30ba-33d8-9073-dda5d9cce4ad 1436920838758 0.00 0.00 0.00 28 0.00 SharedPool28 72f6c721-d6fd-31d1-ac64-463b88f3ab1f SharedProcessorPool 2015-07-15T00:40:38.894Z IBM Power Systems Management Console -1635660898 72f6c721-d6fd-31d1-ac64-463b88f3ab1f 1436920838758 0.00 0.00 0.00 29 0.00 SharedPool29 52602416-522d-303b-b320-6970b0bc68dd SharedProcessorPool 2015-07-15T00:40:38.895Z IBM Power Systems Management Console -1633756619 52602416-522d-303b-b320-6970b0bc68dd 1436920838758 0.00 0.00 0.00 30 0.00 SharedPool30 c7d10ebd-26b5-3103-b4b3-80179c89dfd1 SharedProcessorPool 2015-07-15T00:40:38.897Z IBM Power Systems Management Console -1631881089 c7d10ebd-26b5-3103-b4b3-80179c89dfd1 1436920838758 0.00 0.00 0.00 31 0.00 SharedPool31 09edbcc1-1172-30ad-905a-bcead8b8f487 SharedProcessorPool 2015-07-15T00:40:38.898Z IBM Power 
Systems Management Console -1630005559 09edbcc1-1172-30ad-905a-bcead8b8f487 1436920838758 0.00 0.00 0.00 32 0.00 SharedPool32 8fe4a5b4-cc3d-34ab-b56b-18c8b14dd53b SharedProcessorPool 2015-07-15T00:40:38.899Z IBM Power Systems Management Console -1628130029 8fe4a5b4-cc3d-34ab-b56b-18c8b14dd53b 1436920838759 0.00 0.00 0.00 33 0.00 SharedPool33 61d07d84-a940-3db4-b153-49f282387fc5 SharedProcessorPool 2015-07-15T00:40:38.900Z IBM Power Systems Management Console -1626254499 61d07d84-a940-3db4-b153-49f282387fc5 1436920838759 0.00 0.00 0.00 34 0.00 SharedPool34 bd095c6e-7b85-35ca-8748-921db4735687 SharedProcessorPool 2015-07-15T00:40:38.901Z IBM Power Systems Management Console -1624378969 bd095c6e-7b85-35ca-8748-921db4735687 1436920838759 0.00 0.00 0.00 35 0.00 SharedPool35 194b593f-db85-3e0c-8e74-104e7be8bbbe SharedProcessorPool 2015-07-15T00:40:38.902Z IBM Power Systems Management Console -1622503439 194b593f-db85-3e0c-8e74-104e7be8bbbe 1436920838759 0.00 0.00 0.00 36 0.00 SharedPool36 00df34e8-d2f8-34a1-accb-7f9d90eaea5c SharedProcessorPool 2015-07-15T00:40:38.904Z IBM Power Systems Management Console -1620627909 00df34e8-d2f8-34a1-accb-7f9d90eaea5c 1436920838759 0.00 0.00 0.00 37 0.00 SharedPool37 f41de85d-b00d-309c-93a7-1963abb8cdde SharedProcessorPool 2015-07-15T00:40:38.905Z IBM Power Systems Management Console -1618752379 f41de85d-b00d-309c-93a7-1963abb8cdde 1436920838759 0.00 0.00 0.00 38 0.00 SharedPool38 162ecdd1-11ce-3273-bf8c-3079e4f31dd4 SharedProcessorPool 2015-07-15T00:40:38.906Z IBM Power Systems Management Console -1616876849 162ecdd1-11ce-3273-bf8c-3079e4f31dd4 1436920838760 0.00 0.00 0.00 39 0.00 SharedPool39 241bf3e6-9d74-36c0-b292-43b0e12b2940 SharedProcessorPool 2015-07-15T00:40:38.907Z IBM Power Systems Management Console -1614972570 241bf3e6-9d74-36c0-b292-43b0e12b2940 1436920838760 0.00 0.00 0.00 40 0.00 SharedPool40 13190a4e-4ee0-38bb-9591-9146c1cae93f SharedProcessorPool 2015-07-15T00:40:38.908Z IBM Power Systems Management Console 
-1613097040 13190a4e-4ee0-38bb-9591-9146c1cae93f 1436920838760 0.00 0.00 0.00 41 0.00 SharedPool41 2d1f5f01-bfb3-3a4b-b56d-bcbea6d0b399 SharedProcessorPool 2015-07-15T00:40:38.909Z IBM Power Systems Management Console -1611221510 2d1f5f01-bfb3-3a4b-b56d-bcbea6d0b399 1436920838760 0.00 0.00 0.00 42 0.00 SharedPool42 10bb0f0d-8566-3fcb-b1ea-188b6299524a SharedProcessorPool 2015-07-15T00:40:38.910Z IBM Power Systems Management Console -1609345980 10bb0f0d-8566-3fcb-b1ea-188b6299524a 1436920838760 0.00 0.00 0.00 43 0.00 SharedPool43 d185aedf-efde-321b-b9b2-f6dde508cc1f SharedProcessorPool 2015-07-15T00:40:38.911Z IBM Power Systems Management Console -1607470450 d185aedf-efde-321b-b9b2-f6dde508cc1f 1436920838761 0.00 0.00 0.00 44 0.00 SharedPool44 ebbeadf7-1ed7-3b06-80d1-019c30a749f5 SharedProcessorPool 2015-07-15T00:40:38.913Z IBM Power Systems Management Console -1605594920 ebbeadf7-1ed7-3b06-80d1-019c30a749f5 1436920838761 0.00 0.00 0.00 45 0.00 SharedPool45 f1b5817d-0235-38e6-a2ce-46c4401994cd SharedProcessorPool 2015-07-15T00:40:38.914Z IBM Power Systems Management Console -1603719390 f1b5817d-0235-38e6-a2ce-46c4401994cd 1436920838761 0.00 0.00 0.00 46 0.00 SharedPool46 e1ef0903-2ff6-3f38-a852-13d17eca8879 SharedProcessorPool 2015-07-15T00:40:38.915Z IBM Power Systems Management Console -1601843860 e1ef0903-2ff6-3f38-a852-13d17eca8879 1436920838761 0.00 0.00 0.00 47 0.00 SharedPool47 3e832755-b298-3ad1-bbd0-0c650dd0f0f0 SharedProcessorPool 2015-07-15T00:40:38.916Z IBM Power Systems Management Console -1599968330 3e832755-b298-3ad1-bbd0-0c650dd0f0f0 1436920838761 0.00 0.00 0.00 48 0.00 SharedPool48 dc398021-96f1-32f8-87c5-f440bb8e208d SharedProcessorPool 2015-07-15T00:40:38.917Z IBM Power Systems Management Console -1598092800 dc398021-96f1-32f8-87c5-f440bb8e208d 1436920838762 0.00 0.00 0.00 49 0.00 SharedPool49 6316d644-8625-3eaa-99ee-93c8c8940069 SharedProcessorPool 2015-07-15T00:40:38.918Z IBM Power Systems Management Console -1596188521 
6316d644-8625-3eaa-99ee-93c8c8940069 1436920838762 0.00 0.00 0.00 50 0.00 SharedPool50 ed8f75a3-429d-3ca9-aa0e-faf63c12e0a0 SharedProcessorPool 2015-07-15T00:40:38.919Z IBM Power Systems Management Console -1594312991 ed8f75a3-429d-3ca9-aa0e-faf63c12e0a0 1436920838762 0.00 0.00 0.00 51 0.00 SharedPool51 71170618-18b3-38b2-af86-9715c2ce281b SharedProcessorPool 2015-07-15T00:40:38.921Z IBM Power Systems Management Console -1592437461 71170618-18b3-38b2-af86-9715c2ce281b 1436920838762 0.00 0.00 0.00 52 0.00 SharedPool52 00a54c9e-8427-36f3-baf9-6158f28e6b89 SharedProcessorPool 2015-07-15T00:40:38.922Z IBM Power Systems Management Console -1590561931 00a54c9e-8427-36f3-baf9-6158f28e6b89 1436920838763 0.00 0.00 0.00 53 0.00 SharedPool53 700c9f48-9864-3032-8247-55f21276f51d SharedProcessorPool 2015-07-15T00:40:38.923Z IBM Power Systems Management Console -1588686401 700c9f48-9864-3032-8247-55f21276f51d 1436920838763 0.00 0.00 0.00 54 0.00 SharedPool54 3bfa1167-b828-3311-83df-cb5b6c564e57 SharedProcessorPool 2015-07-15T00:40:38.924Z IBM Power Systems Management Console -1586810871 3bfa1167-b828-3311-83df-cb5b6c564e57 1436920838763 0.00 0.00 0.00 55 0.00 SharedPool55 f958f795-6952-3937-a5ba-c4c229cbe2cb SharedProcessorPool 2015-07-15T00:40:38.925Z IBM Power Systems Management Console -1584935341 f958f795-6952-3937-a5ba-c4c229cbe2cb 1436920838763 0.00 0.00 0.00 56 0.00 SharedPool56 d373513d-bc2b-3709-83b9-a64c616d2d98 SharedProcessorPool 2015-07-15T00:40:38.926Z IBM Power Systems Management Console -1583059811 d373513d-bc2b-3709-83b9-a64c616d2d98 1436920838763 0.00 0.00 0.00 57 0.00 SharedPool57 be6e1731-7348-39fe-969b-d800e28a06e2 SharedProcessorPool 2015-07-15T00:40:38.927Z IBM Power Systems Management Console -1581184281 be6e1731-7348-39fe-969b-d800e28a06e2 1436920838764 0.00 0.00 0.00 58 0.00 SharedPool58 92bf61dd-de63-3850-9222-e14724d5156f SharedProcessorPool 2015-07-15T00:40:38.928Z IBM Power Systems Management Console -1579308751 92bf61dd-de63-3850-9222-e14724d5156f 
1436920838764 0.00 0.00 0.00 59 0.00 SharedPool59 79ee42eb-aa95-3133-ad24-08ad837c9f81 SharedProcessorPool 2015-07-15T00:40:38.930Z IBM Power Systems Management Console -1577404472 79ee42eb-aa95-3133-ad24-08ad837c9f81 1436920838764 0.00 0.00 0.00 60 0.00 SharedPool60 ddabe687-b847-3f97-82bf-ec434f948ea7 SharedProcessorPool 2015-07-15T00:40:38.931Z IBM Power Systems Management Console -1575528942 ddabe687-b847-3f97-82bf-ec434f948ea7 1436920838764 0.00 0.00 0.00 61 0.00 SharedPool61 545f1f23-0788-3bc7-ac2d-3c99f4ab35b0 SharedProcessorPool 2015-07-15T00:40:38.932Z IBM Power Systems Management Console -1573653412 545f1f23-0788-3bc7-ac2d-3c99f4ab35b0 1436920838764 0.00 0.00 0.00 62 0.00 SharedPool62 f79a5052-33bd-3adc-a6d0-5d7e91daa114 SharedProcessorPool 2015-07-15T00:40:38.933Z IBM Power Systems Management Console -1571777882 f79a5052-33bd-3adc-a6d0-5d7e91daa114 1436920838764 0.00 0.00 0.00 63 0.00 SharedPool63 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/job_response_completed_failed.txt0000664000175000017500000001151713571367171026171 0ustar neoneo00000000000000INFO{ {'comment': 'For power on-off testing', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/20414ABB-D6F0-4B3D-BB46-3822240BC4E9'} END OF SECTION} HEADERS{ {'content-length': '20079', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000gMeCzUqIZcs3oxLu4apVINO:a034238f-7921-42e3-862d-89cae58dc68a; Path=/; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Tue, 30 Jul 2013 14:43:59 GMT', 'content-type': 'application/xml'} END OF SECTION} BODY{ e3bfe81d-4a5d-33a9-992e-3575200ee194 JobResponse 2013-08-14T08:38:45.894-04:00 IBM Power Systems Management Console 67A8308B-2CCA-4407-A1BC-632E108B479A 1376478617707 1376483923381 1376483924729 COMPLETED_WITH_ERROR PowerOff LogicalPartition operation shutdown Started STARTED Completed with error COMPLETE 1.0 returnCode 1 
result HSCL05DF The partition is not in a state under which this operation can be performed. Check the state of the partition. END OF SECTION}pypowervm-1.1.24/pypowervm/tests/data/vswitch_feed.txt0000664000175000017500000001127313571367171022612 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh vswitch_feed.txt # #################################################### INFO{ {'comment': None, 'path': 'ManagedSystem/95c2990a-e412-3ac8-92f4-058278b32ea2/VirtualSwitch', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'Content-Length': '3708', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000GB1f9nIlVDdW57ZhA81QT-M:a17cec82-c18b-4c87-8be7-df4ca789296a; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Mon, 09 May 2016 22:39:17 GMT', 'X-Transaction-ID': 'XT11312943', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Mon, 09 May 2016 22:39:17 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': 'ebee115d-e281-4bfd-a138-86d8a78a645c', 'ETag': '-228271901'} END OF SECTION} BODY{ e6b27521-ef5f-337a-b82f-2a044dfbf07f 2016-05-09T18:39:17.856-04:00 IBM Power Systems Management Console 2affcc5d-b3d7-3e63-abbd-e739070ba1c2 VirtualSwitch 2016-05-09T18:39:17.859-04:00 IBM Power Systems Management Console 1845306236 2affcc5d-b3d7-3e63-abbd-e739070ba1c2 0 0 Veb ETHERNET0 31cd66a8-ddd5-3bf2-95fe-2301d10e0fad VirtualSwitch 2016-05-09T18:39:17.860-04:00 IBM Power Systems Management Console -1598191330 31cd66a8-ddd5-3bf2-95fe-2301d10e0fad 0 1 Vepa MGMTSWITCH END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/managedsystem.txt0000664000175000017500000103252413571367171023004 0ustar 
neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh managedsystem.txt # #################################################### INFO{ {'comment': 'Created from query of ManagedSystem', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 06 Sep 2013 11:57:23 GMT', 'etag': '449450179', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate', 'date': 'Fri, 06 Sep 2013 11:57:33 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 199a2279-872b-38c8-b0e4-95d7e71e68d7 2013-09-06T07:57:23.335-04:00 IBM Power Systems Management Console caae9209-25e5-35cd-a71a-ed55c03f294d ManagedSystem 2013-09-06T07:57:33.195-04:00 IBM Power Systems Management Console caae9209-25e5-35cd-a71a-ed55c03f294d 1378468647423 System_Management_Services Permanent manual false Reboot 2 System_Management_Services Permanent manual userinit standby fast fast true false true true true true true true true true Inactive false true true true false true true true true true true true true true false true true false true true true false false true true true true true true true true true true true true true true true true true true true true true true true true true true true true true true true true true true 65536 553713681 I/O Processor U5294.001.CEC1234-P01-C011 U5294.001.CEC1234-P01-C011 C011 4660 553779217 I/O Processor U5294.001.CEC1234-P01-C012 U5294.001.CEC1234-P01-C012 C012 4660 553844753 PCI Ultra2 RAID Disk Controller U5294.001.CEC1234-P01-C013 U5294.001.CEC1234-P01-C013 C013 73 553910273 Ethernet controller U5294.001.CEC1234-P01-C014 U5294.001.CEC1234-P01-C014 C014 22136 553713682 Ethernet 
controller U5294.001.CEC1234-P11-C111 U5294.001.CEC1234-P11-C111 C111 22136 553779218 Ethernet controller U5294.001.CEC1234-P11-C112 U5294.001.CEC1234-P11-C112 C112 22136 19088736 Ethernet controller U5294.001.CEC1234-P13-C113 U5294.001.CEC1234-P13-C113 C113 22136 19088737 Ethernet controller U5294.001.CEC1234-P13-C114 U5294.001.CEC1234-P13-C114 C114 22136 19088738 Ethernet controller U5294.001.CEC1234-P13-C115 U5294.001.CEC1234-P13-C115 C115 22136 553713697 Ethernet controller U5791.001.CEC1202-P02-C021 U5791.001.CEC1202-P02-C021 C021 4660 553779233 Ethernet controller U5791.001.CEC1202-P02-C022 U5791.001.CEC1202-P02-C022 C022 4660 553844769 PCI I/O Processor U5791.001.CEC1202-P02-C023 U5791.001.CEC1202-P02-C023 C023 128 553910305 PCI I/O Processor U5791.001.CEC1202-P02-C024 U5791.001.CEC1202-P02-C024 C024 128 553975841 Ethernet controller U5791.001.CEC1202-P02-C025 U5791.001.CEC1202-P02-C025 C025 22136 554041377 PCI I/O Processor U5791.001.CEC1202-P02-C026 U5791.001.CEC1202-P02-C026 C026 128 554106913 PCI I/O Processor U5791.001.CEC1202-P02-C027 U5791.001.CEC1202-P02-C027 C027 128 553713698 Ethernet controller U5791.001.CEC1202-P18-C121 U5791.001.CEC1202-P18-C121 C121 4660 553779234 Ethernet controller U5791.001.CEC1202-P18-C122 U5791.001.CEC1202-P18-C122 C122 4660 570490914 Fibre Channel Serial Bus U78AF.001.WZS01Z5-P1-C35-L1 U78AF.001.WZS01Z5-P1-C35-L1 C35-L1 9522 570556450 Fibre Channel Serial Bus U78AF.001.WZS01Z5-P1-C37-L1 U78AF.001.WZS01Z5-P1-C37-L1 C37-L1 9522 U5294.001.CEC1234-P01 536870929 U1 Bus 1 17 U5294.001.CEC1234-P11 536870930 U1 Bus 17 18 U5294.001.CEC1234-P13 536870931 U1 Bus 13 19 U5791.001.CEC1202-P02 536870945 U2 Bus 2 33 U5791.001.CEC1202-P18 536936482 U2 Bus 18 34 U78AF.001.WZS01Z5-P1 536936483 U2 Bus 19 35 false I/O Processor 0 U5294.001.CEC1234 553713681 4660 512 4660 1202 4116 0 4116 4116 false false false true false false false false false false 553713681 I/O Processor U5294.001.CEC1234-P01-C011 U5294.001.CEC1234-P01-C011 C011 4660 
553713681 U5294.001.CEC1234-P01-C011 C011 true I/O Processor U5294.001.CEC1234 553779217 4660 2 02899D96-9D20-490F-8B1B-4D3DEE1210ED vios1 Virtual IO Server 512 4660 4116 0 4116 false false false true false false false false false false 553779217 I/O Processor U5294.001.CEC1234-P01-C012 U5294.001.CEC1234-P01-C012 C012 4660 553779217 U5294.001.CEC1234-P01-C012 C012 false PCI Ultra2 RAID Disk Controller U5294.001.CEC1234 73 256 73 4116 3 4116 false false false false false false false false false false 553844753 PCI Ultra2 RAID Disk Controller U5294.001.CEC1234-P01-C013 U5294.001.CEC1234-P01-C013 C013 73 553844753 U5294.001.CEC1234-P01-C013 C013 001 3333333 2748 false Ethernet controller U5294.001.CEC1234 22136 512 22136 4116 1 4116 false false false false false false false false false false 553910273 Ethernet controller U5294.001.CEC1234-P01-C014 U5294.001.CEC1234-P01-C014 C014 22136 553910273 U5294.001.CEC1234-P01-C014 C014 444 4444444 4444 false Ethernet controller U5294.001.CEC1234 22136 512 22136 4116 1 4116 false false false false false false false false false false 553713682 Ethernet controller U5294.001.CEC1234-P11-C111 U5294.001.CEC1234-P11-C111 C111 22136 553713682 U5294.001.CEC1234-P11-C111 C111 111 11111111 1111 false Ethernet controller U5294.001.CEC1234 22136 512 22136 4116 1 4116 false false false false false false false false false false 553779218 Ethernet controller U5294.001.CEC1234-P11-C112 U5294.001.CEC1234-P11-C112 C112 22136 553779218 U5294.001.CEC1234-P11-C112 C112 222 22222222 2222 false Ethernet controller U5294.001.CEC1234 22136 512 22136 4116 1 4116 false false false false false false false false false false 19088736 Ethernet controller U5294.001.CEC1234-P13-C113 U5294.001.CEC1234-P13-C113 C113 22136 19088736 U5294.001.CEC1234-P13-C113 C113 333 33333333 3333 false Ethernet controller U5294.001.CEC1234 22136 512 22136 4116 1 4116 false false false false false false false false false false 19088737 Ethernet controller 
U5294.001.CEC1234-P13-C114 U5294.001.CEC1234-P13-C114 C114 22136 19088737 U5294.001.CEC1234-P13-C114 C114 444 44444444 4444 false Ethernet controller U5294.001.CEC1234 22136 512 22136 4116 1 4116 false false false false false false false false false false 19088738 Ethernet controller U5294.001.CEC1234-P13-C115 U5294.001.CEC1234-P13-C115 C115 22136 19088738 U5294.001.CEC1234-P13-C115 C115 444 44444444 4444 false Ethernet controller U5791.001.CEC1202 4660 512 4660 4116 0 4116 false false false false false false false false false false 553713697 Ethernet controller U5791.001.CEC1202-P02-C021 U5791.001.CEC1202-P02-C021 C021 4660 553713697 U5791.001.CEC1202-P02-C021 C021 false Ethernet controller U5791.001.CEC1202 4660 512 4660 4116 0 4116 false false false false false false false false false false 553779233 Ethernet controller U5791.001.CEC1202-P02-C022 U5791.001.CEC1202-P02-C022 C022 4660 553779233 U5791.001.CEC1202-P02-C022 C022 false PCI I/O Processor U5791.001.CEC1202 128 1540 128 4114 2 4114 false false false false false false false false false false 553844769 PCI I/O Processor U5791.001.CEC1202-P02-C023 U5791.001.CEC1202-P02-C023 C023 128 553844769 U5791.001.CEC1202-P02-C023 C023 001 22222222 2843 false PCI I/O Processor U5791.001.CEC1202 128 1540 128 4114 2 4114 false false false false false false false false false false 553910305 PCI I/O Processor U5791.001.CEC1202-P02-C024 U5791.001.CEC1202-P02-C024 C024 128 553910305 U5791.001.CEC1202-P02-C024 C024 001 22222222 2843 false Ethernet controller U5791.001.CEC1202 22136 512 22136 4116 1 4116 false false false false false false false false false false 553975841 Ethernet controller U5791.001.CEC1202-P02-C025 U5791.001.CEC1202-P02-C025 C025 22136 553975841 U5791.001.CEC1202-P02-C025 C025 555 55555555 5555 false PCI I/O Processor U5791.001.CEC1202 128 1540 128 4114 2 4114 false false false false false false false false false false 554041377 PCI I/O Processor U5791.001.CEC1202-P02-C026 U5791.001.CEC1202-P02-C026 C026 
128 554041377 U5791.001.CEC1202-P02-C026 C026 001 22222222 2843 false PCI I/O Processor U5791.001.CEC1202 128 1540 128 4114 2 4114 false false false false false false false false false false 554106913 PCI I/O Processor U5791.001.CEC1202-P02-C027 U5791.001.CEC1202-P02-C027 C027 128 554106913 U5791.001.CEC1202-P02-C027 C027 001 22222222 2843 false Ethernet controller U5791.001.CEC1202 4660 512 4660 4116 0 4116 false false false false false false false false false false 553713698 Ethernet controller U5791.001.CEC1202-P18-C121 U5791.001.CEC1202-P18-C121 C121 4660 553713698 U5791.001.CEC1202-P18-C121 C121 false Ethernet controller U5791.001.CEC1202 4660 512 4660 4116 0 4116 false false false false false false false false false false 553779234 Ethernet controller U5791.001.CEC1202-P18-C122 U5791.001.CEC1202-P18-C122 C122 4660 553779234 U5791.001.CEC1202-P18-C122 C122 false Fibre Channel Serial Bus U5791.001.CEC1202 9522 3076 9522 4215 2 4215 false false false false false false false false false false 570490914 Fibre Channel Serial Bus U78AF.001.WZS01Z5-P1-C35-L1 U78AF.001.WZS01Z5-P1-C35-L1 C35-L1 9522 570490914 U78AF.001.WZS01Z5-P1-C35-L1 C35-L1 false Fibre Channel Serial Bus U5791.001.CEC1202 9522 3076 9522 4215 2 4215 false false false false false false false false false false 570556450 Fibre Channel Serial Bus U78AF.001.WZS01Z5-P1-C37-L1 U78AF.001.WZS01Z5-P1-C37-L1 C37-L1 9522 570556450 U78AF.001.WZS01Z5-P1-C37-L1 C37-L1 FA627A 12379814471884843981 5 6 7 8 9 1:2,1:4,1:8,1:16,1:32,1:64,1:128,1:256,1:512,1:1024 64 8 8 0 5242880 2883584 No Affinity 0 0 5242752 64 system firmware only 0 0 7 4 7 0 16 5767168 327 1 0 Not_Mirrored Mirrored 64 0 2883584 0 0 No Affinity 0 5242752 64 none 64 0 false 8 500 500 32 32 512 20 30 10 20 2 500 0 24 0.05 500 64 default POWER5 POWER6 POWER6_Enhanced POWER6_Plus_Enhanced POWER7 false Turbocore_Capable 32 1 1 None 8203 E4A ACE0001 false 254 1 4 0 30 false 127.0.0.1 false Not Ready 00070000 operating HV4 1378468644488 false 5 0 0 0 9 true 
true true false 6689 true e955e11a-6ee0-3e04-987c-6fa319f8cb8c ManagedSystem 2013-09-06T07:57:33.154-04:00 IBM Power Systems Management Console e955e11a-6ee0-3e04-987c-6fa319f8cb8c 1378467297288 Normal Temporary normal Reboot 1 Normal Temporary normal autostart autostart fast none true true true true false false true false false true true false false true true true true true false false true true true true false true true true true true true false true true true true true true false true true true true true false false true false true true true true true true true true true true 65536 553713674 PCI-E SAS Controller U78AB.001.WZSJBM3-P1-T9 U78AB.001.WZSJBM3-P1-T9 T9 825 553779211 Universal Serial Bus UHC Spec U78AB.001.WZSJBM3-P1-T5 U78AB.001.WZSJBM3-P1-T5 T5 53 553844748 Empty slot U78AB.001.WZSJBM3-P1-C18 U78AB.001.WZSJBM3-P1-C18 C18 65535 553910285 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78AB.001.WZSJBM3-P1-C7 U78AB.001.WZSJBM3-P1-C7 C7 5719 553714177 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJBM3-P1-C2 U78AB.001.WZSJBM3-P1-C2 C2 61696 553714178 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJBM3-P1-C3 U78AB.001.WZSJBM3-P1-C3 C3 61696 553714179 10 Gigabit Ethernet-SFP+ SR PCI-E adapter U78AB.001.WZSJBM3-P1-C4 U78AB.001.WZSJBM3-P1-C4 C4 1808 553714180 Empty slot U78AB.001.WZSJBM3-P1-C5 U78AB.001.WZSJBM3-P1-C5 C5 65535 553714181 Empty slot U78AB.001.WZSJBM3-P1-C6 U78AB.001.WZSJBM3-P1-C6 C6 65535 U78AB.001.WZSJBM3-P1 536870922 PHB 10 10 U78AB.001.WZSJBM3-P1 536870923 PHB 11 11 U78AB.001.WZSJBM3-P1 536870924 PHB 12 12 U78AB.001.WZSJBM3-P1 536870925 PHB 13 13 U78AB.001.WZSJBM3-P1 536871424 PHB 512 512 U78AB.001.WZSJBM3-P1 536871425 PHB 513 513 U78AB.001.WZSJBM3-P1 536871426 PHB 514 514 U78AB.001.WZSJBM3-P1 536871427 PHB 515 515 U78AB.001.WZSJBM3-P1 536871428 PHB 516 516 U78AB.001.WZSJBM3-P1 536871429 PHB 517 517 false PCI-E SAS Controller 2053 2054 2055 5901 5909 5911 2053 2054 2055 5901 5909 5911 2053 2054 
2055 5901 5909 5911 U78AB.001.WZSJBM3 825 260 825 4116 1 4116 false false false false false false false false false false 553713674 PCI-E SAS Controller U78AB.001.WZSJBM3-P1-T9 U78AB.001.WZSJBM3-P1-T9 T9 825 553713674 U78AB.001.WZSJBM3-P1-T9 T9 false Universal Serial Bus UHC Spec U78AB.001.WZSJBM3 53 4099 53 4147 170 4147 false false false false false false false false false false 553779211 Universal Serial Bus UHC Spec U78AB.001.WZSJBM3-P1-T5 U78AB.001.WZSJBM3-P1-T5 T5 53 553779211 U78AB.001.WZSJBM3-P1-T5 T5 false Empty slot 0 0 0 U78AB.001.WZSJBM3 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553844748 Empty slot U78AB.001.WZSJBM3-P1-C18 U78AB.001.WZSJBM3-P1-C18 C18 65535 553844748 U78AB.001.WZSJBM3-P1-C18 C18 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78AB.001.WZSJBM3 5719 512 5719 5348 1 5348 false false false false false false false false false false 553910285 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78AB.001.WZSJBM3-P1-C7 U78AB.001.WZSJBM3-P1-C7 C7 5719 553910285 U78AB.001.WZSJBM3-P1-C7 C7 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSJBM3 61696 3076 61696 4319 3 4319 false false false false false false false false false false 553714177 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJBM3-P1-C2 U78AB.001.WZSJBM3-P1-C2 C2 61696 553714177 U78AB.001.WZSJBM3-P1-C2 C2 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSJBM3 61696 3076 61696 4319 3 4319 false false false false false false false false false false 553714178 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJBM3-P1-C3 U78AB.001.WZSJBM3-P1-C3 C3 61696 553714178 U78AB.001.WZSJBM3-P1-C3 C3 false 10 Gigabit Ethernet-SFP+ SR PCI-E adapter 5287 5284 5287 5284 U78AB.001.WZSJBM3 1808 512 1808 6562 2 6562 false false false false false false false false false false 553714179 10 Gigabit Ethernet-SFP+ SR 
PCI-E adapter U78AB.001.WZSJBM3-P1-C4 U78AB.001.WZSJBM3-P1-C4 C4 1808 553714179 U78AB.001.WZSJBM3-P1-C4 C4 false Empty slot 0 0 0 U78AB.001.WZSJBM3 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553714180 Empty slot U78AB.001.WZSJBM3-P1-C5 U78AB.001.WZSJBM3-P1-C5 C5 65535 553714180 U78AB.001.WZSJBM3-P1-C5 C5 false Empty slot 0 0 0 U78AB.001.WZSJBM3 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553714181 Empty slot U78AB.001.WZSJBM3-P1-C6 U78AB.001.WZSJBM3-P1-C6 C6 65535 553714181 U78AB.001.WZSJBM3-P1-C6 C6 BEEAB2 13857705823079956480 5 6 7 8 9 1:256,1:512,1:1024,1:2048,1:4096,1:8192 256 256 4096 0 98304 0 0 0 88832 256 none 0 0 6 6 0 16 98304 6 1 0 Not_Mirrored Not_Mirrored 256 1280 0 0 0 0 88832 256 none 256 0 false 256 8 7.6 64 64 256 64 64 64 64 0.00 8 0 256 0.1 7.6 64 default POWER6 POWER6_Plus POWER7 false 32 2 1 16 None 8246 L1C 0604CAA false 80 1 80 65535 80 16 true 9.1.2.5 false Not Ready 00070000 operating Server-8246-L1C-SN0604CAA 1378504498157 true 4 16 0 0 16 true true false a168a3ec-bb3e-3ead-86c1-7d98b9d50239 ManagedSystem 2013-09-06T07:57:33.227-04:00 IBM Power Systems Management Console a168a3ec-bb3e-3ead-86c1-7d98b9d50239 1378467301640 Normal Temporary normal Reboot 1 Normal Temporary normal autostart autostart fast none true true true true false false true false false true true false false true true true true true false false true true true true false true true true true true true false true true true true true true false true true true true true false false true false true true true true true true true true true true 65518 553713674 PCI-E SAS Controller U78AB.001.WZSJA7T-P1-T9 U78AB.001.WZSJA7T-P1-T9 T9 825 553779211 Universal Serial Bus UHC Spec U78AB.001.WZSJA7T-P1-T5 U78AB.001.WZSJA7T-P1-T5 T5 53 553844748 Empty slot U78AB.001.WZSJA7T-P1-C18 U78AB.001.WZSJA7T-P1-C18 C18 65535 553910285 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 
U78AB.001.WZSJA7T-P1-C7 U78AB.001.WZSJA7T-P1-C7 C7 5719 553714177 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJA7T-P1-C2 U78AB.001.WZSJA7T-P1-C2 C2 61696 553714178 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJA7T-P1-C3 U78AB.001.WZSJA7T-P1-C3 C3 61696 553714179 10 Gigabit Ethernet-SFP+ SR PCI-E adapter U78AB.001.WZSJA7T-P1-C4 U78AB.001.WZSJA7T-P1-C4 C4 1808 553714180 Empty slot U78AB.001.WZSJA7T-P1-C5 U78AB.001.WZSJA7T-P1-C5 C5 65535 553714181 Empty slot U78AB.001.WZSJA7T-P1-C6 U78AB.001.WZSJA7T-P1-C6 C6 65535 U78AB.001.WZSJA7T-P1 536870922 PHB 10 10 U78AB.001.WZSJA7T-P1 536870923 PHB 11 11 U78AB.001.WZSJA7T-P1 536870924 PHB 12 12 U78AB.001.WZSJA7T-P1 536870925 PHB 13 13 U78AB.001.WZSJA7T-P1 536871424 PHB 512 512 U78AB.001.WZSJA7T-P1 536871425 PHB 513 513 U78AB.001.WZSJA7T-P1 536871426 PHB 514 514 U78AB.001.WZSJA7T-P1 536871427 PHB 515 515 U78AB.001.WZSJA7T-P1 536871428 PHB 516 516 U78AB.001.WZSJA7T-P1 536871429 PHB 517 517 false PCI-E SAS Controller 2053 2054 2055 5901 5909 5911 2053 2054 2055 5901 5909 5911 2053 2054 2055 5901 5909 5911 U78AB.001.WZSJA7T 825 260 825 4116 1 4116 false false false false false false false false false false 553713674 PCI-E SAS Controller U78AB.001.WZSJA7T-P1-T9 U78AB.001.WZSJA7T-P1-T9 T9 825 553713674 U78AB.001.WZSJA7T-P1-T9 T9 false Universal Serial Bus UHC Spec U78AB.001.WZSJA7T 53 4099 53 4147 170 4147 false false false false false false false false false false 553779211 Universal Serial Bus UHC Spec U78AB.001.WZSJA7T-P1-T5 U78AB.001.WZSJA7T-P1-T5 T5 53 553779211 U78AB.001.WZSJA7T-P1-T5 T5 false Empty slot 0 0 0 U78AB.001.WZSJA7T 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553844748 Empty slot U78AB.001.WZSJA7T-P1-C18 U78AB.001.WZSJA7T-P1-C18 C18 65535 553844748 U78AB.001.WZSJA7T-P1-C18 C18 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78AB.001.WZSJA7T 5719 512 5719 5348 1 5348 false false false false false 
false false false false false 553910285 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short U78AB.001.WZSJA7T-P1-C7 U78AB.001.WZSJA7T-P1-C7 C7 5719 553910285 U78AB.001.WZSJA7T-P1-C7 C7 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSJA7T 61696 3076 61696 4319 3 4319 false false false false false false false false false false 553714177 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJA7T-P1-C2 U78AB.001.WZSJA7T-P1-C2 C2 61696 553714177 U78AB.001.WZSJA7T-P1-C2 C2 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSJA7T 61696 3076 61696 4319 3 4319 false false false false false false false false false false 553714178 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSJA7T-P1-C3 U78AB.001.WZSJA7T-P1-C3 C3 61696 553714178 U78AB.001.WZSJA7T-P1-C3 C3 false 10 Gigabit Ethernet-SFP+ SR PCI-E adapter 5287 5284 5287 5284 U78AB.001.WZSJA7T 1808 512 1808 6562 2 6562 false false false false false false false false false false 553714179 10 Gigabit Ethernet-SFP+ SR PCI-E adapter U78AB.001.WZSJA7T-P1-C4 U78AB.001.WZSJA7T-P1-C4 C4 1808 553714179 U78AB.001.WZSJA7T-P1-C4 C4 false Empty slot 0 0 0 U78AB.001.WZSJA7T 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553714180 Empty slot U78AB.001.WZSJA7T-P1-C5 U78AB.001.WZSJA7T-P1-C5 C5 65535 553714180 U78AB.001.WZSJA7T-P1-C5 C5 false Empty slot 0 0 0 U78AB.001.WZSJA7T 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553714181 Empty slot U78AB.001.WZSJA7T-P1-C6 U78AB.001.WZSJA7T-P1-C6 C6 65535 553714181 U78AB.001.WZSJA7T-P1-C6 C6 8A6984 13857705823079366656 5 6 7 8 9 1:256,1:512,1:1024,1:2048,1:4096,1:8192 256 0 4096 0 98304 0 0 0 76032 256 none 0 0 6 6 0 16 98304 6 1 0 Not_Mirrored Not_Mirrored 256 1536 0 0 0 0 76032 256 none 256 0 false 256 8 1 64 64 256 64 64 64 64 0.00 8 0 256 0.1 1 64 default POWER6 POWER6_Plus POWER7 false 32 2 1 16 
None 8246 L1C 0604C6A false 80 1 80 65535 80 16 true 9.1.2.4 false Not Ready 00070000 operating Server-8246-L1C-SN0604C6A 1378504492299 true 4 16 0 0 16 true true false END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_lpar_feed.txt0000664000175000017500000062614013571367171023054 0ustar neoneo00000000000000INFO{ {'comment': 'Created for networking-powervm unit tests', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'LogicalPartition'} END OF SECTION} HEADERS{ {'x-powered-by': 'Servlet/3.0', 'transfer-encoding': 'chunked', 'set-cookie': 'JSESSIONID=00002EyMEecWDIzdx_K0LwQNiUO:aa95eb5b-d145-4cd8-9030-8b370106cfee; Path=/; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Fri, 16 Aug 2013 09:49:34 GMT', 'etag': '959374938', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Fri, 16 Aug 2013 09:49:40 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 2b20e3c9-f0b1-34dc-b321-5ed659c22798 2014-12-01T22:56:38.358Z IBM Power Systems Management Console 4AC82E78-8EFF-4BAA-87C9-6B243D0C1359 LogicalPartition 2014-12-01T22:56:46.781Z IBM Power Systems Management Console -384789939 4AC82E78-8EFF-4BAA-87C9-6B243D0C1359 1416448881635 false 127 0 POWER7 On true false false false false false normal 0604C7AQ Unknown false false false false false 26 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false nls2-a7444660-00000002 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 4AC82E78-8EFF-4BAA-87C9-6B243D0C1359 default 0 0 none 24755014396416 false Linux ppc64 false false false false Not_Migrating Invalid None 0C8BC782-E312-40F7-ACAD-81EB3B9FB06D LogicalPartition 2014-12-01T22:56:46.783Z IBM Power Systems Management Console -626757425 0C8BC782-E312-40F7-ACAD-81EB3B9FB06D 1416448881690 false 127 0 POWER7 On true false false false false 
false normal 0604C7AP Unknown false false false false false 25 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false phil11-b8e59817-00000031 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 0C8BC782-E312-40F7-ACAD-81EB3B9FB06D default 0 0 none 24755107533824 false Linux ppc64 false false false false Not_Migrating Invalid None 7CFDD55B-E0D7-4B8C-8254-9305E31BB1DC LogicalPartition 2014-12-01T22:56:46.786Z IBM Power Systems Management Console 321859187 7CFDD55B-E0D7-4B8C-8254-9305E31BB1DC 1416448881697 false 127 0 POWER7 On true false false false true false normal 0604C7AM Unknown false false false false false 22 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false vm_2-9093a051-00000015 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 0 error AIX/Linux 7CFDD55B-E0D7-4B8C-8254-9305E31BB1DC default 0 0 none 24755106659072 false B200F005 LP=00022 false false false false Not_Migrating Invalid None 292ACAF5-C96B-447A-8C7E-7503D80AA33E LogicalPartition 2014-12-01T22:56:46.788Z IBM Power Systems Management Console 1348160875 292ACAF5-C96B-447A-8C7E-7503D80AA33E 1416448881702 false 127 0 POWER7 On true false false false false false normal 0604C7AI Unknown false false false false false 18 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false Joe_dbug-6ea996ec-00000017 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 292ACAF5-C96B-447A-8C7E-7503D80AA33E default 0 0 none 24754853956096 false Linux ppc64 false false false false Not_Migrating Invalid None 615C9134-243D-4A11-93EB-C0556664B761 LogicalPartition 2014-12-01T22:56:46.791Z IBM Power Systems Management Console -267132877 615C9134-243D-4A11-93EB-C0556664B761 1416448881707 false 127 0 POWER7 On true false 
false false false false normal 0604C7AG Unknown false false false false false 16 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false RHEL-3a7154de-00000015 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 615C9134-243D-4A11-93EB-C0556664B761 default 0 0 none 24754852563968 false Linux ppc64 false false false false Not_Migrating Invalid None 0C0A6EBE-7BF4-4707-8780-A140F349E42E LogicalPartition 2014-12-01T22:56:46.794Z IBM Power Systems Management Console -690716895 0C0A6EBE-7BF4-4707-8780-A140F349E42E 1416448881712 false 127 0 POWER7 On true false false false false false normal 0604C7AF Unknown false false false false false 15 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false 49-9e525235-0000008e false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 0C0A6EBE-7BF4-4707-8780-A140F349E42E default 0 0 none 24754913647360 false Linux ppc64 false false false false Not_Migrating Invalid None 0FB69DD7-4B93-4C09-8916-8BC9821ABAAC LogicalPartition 2014-12-01T22:56:46.796Z IBM Power Systems Management Console -512453581 0FB69DD7-4B93-4C09-8916-8BC9821ABAAC 1416448881717 false 127 0 POWER7 On true false false false false false normal 0604C7AE Unknown false false false false false 14 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 256 2048 false Ipv4_RHEL_Clo-dcd3edb4-0000000b false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 0FB69DD7-4B93-4C09-8916-8BC9821ABAAC default 0 0 inactive 24755025661440 false 00000000 false false false false Not_Migrating Invalid None 231B2637-CA04-4322-B752-DEF133D2D1CE LogicalPartition 2014-12-01T22:56:46.799Z IBM Power Systems Management Console 1973963698 231B2637-CA04-4322-B752-DEF133D2D1CE 1416448881724 true 127 0 
POWER6_Plus Disabled false false false false false false normal 0604C7A97 Linux/Red Hat 2.6.18-348.el5 5.9 true true true true true 151 100 100 false false 0 0 8192 0.0 6 0 8192 0 8192 0 0.0 6 0 0 8192 8192 0 8192 false false 0 8192 8192 false PowerVC_Import_RHEL59 false 0.2 2 0.2 2 0.2 2 0 capped false capped false 2 0.2 0.2 0.2 0 2 2 0.2 running AIX/Linux 231B2637-CA04-4322-B752-DEF133D2D1CE default 0 0 active 9.1.2.4 24755061688576 false Linux ppc64 false false false false Not_Migrating Invalid None 3547B708-FF25-43C3-B308-F9A161FCC7E0 LogicalPartition 2014-12-01T22:56:46.801Z IBM Power Systems Management Console -825277951 3547B708-FF25-43C3-B308-F9A161FCC7E0 1416448881729 true 127 0 POWER7 Disabled false false false false false false normal 0604C7A96 Unknown false false false false false 150 40 40 false false 0 0 8192 0.0 6 0 8192 0 8192 0 0.0 6 0 0 8192 8192 0 8192 false false 0 8192 8192 false PowerVC_Import_RHEL63 false 0.4 2 0.4 2 0.4 2 0 capped false capped false 2 0.4 0.4 0.4 0 2 2 0.4 running AIX/Linux 3547B708-FF25-43C3-B308-F9A161FCC7E0 default 0 0 none 24754873859584 false AA060011 false false false false Not_Migrating Invalid None 55562855-B8FD-4526-8C4A-D0A7FBA0B983 LogicalPartition 2014-12-01T22:56:46.804Z IBM Power Systems Management Console 556170948 55562855-B8FD-4526-8C4A-D0A7FBA0B983 1416448881734 true 127 0 POWER7 Disabled false false false false false false normal 0604C7A3 Linux/SuSE 2.6.27.19-5-ppc64 11 true true true true true 3 100 100 false false 0 0 8192 0.0 6 0 8192 0 8192 0 0.0 6 0 0 8192 8192 0 8192 false false 0 8192 8192 false PowerVC_Import_SLES11 false 0.2 2 0.2 2 0.2 2 0 capped false capped false 2 0.2 0.2 0.2 0 2 2 0.2 running AIX/Linux 55562855-B8FD-4526-8C4A-D0A7FBA0B983 default 0 0 active 9.1.2.5 24754987783680 false SuSE Linux false false false false Not_Migrating Invalid None 39467CA1-7382-4495-83EA-F7B0B17CACF9 LogicalPartition 2014-12-01T22:56:46.807Z IBM Power Systems Management Console 1386014245 
39467CA1-7382-4495-83EA-F7B0B17CACF9 1416448881740 false 127 0 POWER7 On false true false false false false false normal 0604C7AU Unknown false false false false false 30 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false nls2-52baafb3-00000002 false 0.1 1 0.5 1 0.1 1 0 128 uncapped false uncapped false 1 0.5 0.1 0.1 0 128 1 1 0.1 128 running AIX/Linux 39467CA1-7382-4495-83EA-F7B0B17CACF9 default 0 0 inactive 24754852103680 false Linux ppc64 false false false false Not_Migrating Invalid None 32AA6AA5-CCE6-4523-860C-0852455036BE LogicalPartition 2014-12-01T22:56:46.810Z IBM Power Systems Management Console -320367139 32AA6AA5-CCE6-4523-860C-0852455036BE 1416448881747 false 127 0 POWER7 On false true false false false false false normal 0604C7A2 Unknown false false false false false 2 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 256 2048 false RHEL_1-0c472430-00000001 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 32AA6AA5-CCE6-4523-860C-0852455036BE default 0 0 inactive 24754949942272 false 00000000 false false false false Not_Migrating Invalid None 263EE77B-AD6E-4920-981A-4B7D245B8571 LogicalPartition 2014-12-01T22:56:46.813Z IBM Power Systems Management Console 2018406272 263EE77B-AD6E-4920-981A-4B7D245B8571 1416448881753 false 127 0 POWER7 On false true false false false false false normal 0604C7A5 Unknown false false false false false 5 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 256 2048 false rhel_65_fvt_2-2ff8b411-00000016 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.00 0 not activated AIX/Linux 263EE77B-AD6E-4920-981A-4B7D245B8571 default 0 0 inactive 24755094916608 false 00000000 false false false false Not_Migrating Invalid None 3CE30EC6-C98A-4A58-A764-09DAC7C324BC LogicalPartition 2014-12-01T22:56:46.815Z 
IBM Power Systems Management Console 2109022456 3CE30EC6-C98A-4A58-A764-09DAC7C324BC 1416448881759 false 127 0 POWER7 On false true false false false false false normal 0604C7A6 Unknown false false false false false 6 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false rhel_65_fvt_2-2e51e8b4-00000018 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 3CE30EC6-C98A-4A58-A764-09DAC7C324BC default 0 0 inactive 24754893841664 false Linux ppc64 false false false false Not_Migrating Invalid None 25244EF9-7299-497B-A6CE-20E5052B8253 LogicalPartition 2014-12-01T22:56:46.818Z IBM Power Systems Management Console 967528247 25244EF9-7299-497B-A6CE-20E5052B8253 1416530114667 false 127 0 POWER7 On false true false false false false false normal 0604C7A7 Unknown false false false false false 7 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false test1-57909402-00000001 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 25244EF9-7299-497B-A6CE-20E5052B8253 default 0 0 inactive 24755106855168 false Linux ppc64 false false false false Not_Migrating Invalid None 11CFB75E-B320-4564-882C-9F07BF76D0C3 LogicalPartition 2014-12-01T22:56:46.820Z IBM Power Systems Management Console 1594741490 11CFB75E-B320-4564-882C-9F07BF76D0C3 1416980337521 false 127 0 POWER7 On false true false false false false false normal 0604C7A8 Unknown false false false false false 8 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false volume-Image_-d939b135-00000008 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 11CFB75E-B320-4564-882C-9F07BF76D0C3 default 0 0 inactive 24754913874944 false Linux ppc64 false false false false Not_Migrating Invalid None 
4BA68943-9A77-4B0B-A936-6A3F9EE9B7C7 LogicalPartition 2014-12-01T22:56:46.823Z IBM Power Systems Management Console 818759935 4BA68943-9A77-4B0B-A936-6A3F9EE9B7C7 1416981582506 false 127 0 POWER7 On false true false false false false false normal 0604C7A9 Linux/Red Hat 2.6.32-358.el6.ppc64 6.4 true true true true true 9 64 64 false false 0 0 2048 0.0 6 0 2048 0 2048 0 0.0 6 0 0 2048 2048 0 2048 false false 0 2048 2048 false dst_demo-a981b540-00000009 false 0.5 1 0.5 1 0.5 1 0 128 uncapped false uncapped false 1 0.5 0.5 0.5 0 128 1 1 0.5 128 running AIX/Linux 4BA68943-9A77-4B0B-A936-6A3F9EE9B7C7 default 0 0 active 9.114.254.251 24754873945088 false Linux ppc64 false false false false Not_Migrating Invalid None END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/vscsibus_feed.txt0000664000175000017500000007054413571367171022772 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh vscsibus_feed1.txt # #################################################### INFO{ {'comment': None, 'path': 'VirtualIOServer/3E3F9BFC-C4EE-439E-B70A-1D369213ED83/VirtualSCSIBus', 'reason': 'OK', 'status': 200} END OF SECTION} HEADERS{ {'Content-Length': '27934', 'X-Powered-By': 'Servlet/3.1', 'X-TransactionRecord-Uuid': '8b7ca9fd-71b6-4168-bdce-37ebb3600f0c', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Wed, 13 Jul 2016 14:51:28 GMT', 'X-Transaction-ID': 'XT10000338', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Set-Cookie': 'JSESSIONID=00003kvClDKUhedByC9kuLLr6Tj:cb9a9e54-8c19-495b-b950-fb8f07c26228; Path=/; Secure; HttpOnly', 'Date': 'Wed, 13 Jul 2016 14:51:29 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'ETag': '-1862965557'} END OF SECTION} BODY{ fd5ac123-81b9-30db-8aaa-01cf6c5ecf61 
2016-07-13T10:51:28.681-04:00 IBM Power Systems Management Console 1f25efc1-a42b-3384-85e7-f37158f46615 VirtualSCSIBus 2016-07-13T10:51:29.635-04:00 IBM Power Systems Management Console 96346471 1f25efc1-a42b-3384-85e7-f37158f46615 0 Client U8286.42A.21C1B6V-V5-C2 U8286.42A.21C1B6V-V5-C2 5 2 2 5 Server U8286.42A.21C1B6V-V2-C5 U8286.42A.21C1B6V-V2-C5 2 5 vhost0 cfg_My_OS_Image_V_3dff2ef5_000000.iso 5 2 1eU8286.42A.21C1B6V-V2-C5 MPIO IBM 2076 FC Disk U78C9.001.WZS094N-P1-C12-T1-W500507680B22447B-LA000000000000 none NoReserve Round_Robin 01M0lCTTIxNDU1MjQ2MDA1MDc2NDAwODEwMDk0QjAwMDAwMDAwMDAwNjQ1Mg== false 10240 hdisk10 active 332136005076400810094B00000000000645204214503IBMfcp false dm9sdW1lLU15X09TX0ltYWdlX1YtM2RmZjJlZjUtMDAwMDAwMjctYm9vdC0tMTM1MTJmODEtMmU2ZA== NjAwNTA3NjQwMDgxMDA5NEIwMDAwMDAwMDAwMDY0NTI= 0x8100000000000000 vtscsi0 0864aae4f28be41501 cfg_My_OS_Image_V_3dff2ef5_000000.iso 0ecfg_My_OS_Image_V_3dff2ef5_000000.iso rw 0 0x8200000000000000 vtopt0 19545ebb7b46f0f5f3 b9480e09-fc17-37f3-b1fd-2e9f61409c14 VirtualSCSIBus 2016-07-13T10:51:29.636-04:00 IBM Power Systems Management Console -352516241 b9480e09-fc17-37f3-b1fd-2e9f61409c14 0 Client U8286.42A.21C1B6V-V7-C3 U8286.42A.21C1B6V-V7-C3 7 3 2 7 Server U8286.42A.21C1B6V-V2-C7 U8286.42A.21C1B6V-V2-C7 2 7 vhost1 cfg_RHEL65Image_81cb5414_00000005.iso 7 3 1eU8286.42A.21C1B6V-V2-C7 MPIO IBM 2076 FC Disk U78C9.001.WZS094N-P1-C12-T1-W500507680B22447B-LB000000000000 none NoReserve Round_Robin 01M0lCTTIxNDU1MjQ2MDA1MDc2NDAwODEwMDk0QjAwMDAwMDAwMDAwNjRGOQ== false 10240 hdisk11 active 332136005076400810094B0000000000064F904214503IBMfcp false dm9sdW1lLVJIRUw2NUltYWdlLTgxY2I1NDE0LTAwMDAwMDA1LWJvb3QtMC1jZWI0YmU2ZC0zNzM3 NjAwNTA3NjQwMDgxMDA5NEIwMDAwMDAwMDAwMDY0Rjk= 0x8100000000000000 vtscsi2 08904425b7f1223a2e cfg_RHEL65Image_81cb5414_00000005.iso 0ecfg_RHEL65Image_81cb5414_00000005.iso rw 0 0x8200000000000000 vtopt2 1922ec16abad552a57 983a2155-1d85-37b2-8ed9-72bfed018bda VirtualSCSIBus 2016-07-13T10:51:29.637-04:00 IBM 
Power Systems Management Console -1378494912 983a2155-1d85-37b2-8ed9-72bfed018bda 0 Client U8286.42A.21C1B6V-V4-C2 U8286.42A.21C1B6V-V4-C2 4 2 2 8 Server U8286.42A.21C1B6V-V2-C8 U8286.42A.21C1B6V-V2-C8 2 8 vhost2 cfg_RHEL65CI_5eb54307_00000023.iso 4 2 1eU8286.42A.21C1B6V-V2-C8 MPIO IBM 2076 FC Disk U78C9.001.WZS094N-P1-C12-T1-W500507680B22447B-L9000000000000 none NoReserve Round_Robin 01M0lCTTIxNDU1MjQ2MDA1MDc2NDAwODEwMDk0QjAwMDAwMDAwMDAwNTc5Rg== false 10240 hdisk8 active 332136005076400810094B00000000000579F04214503IBMfcp false dm9sdW1lLVJIRUw2NUNJLTVlYjU0MzA3LTAwMDAwMDIzLWJvb3QtMC1kM2ZiOWQ0Mi0yYjA0 NjAwNTA3NjQwMDgxMDA5NEIwMDAwMDAwMDAwMDU3OUY= 0x8100000000000000 vtscsi1 08295db3a41c2c9ceb cfg_RHEL65CI_5eb54307_00000023.iso 0ecfg_RHEL65CI_5eb54307_00000023.iso rw 0 0x8200000000000000 vtopt1 19f5590818621572a8 a0d5bae9-fd1a-3a07-9f75-93ff928abbee VirtualSCSIBus 2016-07-13T10:51:29.639-04:00 IBM Power Systems Management Console 460874914 a0d5bae9-fd1a-3a07-9f75-93ff928abbee 0 Client U8286.42A.21C1B6V-V8-C4 U8286.42A.21C1B6V-V8-C4 8 4 2 9 Server U8286.42A.21C1B6V-V2-C9 U8286.42A.21C1B6V-V2-C9 2 9 vhost3 cfg_myvm826_fd7fdfe0_pvm.iso 8 4 1eU8286.42A.21C1B6V-V2-C9 1 None b_myvm826__fd7f 0300f9c1b600004c0000000155cc7642a6.1 0x8100000000000000 vtscsi3 09c57279efec761e0c cfg_myvm826_fd7fdfe0_pvm.iso 0ecfg_myvm826_fd7fdfe0_pvm.iso rw 0 0x8200000000000000 vtopt3 19aa6872e4f55163d8 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_virtual_switch.txt0000664000175000017500000001063213571367171024173 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: k2http.py -refresh sea_mgr_virtual_switch.txt # #################################################### INFO{ {'comment': 'Created by thorst.', 'status': 200, 'pw': 'Passw0rd', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/VirtualSwitch'} END OF SECTION} HEADERS{ {'content-length': '2927', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000NDn35hM1wmiEggkwHYt86o5:375f7c5b-d5fc-4d80-91de-1d4d29170e01; Path=/; Secure; HttpOnly', 'x-hmc-schema-version': 'V1_1_0', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Thu, 18 Sep 2014 07:04:42 GMT', 'x-transaction-id': 'XT10023645', 'etag': '249747277', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Thu, 18 Sep 2014 07:04:42 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 0b81f37e-06b3-39dc-8d10-35734aaddac5 2015-02-17T23:07:17.073Z IBM Power Systems Management Console e1a852cb-2be5-3a51-9147-43761bc3d720 VirtualSwitch 2015-02-17T23:07:17.128Z IBM Power Systems Management Console -1301754041 e1a852cb-2be5-3a51-9147-43761bc3d720 1424132682404 0 Veb ETHERNET0(Default) END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/fake_vios_mappings.txt0000664000175000017500000070104713571367171024011 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. 
ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh fake_vios_mappings.txt # #################################################### INFO{ {'comment': 'Use for checking the mappings of the VIOS', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'VirtualIOServer/014C1F78-B210-40B2-A12E-C82316B878CA'} END OF SECTION} HEADERS{ {'content-length': '31567', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000qnsHzMw-UAQPrFwtNaS5qcC:945f5b75-c329-4322-89cf-e90b74473dc9; Path=/; Secure; HttpOnly', 'expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'last-modified': 'Wed, 05 Feb 2014 13:53:20 GMT', 'etag': '378412626', 'cache-control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'date': 'Wed, 05 Feb 2014 13:53:19 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ 014C1F78-B210-40B2-A12E-C82316B878CA VirtualIOServer 2015-01-09T02:00:04.658Z IBM Power Systems Management Console 1680529533 014C1F78-B210-40B2-A12E-C82316B878CA 1420693073454 true 191 256 POWER7 On true false false false false false normal 0604C7A1 VIOS 2.2.3.3 6100-09-03-1415 true true true true true 1 200 false Universal Serial Bus UHC Spec U78AB.001.WZSH5ZY 53 4099 53 53 4147 170 4147 4147 false false false false false false false false false false 553779211 Universal Serial Bus UHC Spec U78AB.001.WZSH5ZY-P1-T5 U78AB.001.WZSH5ZY-P1-T5 T5 53 true 553779211 U78AB.001.WZSH5ZY-P1-T5 T5 false Empty slot 0 0 0 U78AB.001.WZSH5ZY 65535 65535 65535 65535 255 65535 false false false false false false false false false false 553844748 Empty slot U78AB.001.WZSH5ZY-P1-C18 U78AB.001.WZSH5ZY-P1-C18 C18 65535 true 553844748 U78AB.001.WZSH5ZY-P1-C18 C18 false 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 5899 5899 5899 U78AB.001.WZSH5ZY 5719 512 5719 1056 5348 1 5348 4116 false false false false false false false false false false 553910285 1 Gigabit Ethernet (UTP) 4 Port Adapter PCIE-4x/Short 
U78AB.001.WZSH5ZY-P1-C7 U78AB.001.WZSH5ZY-P1-C7 C7 5719 true 553910285 U78AB.001.WZSH5ZY-P1-C7 C7 false Unknown U78AB.001.WZSH5ZY 0 65535 0 0 255 0 false false false false false false false false false false 553714177 Unknown U78AB.001.WZSH5ZY-P1-C2 U78AB.001.WZSH5ZY-P1-C2 C2 0 true 553714177 U78AB.001.WZSH5ZY-P1-C2 C2 false Quad 10/100/1000 Base-TX PCI-Express Adapter U78AB.001.WZSH5ZY 4284 0 4284 872 32902 14 32902 4116 false false false false false false false false false false 553714178 Quad 10/100/1000 Base-TX PCI-Express Adapter U78AB.001.WZSH5ZY-P1-C3 U78AB.001.WZSH5ZY-P1-C3 C3 4284 true 553714178 U78AB.001.WZSH5ZY-P1-C3 C3 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSH5ZY 61696 3076 61696 906 4319 3 4319 4116 false false false false false false false false false false 553714180 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSH5ZY-P1-C5 U78AB.001.WZSH5ZY-P1-C5 C5 U78AB.001.WZSH5ZY-P1-C5-T1 MPIO IBM 2076 FC Disk U78AB.001.WZSH5ZY-P1-C5-T1-W500507680215B741-L1000000000000 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODE4OTk0QTAwMDAwMDAwMDAwMDhEMg== true 20480 hdisk2 active 332136005076802818994A0000000000008D204214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSH5ZY-P1-C5-T1-W500507680304104A-L0 none SinglePath Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMTE2Mw== false 102400 hdisk0 active 332136005076300838041300000000000116304214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSH5ZY-P1-C5-T1-W500507680215B741-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODE4OTk0QTAwMDAwMDAwMDAwMDAxOQ== false 0 hdisk1 active 332136005076802818994A00000000000001904214503IBMfcp true fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T2 fcs3 1aU78AB.001.WZSH5ZY-P1-C5-T2 10000090FA1B6899 553714180 U78AB.001.WZSH5ZY-P1-C5 C5 false 8 Gigabit PCI Express Dual Port Fibre Channel Adapter 5735 5735 5735 U78AB.001.WZSH5ZY 61696 3076 61696 906 4319 3 
4319 4116 false false false false false false false false false false 553714181 8 Gigabit PCI Express Dual Port Fibre Channel Adapter U78AB.001.WZSH5ZY-P1-C6 U78AB.001.WZSH5ZY-P1-C6 C6 U78AB.001.WZSH5ZY-P1-C6-T1 fcs4 1aU78AB.001.WZSH5ZY-P1-C6-T1 10000090FA109924 64 64 U78AB.001.WZSH5ZY-P1-C6-T2 fcs5 1aU78AB.001.WZSH5ZY-P1-C6-T2 10000090FA109925 553714181 U78AB.001.WZSH5ZY-P1-C6 C6 200 false false 256 28672 0.0 6 28672 1024 256 0.0 6 0 0 28672 28672 0 1024 false false 0 28672 1024 false vios_1_Drew false 1.6 16 16 16 0.1 1 0 128 uncapped false uncapped false 16 16 0.1 1.6 0 128 1 16 1.6 128 running Virtual IO Server 014C1F78-B210-40B2-A12E-C82316B878CA default 0 0 active 9.1.2.4 24754952464128 true true true vopt_2c7aa01349714368a3d040bb0d613a67 0evopt_2c7aa01349714368a3d040bb0d613a67 rw 0.000000 vopt_2e51e8b4b9f04b159700e654b2436a01 0evopt_2e51e8b4b9f04b159700e654b2436a01 rw 0.000000 vopt_84d7bfcf44964f398e60254776b94d41 0evopt_84d7bfcf44964f398e60254776b94d41 rw 0.000000 vopt_8ecbc2bd6daa4c8bb6cf5121a8a3540b 0evopt_8ecbc2bd6daa4c8bb6cf5121a8a3540b rw 0.000000 vopt_9f0a80e31f244404b39e713ce5e50337 0evopt_9f0a80e31f244404b39e713ce5e50337 rw 0.000000 vopt_d020202e29ab486c9c7b32e543c154a3 0evopt_d020202e29ab486c9c7b32e543c154a3 rw 0.000000 vopt_de86c46e07004993b412c948bd5047c2 0evopt_de86c46e07004993b412c948bd5047c2 rw 0.000000 vopt_e81565af2b124db8947b8e84295991be 0evopt_e81565af2b124db8947b8e84295991be rw 0.000000 vopt_f970c7fccb0243b98b161f16346fd7bd 0evopt_f970c7fccb0243b98b161f16346fd7bd rw 0.000000 VMLibrary 1 true MPIO IBM 2076 FC Disk U78AB.001.WZSH5ZY-P1-C5-T1-W500507680215B741-L1000000000000 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODE4OTk0QTAwMDAwMDAwMDAwMDhEMg== true 20480 hdisk2 active 332136005076802818994A0000000000008D204214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSH5ZY-P1-C5-T1-W500507680304104A-L0 none SinglePath Failover 01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMTE2Mw== false 102400 hdisk0 active 
332136005076300838041300000000000116304214503IBMfcp true MPIO IBM 2076 FC Disk U78AB.001.WZSH5ZY-P1-C5-T1-W500507680215B741-L0 none SinglePath Failover 01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODE4OTk0QTAwMDAwMDAwMDAwMDAxOQ== false 0 hdisk1 active 332136005076802818994A00000000000001904214503IBMfcp true 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent6 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C3-T1 U78AB.001.WZSH5ZY-P1-C3-T1 13U78AB.001.WZSH5ZY-P1-C3-T1 disabled ent14 false 1 disabled 8192 true U8246.L2C.0604C7A-V1-C2 U8246.L2C.0604C7A-V1-C2 false true 2 ALL 1683B625E702 1 false false 0 ent10 1 U8246.L2C.0604C7A-V1-C3 U8246.L2C.0604C7A-V1-C3 false true 3 ALL 1683B625E703 4094 false 29 100 123 1000 2227 2777 2881 3001 3901 true 0 ent11 1 en14 9.1.2.4 255.255.255.0 Active 10b3fb44b976a3dc51 true U8246.L2C.0604C7A-V1-C2 U8246.L2C.0604C7A-V1-C2 false true 2 ALL 1683B625E702 1 false false 0 ent10 1 U8246.L2C.0604C7A-V1-C3 U8246.L2C.0604C7A-V1-C3 false true 3 ALL 1683B625E703 4094 false 29 100 123 1000 2227 2777 2881 3001 3901 true 0 ent11 1 true U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C43 U8246.L2C.0604C7A-V1-C43 1 false true 43 vfchost26 24 3 1dU8246.L2C.0604C7A-V1-C43 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C41 U8246.L2C.0604C7A-V1-C41 1 false true 41 vfchost25 23 3 1dU8246.L2C.0604C7A-V1-C41 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V22-C3 U8246.L2C.0604C7A-V22-C3 22 false true 3 1 39 c05076065a7c02e4 c05076065a7c02e5 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C39 U8246.L2C.0604C7A-V1-C39 1 false true 39 vfchost6 22 3 1dU8246.L2C.0604C7A-V1-C39 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 
1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C37 U8246.L2C.0604C7A-V1-C37 1 false true 37 vfchost24 21 3 1dU8246.L2C.0604C7A-V1-C37 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C35 U8246.L2C.0604C7A-V1-C35 1 false true 35 vfchost23 19 3 1dU8246.L2C.0604C7A-V1-C35 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V18-C3 U8246.L2C.0604C7A-V18-C3 18 false true 3 1 33 c05076065a7c02e2 c05076065a7c02e3 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C33 U8246.L2C.0604C7A-V1-C33 1 false true 33 vfchost22 18 3 1dU8246.L2C.0604C7A-V1-C33 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C79 U8246.L2C.0604C7A-V1-C79 1 false true 79 vfchost11 36 3 1dU8246.L2C.0604C7A-V1-C79 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C31 U8246.L2C.0604C7A-V1-C31 1 false true 31 vfchost21 17 3 1dU8246.L2C.0604C7A-V1-C31 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C77 U8246.L2C.0604C7A-V1-C77 1 false true 77 vfchost10 35 3 1dU8246.L2C.0604C7A-V1-C77 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V16-C3 U8246.L2C.0604C7A-V16-C3 16 false true 3 1 29 c05076065a7c02d6 
c05076065a7c02d7 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C29 U8246.L2C.0604C7A-V1-C29 1 false true 29 vfchost20 16 3 1dU8246.L2C.0604C7A-V1-C29 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V6-C3 U8246.L2C.0604C7A-V6-C3 6 false true 3 1 27 c05076065a7c030e c05076065a7c030f U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C27 U8246.L2C.0604C7A-V1-C27 1 false true 27 vfchost4 6 3 1dU8246.L2C.0604C7A-V1-C27 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C74 U8246.L2C.0604C7A-V1-C74 1 false true 74 vfchost9 40 3 1dU8246.L2C.0604C7A-V1-C74 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V15-C3 U8246.L2C.0604C7A-V15-C3 15 false true 3 1 26 c05076065a7c02d4 c05076065a7c02d5 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C26 U8246.L2C.0604C7A-V1-C26 1 false true 26 vfchost19 15 3 1dU8246.L2C.0604C7A-V1-C26 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C72 U8246.L2C.0604C7A-V1-C72 1 false true 72 vfchost8 39 3 1dU8246.L2C.0604C7A-V1-C72 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V14-C3 U8246.L2C.0604C7A-V14-C3 14 false false 3 1 24 c05076065a7c02e0 c05076065a7c02e1 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C24 U8246.L2C.0604C7A-V1-C24 1 false true 24 vfchost18 14 3 1dU8246.L2C.0604C7A-V1-C24 fcs2 
U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V14-C3 U8246.L2C.0604C7A-V14-C3 24 false false 3 1 24 c05076065a7c02e0 c05076065a7c02e1 Server U8246.L2C.0604C7A-V1-C24 U8246.L2C.0604C7A-V1-C24 1 false true 24 vfchost18 24 3 1dU8246.L2C.0604C7A-V1-C24 fcs2 Client U8246.L2C.0604C7A-V14-C3 U8246.L2C.0604C7A-V14-C3 124 false false 3 1 24 c05076065a7c02e0 c05076065a7c02e1 Server U8246.L2C.0604C7A-V1-C24 U8246.L2C.0604C7A-V1-C24 1 false true 24 vfchost18 124 3 1dU8246.L2C.0604C7A-V1-C24 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C71 U8246.L2C.0604C7A-V1-C71 1 false true 71 vfchost7 33 3 1dU8246.L2C.0604C7A-V1-C71 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C21 U8246.L2C.0604C7A-V1-C21 1 false true 21 vfchost17 13 3 1dU8246.L2C.0604C7A-V1-C21 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C19 U8246.L2C.0604C7A-V1-C19 1 false true 19 vfchost16 12 3 1dU8246.L2C.0604C7A-V1-C19 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C17 U8246.L2C.0604C7A-V1-C17 1 false true 17 vfchost15 11 3 1dU8246.L2C.0604C7A-V1-C17 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C14 U8246.L2C.0604C7A-V1-C14 1 false true 14 vfchost3 10 3 1dU8246.L2C.0604C7A-V1-C14 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client 
U8246.L2C.0604C7A-V151-C3 U8246.L2C.0604C7A-V151-C3 151 true true 3 1 12 c05076065a7c0002 c05076065a7c0003 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C12 U8246.L2C.0604C7A-V1-C12 1 true true 12 vfchost2 151 3 1dU8246.L2C.0604C7A-V1-C12 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V2-C3 U8246.L2C.0604C7A-V2-C3 2 false false 3 1 57 c05076065a7c030a c05076065a7c030b U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C57 U8246.L2C.0604C7A-V1-C57 1 false true 57 vfchost32 2 3 1dU8246.L2C.0604C7A-V1-C57 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V150-C8 U8246.L2C.0604C7A-V150-C8 150 true true 8 1 9 c05076065a7c0000 c05076065a7c0001 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C9 U8246.L2C.0604C7A-V1-C9 1 true true 9 vfchost1 150 8 1dU8246.L2C.0604C7A-V1-C9 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C7 U8246.L2C.0604C7A-V1-C7 1 false true 7 vfchost0 7 3 1dU8246.L2C.0604C7A-V1-C7 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C53 U8246.L2C.0604C7A-V1-C53 1 false true 53 vfchost30 29 3 1dU8246.L2C.0604C7A-V1-C53 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Client U8246.L2C.0604C7A-V5-C3 U8246.L2C.0604C7A-V5-C3 5 false false 3 1 5 c05076065a7c030c c05076065a7c030d U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C5 
U8246.L2C.0604C7A-V1-C5 1 false true 5 vfchost12 5 3 1dU8246.L2C.0604C7A-V1-C5 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C51 U8246.L2C.0604C7A-V1-C51 1 false true 51 vfchost29 28 3 1dU8246.L2C.0604C7A-V1-C51 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C49 U8246.L2C.0604C7A-V1-C49 1 false true 49 vfchost28 27 3 1dU8246.L2C.0604C7A-V1-C49 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C48 U8246.L2C.0604C7A-V1-C48 1 false true 48 vfchost5 20 3 1dU8246.L2C.0604C7A-V1-C48 fcs2 U78AB.001.WZSH5ZY-P1-C5-T1 fcs2 1aU78AB.001.WZSH5ZY-P1-C5-T1 10000090FA1B6898 59 64 Server U8246.L2C.0604C7A-V1-C46 U8246.L2C.0604C7A-V1-C46 1 false true 46 vhost25 27 2 1eU8246.L2C.0604C7A-V1-C46 Server U8246.L2C.0604C7A-V1-C42 U8246.L2C.0604C7A-V1-C42 1 false true 42 vhost23 vopt_84d7bfcf44964f398e60254776b94d41 24 2 1eU8246.L2C.0604C7A-V1-C42 vopt_84d7bfcf44964f398e60254776b94d41 0evopt_84d7bfcf44964f398e60254776b94d41 rw 0.000000 0x8100000000000000 vtopt1 19e1c8c2b323a0afb6 Server U8246.L2C.0604C7A-V1-C40 U8246.L2C.0604C7A-V1-C40 1 false true 40 vhost22 23 2 1eU8246.L2C.0604C7A-V1-C40 Client U8246.L2C.0604C7A-V22-C2 U8246.L2C.0604C7A-V22-C2 22 false true 2 1 38 U8246.L2C.0604C7A-V1-C38 Server U8246.L2C.0604C7A-V1-C38 U8246.L2C.0604C7A-V1-C38 1 false true 38 vhost3 22 2 U8246.L2C.0604C7A-V22-C2 1eU8246.L2C.0604C7A-V1-C38 Server U8246.L2C.0604C7A-V1-C36 U8246.L2C.0604C7A-V1-C36 1 false true 36 vhost21 21 2 1eU8246.L2C.0604C7A-V1-C36 Server U8246.L2C.0604C7A-V1-C34 U8246.L2C.0604C7A-V1-C34 1 false true 34 vhost20 19 2 
1eU8246.L2C.0604C7A-V1-C34 Client U8246.L2C.0604C7A-V18-C2 U8246.L2C.0604C7A-V18-C2 18 false true 2 1 32 U8246.L2C.0604C7A-V1-C32 Server U8246.L2C.0604C7A-V1-C32 U8246.L2C.0604C7A-V1-C32 1 false true 32 vhost19 18 2 U8246.L2C.0604C7A-V18-C2 1eU8246.L2C.0604C7A-V1-C32 Server U8246.L2C.0604C7A-V1-C78 U8246.L2C.0604C7A-V1-C78 1 false true 78 vhost8 36 2 1eU8246.L2C.0604C7A-V1-C78 Server U8246.L2C.0604C7A-V1-C30 U8246.L2C.0604C7A-V1-C30 1 false true 30 vhost18 17 2 1eU8246.L2C.0604C7A-V1-C30 Server U8246.L2C.0604C7A-V1-C76 U8246.L2C.0604C7A-V1-C76 1 false true 76 vhost7 35 2 1eU8246.L2C.0604C7A-V1-C76 Client U8246.L2C.0604C7A-V16-C2 U8246.L2C.0604C7A-V16-C2 16 false true 2 1 28 U8246.L2C.0604C7A-V1-C28 Server U8246.L2C.0604C7A-V1-C28 U8246.L2C.0604C7A-V1-C28 1 false true 28 vhost17 16 2 U8246.L2C.0604C7A-V16-C2 1eU8246.L2C.0604C7A-V1-C28 Server U8246.L2C.0604C7A-V1-C73 U8246.L2C.0604C7A-V1-C73 1 false true 73 vhost6 40 2 1eU8246.L2C.0604C7A-V1-C73 Client U8246.L2C.0604C7A-V15-C2 U8246.L2C.0604C7A-V15-C2 15 false true 2 1 25 U8246.L2C.0604C7A-V1-C25 Server U8246.L2C.0604C7A-V1-C25 U8246.L2C.0604C7A-V1-C25 1 false true 25 vhost16 asdfasfdasdf 15 2 U8246.L2C.0604C7A-V15-C2 1eU8246.L2C.0604C7A-V1-C25 0.125 None asdfasfdasdf 0300004c7a00007a00000001466c54110f.16 0x8100000000000000 vtscsi0 09c2a3212da4d24568 Server U8246.L2C.0604C7A-V1-C70 U8246.L2C.0604C7A-V1-C70 1 false true 70 vhost5 33 2 1eU8246.L2C.0604C7A-V1-C70 Client U8246.L2C.0604C7A-V6-C2 U8246.L2C.0604C7A-V6-C2 6 false true 2 1 23 U8246.L2C.0604C7A-V1-C23 Server U8246.L2C.0604C7A-V1-C23 U8246.L2C.0604C7A-V1-C23 1 false true 23 vhost1 vopt_2e51e8b4b9f04b159700e654b2436a01 6 2 U8246.L2C.0604C7A-V6-C2 1eU8246.L2C.0604C7A-V1-C23 vopt_2e51e8b4b9f04b159700e654b2436a01 0evopt_2e51e8b4b9f04b159700e654b2436a01 rw 0.000000 0x8100000000000000 vtopt2 194b906f47ed151ac3 Client U8246.L2C.0604C7A-V14-C2 U8246.L2C.0604C7A-V14-C2 14 false false 2 1 22 U8246.L2C.0604C7A-V1-C22 Server U8246.L2C.0604C7A-V1-C22 
U8246.L2C.0604C7A-V1-C22 1 false true 22 vhost15 14 2 U8246.L2C.0604C7A-V14-C2 1eU8246.L2C.0604C7A-V1-C22 Server U8246.L2C.0604C7A-V1-C20 U8246.L2C.0604C7A-V1-C20 1 false true 20 vhost14 13 2 1eU8246.L2C.0604C7A-V1-C20 Server U8246.L2C.0604C7A-V1-C18 U8246.L2C.0604C7A-V1-C18 1 false true 18 vhost13 12 2 1eU8246.L2C.0604C7A-V1-C18 Server U8246.L2C.0604C7A-V1-C16 U8246.L2C.0604C7A-V1-C16 1 false true 16 vhost12 11 2 1eU8246.L2C.0604C7A-V1-C16 Server U8246.L2C.0604C7A-V1-C61 U8246.L2C.0604C7A-V1-C61 1 false true 61 vhost4 39 2 1eU8246.L2C.0604C7A-V1-C61 Client U8246.L2C.0604C7A-V2-C2 U8246.L2C.0604C7A-V2-C2 2 false false 2 1 56 U8246.L2C.0604C7A-V1-C56 Server U8246.L2C.0604C7A-V1-C56 U8246.L2C.0604C7A-V1-C56 1 false true 56 vhost29 2 2 U8246.L2C.0604C7A-V2-C2 1eU8246.L2C.0604C7A-V1-C56 Server U8246.L2C.0604C7A-V1-C52 U8246.L2C.0604C7A-V1-C52 1 false true 52 vhost27 29 2 1eU8246.L2C.0604C7A-V1-C52 Client U8246.L2C.0604C7A-V5-C2 U8246.L2C.0604C7A-V5-C2 5 false false 2 1 4 U8246.L2C.0604C7A-V1-C4 Server U8246.L2C.0604C7A-V1-C4 U8246.L2C.0604C7A-V1-C4 1 false true 4 vhost9 5 2 U8246.L2C.0604C7A-V5-C2 1eU8246.L2C.0604C7A-V1-C4 Server U8246.L2C.0604C7A-V1-C50 U8246.L2C.0604C7A-V1-C50 1 false true 50 vhost26 28 2 1eU8246.L2C.0604C7A-V1-C50 Server U8246.L2C.0604C7A-V1-C47 U8246.L2C.0604C7A-V1-C47 1 false true 47 vhost2 20 2 1eU8246.L2C.0604C7A-V1-C47 Server U8246.L2C.0604C7A-V1-C6 U8246.L2C.0604C7A-V1-C6 1 false true 6 vhost0 vopt_2c7aa01349714368a3d040bb0d613a67 7 2 1eU8246.L2C.0604C7A-V1-C6 vopt_2c7aa01349714368a3d040bb0d613a67 0evopt_2c7aa01349714368a3d040bb0d613a67 rw 0.000000 0x8100000000000000 vtopt0 198aead5099de9c5bd Client U8246.L2C.0604C7A-V5-C2 U8246.L2C.0604C7A-V5-C2 5 false false 2 1 4 U8246.L2C.0604C7A-V1-C4 Server U8246.L2C.0604C7A-V1-C13 U8246.L2C.0604C7A-V1-C13 1 false true 13 vhost11 vopt_de86c46e07004993b412c948bd5047c2 10 2 1eU8246.L2C.0604C7A-V1-C13 vopt_de86c46e07004993b412c948bd5047c2 0evopt_de86c46e07004993b412c948bd5047c2 rw 0.000000 
0x8100000000000000 vtopt3 192083fe745a3e7c03 Server U8286.42A.103B264-V2-C3 U8286.42A.103B264-V2-C3 2 3 vhost1 hdisk5 65535 65535 1eU8286.42A.103B264-V2-C3 N/A U78C9.001.WZS0ATG-P1-C7-T1-W500507605E828362-L1000000000000 none NoReserve Load_Balance 01M0lCTUZsYXNoU3lzdGVtLTk4NDA2MDA1MDc2ODA5OEIxMEI4MDgwMDAwMDAwNTAwMDAzMA== false 40960 hdisk5 active 54361IBM FlashSystem-98402e0262c42e02-0000-0005-00003010FlashSystem-984003IBMfcp false VU5LTk9XTg== NjAwNTA3NjgwOThCMTBCODA4MDAwMDAwMDUwMDAwMzA= 0x8100000000000000 vhdisk5 081e43b8cf2481a047 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C7-T1 U78AB.001.WZSH5ZY-P1-C7-T1 13U78AB.001.WZSH5ZY-P1-C7-T1 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C7-T3 U78AB.001.WZSH5ZY-P1-C7-T3 13U78AB.001.WZSH5ZY-P1-C7-T3 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent1 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C7-T2 U78AB.001.WZSH5ZY-P1-C7-T2 13U78AB.001.WZSH5ZY-P1-C7-T2 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent8 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C3-T3 U78AB.001.WZSH5ZY-P1-C3-T3 13U78AB.001.WZSH5ZY-P1-C3-T3 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent9 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C3-T4 U78AB.001.WZSH5ZY-P1-C3-T4 13U78AB.001.WZSH5ZY-P1-C3-T4 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C7-T4 U78AB.001.WZSH5ZY-P1-C7-T4 13U78AB.001.WZSH5ZY-P1-C7-T4 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent7 physicalEthernetAdpter U78AB.001.WZSH5ZY-P1-C3-T2 U78AB.001.WZSH5ZY-P1-C3-T2 13U78AB.001.WZSH5ZY-P1-C3-T2 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent0 U78AB.001.WZSH5ZY-P1-C7-T1 en0 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent2 U78AB.001.WZSH5ZY-P1-C7-T3 en2 Inactive 1 4-Port Gigabit 
Ethernet PCI-Express Adapter (e414571614102004) ent1 U78AB.001.WZSH5ZY-P1-C7-T2 en1 Inactive 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent8 U78AB.001.WZSH5ZY-P1-C3-T3 en8 Inactive 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent9 U78AB.001.WZSH5ZY-P1-C3-T4 en9 Inactive 1 4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004) ent3 U78AB.001.WZSH5ZY-P1-C7-T4 en3 Inactive 1 4-Port 10/100/1000 Base-TX PCI-Express Adapter (14106803) ent7 U78AB.001.WZSH5ZY-P1-C3-T2 en7 Inactive END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/enterprise_pool_feed.txt0000664000175000017500000001133513571367171024333 0ustar neoneo00000000000000#################################################### # THIS IS AN AUTOMATICALLY GENERATED FILE # DO NOT EDIT. ANY EDITS WILL BE LOST ON NEXT UPDATE # # To update file, run: create_httpresp.py -refresh enterprise_pool_feed.txt # #################################################### INFO{ {'comment': None, 'status': 200, 'pw': 'passw0rd', 'reason': 'OK', 'host': 'localhost', 'user': 'neo', 'path': 'PowerEnterprisePool?group=None'} END OF SECTION} HEADERS{ {'Content-Length': '3725', 'X-Powered-By': 'Servlet/3.1', 'Set-Cookie': 'JSESSIONID=0000hpIms97XPzHUl4K1JmfMKnD:44ae1077-95b8-46ac-9bab-6ba92c1af191; Path=/; Secure; HttpOnly', 'X-HMC-Schema-Version': 'V1_3_0', 'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT', 'Last-Modified': 'Wed, 10 Feb 2016 17:54:26 GMT', 'X-Transaction-ID': 'XT10001956', 'Cache-Control': 'no-transform, must-revalidate, proxy-revalidate, no-cache=set-cookie', 'Date': 'Wed, 10 Feb 2016 17:54:26 GMT', 'X-MC-Type': 'PVM', 'Content-Type': 'application/atom+xml', 'X-TransactionRecord-Uuid': 'e25c118f-9669-4a59-84f9-bdfb02fa19f7', 'ETag': '-1118621103'} END OF SECTION} BODY{ 031e997b-1b00-3ca3-884e-9812178e106b 2016-02-10T12:54:26.872-05:00 IBM Power Systems Management Console e0bc50d4-196a-3d8b-87b9-5ca694c2eb66 PowerEnterprisePool 2016-02-10T12:54:26.935-05:00 IBM Power Systems Management Console 
-1118621134 e0bc50d4-196a-3d8b-87b9-5ca694c2eb66 0 0328 FVT_pool1 40 20 0 16 0 0 0 InCompliance ip9-1-2-3 7042 CR7 10B6EDC true false 9.1.2.3 END OF SECTION} pypowervm-1.1.24/pypowervm/tests/data/vios_pcm_data_sparse.txt0000664000175000017500000000225313571367171024323 0ustar neoneo00000000000000#################################################### # This file was manually generated. # #################################################### INFO{ {'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_vios_1_20150527T081730+0000.json'} END OF SECTION} HEADERS{ {'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'} END OF SECTION} BODY{ { "systemUtil": { "utilInfo": { "version": "1.0.0", "metricType": "Raw", "monitoringType": "LTM", "mtms": "8247-22L*2125D4A" }, "utilSample": { "timeStamp": "2015-05-27T00:22:00+0000", "viosUtil": [ { "id": "1", "name": "IOServer - SN2125D4A", "memory": {}, "network": {}, "storage": {} } ], "status": 0, "errorInfo": [] } } } END OF SECTION} pypowervm-1.1.24/pypowervm/tests/utils/0000775000175000017500000000000013571367172017623 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/utils/test_retry.py0000664000175000017500000003157013571367171022406 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import random
import testtools

from pypowervm import adapter as adpt
from pypowervm import const as c
from pypowervm import exceptions as pvm_exc
from pypowervm.utils import retry as pvm_retry

# Module-level counter so decorated local functions (which can't see test
# instance state) can record how many times they were invoked.  Each test
# resets it to 0 before use.
called_count = 0


class TestRetry(testtools.TestCase):
    """Unit tests for pypowervm.utils.retry.

    Exercises the @retry decorator's tries/http_codes/test_func/
    retry_except/resp_checker/argmod_func knobs, plus the stepped and
    random delay functions.
    """

    def test_retry(self):
        """Core @retry behaviors: pass-through, exceptions, checkers."""
        global called_count

        # Test normal call with return values
        @pvm_retry.retry(tries=4)
        def some_method(x, y):
            global called_count
            called_count += 1
            return x, y
        called_count = 0
        val = some_method(1, 2)
        self.assertEqual(val, (1, 2))
        # Success on the first try: no retries happen.
        self.assertEqual(called_count, 1)

        # Test with an unexpected exception
        class OurException(Exception):
            pass

        @pvm_retry.retry(tries=4)
        def except_method(x, y):
            global called_count
            called_count += 1
            raise OurException()
        called_count = 0
        # An exception type @retry wasn't told about propagates immediately.
        self.assertRaises(OurException, except_method, 1, 2)
        self.assertEqual(called_count, 1)

        # Test retry with an http code
        @pvm_retry.retry(tries=4, http_codes=(c.HTTPStatus.ETAG_MISMATCH,))
        def http_except_method(x, y):
            global called_count
            called_count += 1
            resp = adpt.Response('reqmethod', 'reqpath',
                                 c.HTTPStatus.ETAG_MISMATCH, 'reason',
                                 'headers', None)
            http_exc = pvm_exc.HttpError(resp)
            raise http_exc
        called_count = 0
        # A matching HTTP code is retried until 'tries' is exhausted, then
        # the HttpError is re-raised.
        self.assertRaises(pvm_exc.HttpError, http_except_method, 1, 2)
        self.assertEqual(called_count, 4)

        # Test retry with a test func and custom exception
        def cust_test(e, try_, tries, *args, **kwds):
            # Allow a retry only for the first attempt.
            return try_ != 2

        @pvm_retry.retry(tries=10, test_func=cust_test,
                         limit_except=OurException())
        def func_except_method(x, y):
            global called_count
            called_count += 1
            resp = adpt.Response('reqmethod', 'reqpath',
                                 c.HTTPStatus.ETAG_MISMATCH, 'reason',
                                 'headers', None)
            http_exc = pvm_exc.HttpError(resp)
            raise http_exc
        called_count = 0
        # Should get back OurException (the limit_except) after just 2 calls
        self.assertRaises(OurException, func_except_method, 1, 2)
        self.assertEqual(called_count, 2)

        # Test custom exceptions to retry.  NOTE: this deliberately reuses
        # (shadows) the func_except_method name from above; the earlier
        # decorated function has already been exercised.
        @pvm_retry.retry(tries=3, retry_except=OurException)
        def func_except_method(x, y):
            global called_count
            called_count += 1
            raise OurException()
        called_count = 0
        # Should get back OurException after just 3 calls
        with self.assertLogs(pvm_retry.__name__, 'WARNING') as warn_logs:
            self.assertRaises(OurException, func_except_method, 1, 2)
            # One warning per retry (two retries before the final failure).
            self.assertEqual(2, len(warn_logs.output))
        self.assertEqual(called_count, 3)

        # Test the response checking function
        def resp_chkr(resp, try_, tries, *args, **kwds):
            if try_ == 2:
                raise OurException()
            # Tell it to retry
            return True

        @pvm_retry.retry(tries=10, resp_checker=resp_chkr)
        def func_resp_chkr(x, y):
            global called_count
            called_count += 1
            return x, y
        called_count = 0
        # Should get back OurException after just 2 calls
        self.assertRaises(OurException, func_resp_chkr, 1, 2)
        self.assertEqual(called_count, 2)

    def test_retry_example(self):
        """End-to-end example: etag retry, busy retry, then success."""
        global called_count
        called_count = 0

        def _resp_checker(resp, try_, _tries, *args, **kwds):
            # If the VIOS is busy, then retry
            return resp == 'VIOS IS BUSY'

        @pvm_retry.retry(tries=4, http_codes=pvm_retry.DFT_RETRY_CODES,
                         resp_checker=_resp_checker)
        def _powervm_update(parm):
            global called_count
            called_count += 1
            if called_count == 1:
                # etag mismatch
                resp = adpt.Response('reqmethod', 'reqpath',
                                     c.HTTPStatus.ETAG_MISMATCH, 'reason',
                                     'headers')
                http_exc = pvm_exc.HttpError(resp)
                raise http_exc
            if called_count == 2:
                # Pretend we got a valid response, but the VIOS is busy
                return 'VIOS IS BUSY'
            if called_count == 3:
                # Pretend we got a good response
                return parm
            return None

        with self.assertLogs(pvm_retry.__name__, 'WARNING') as warn_logs:
            self.assertEqual(_powervm_update('Req'), 'Req')
            # only one warning (etag mismatch).  The 'VIOS IS BUSY' is
            # returned as OK by the _resp_checker, but doesn't do its own
            # logging
            self.assertEqual(1, len(warn_logs.output))
        self.assertEqual(called_count, 3)

    def test_retry_argmod(self):
        """argmod_func can rewrite args/kwargs between attempts."""
        global called_count
        called_count = 0

        def argmod_func(this_try, max_tries, *args, **kwargs):
            argl = list(args)
            if this_try == 1:
                argl[0] += 1
                kwargs['five'] += ' bar'
            if this_try == 2:
                kwargs['seven'] = 7
            return argl, kwargs

        @pvm_retry.retry(argmod_func=argmod_func,
                         resp_checker=lambda *a, **kwa: True)
        def _func(one, two, three='four', five='six', seven=None):
            global called_count
            called_count += 1
            # 'two' and 'three' are never modified by argmod_func.
            self.assertEqual(20, two)
            self.assertEqual('four', three)
            if called_count == 1:
                self.assertEqual(10, one)
                self.assertEqual('foo', five)
                self.assertIsNone(seven)
            else:
                self.assertEqual(11, one)
            if called_count == 2:
                self.assertEqual('foo bar', five)
            elif called_count == 3:
                self.assertEqual(7, seven)
                self.assertEqual('foo bar bar', five)

        _func(10, 20, five='foo')
        self.assertEqual(3, called_count)

    def test_retry_refresh_wrapper(self):
        """Test @retry with the 'refresh_wrapper' argmod_func."""
        global called_count
        called_count = 0

        mock_wrapper = mock.Mock()
        mock_wrapper.refreshes = 0

        def _refresh(**kwargs):
            mock_wrapper.refreshes += 1
            # refresh_wrapper must refresh with use_etag=False.
            self.assertIn('use_etag', kwargs)
            self.assertFalse(kwargs['use_etag'])
            return mock_wrapper
        mock_wrapper.refresh.side_effect = _refresh

        @pvm_retry.retry(argmod_func=pvm_retry.refresh_wrapper,
                         resp_checker=lambda *a, **k: True)
        def _func(wrapper, arg1, arg2, kw0=None, kw1=None):
            global called_count
            self.assertEqual(called_count, wrapper.refreshes)
            # Ensure the other args didn't change
            self.assertEqual('a1', arg1)
            self.assertEqual('a2', arg2)
            self.assertEqual('k0', kw0)
            self.assertEqual('k1', kw1)
            called_count += 1

        _func(mock_wrapper, 'a1', 'a2', kw0='k0', kw1='k1')
        # Three calls (overall attempts)
        self.assertEqual(3, called_count)
        # ...equals two refreshes
        self.assertEqual(2, mock_wrapper.refreshes)

    @mock.patch('time.sleep')
    def test_stepped_delay(self, mock_sleep):
        """STEPPED_DELAY sleeps increasing amounts, capped at 30s."""
        # Last set of delays should hit the cap.
        # NOTE(review): range(1, 7) only exercises delays[0..5]; the final
        # 30.0 entry (delays[6]) is never checked -- confirm intent.
        delays = [0, .5, 2.0, 6.5, 20.0, 30.0, 30.0]
        for i in range(1, 7):
            pvm_retry.STEPPED_DELAY(i, 7)
            mock_sleep.assert_called_once_with(delays[i-1])
            mock_sleep.reset_mock()

    @mock.patch('time.sleep')
    def test_random_delay(self, mock_sleep):
        """Test gen_random_delay."""
        def _validate_range(start, end):
            # Sleep was called once.
            self.assertEqual(1, mock_sleep.call_count)
            args, kwargs = mock_sleep.call_args
            # Called with one arg
            self.assertEqual(1, len(args))
            # ...and no kwargs
            self.assertEqual({}, kwargs)
            # Extract the arg
            slept_with = args[0]
            # It should be at least 'start'
            self.assertGreaterEqual(slept_with, start)
            # ...and at most 'end'.
            self.assertLessEqual(slept_with, end)
            mock_sleep.reset_mock()

        # Defaults
        pvm_retry.gen_random_delay()(1, 1)
        _validate_range(0, 10)
        # Use a randomizer to test a randomizer.  It's... poetry.
        min_s = random.random() * random.randint(1, 100)
        max_s = min_s * random.randint(1, 100)
        pvm_retry.gen_random_delay(min_s=min_s, max_s=max_s)(1, 1)
        _validate_range(min_s, max_s)

    def _validate_stepped_random_range(self, mock_sleep, attempt, start, end,
                                       max_attempts=6):
        """Invoke STEPPED_RANDOM_DELAY and assert the sleep is in range.

        :param mock_sleep: The mocked time.sleep.
        :param attempt: The attempt number to pass to STEPPED_RANDOM_DELAY.
        :param start: Minimum acceptable sleep duration (inclusive).
        :param end: Maximum acceptable sleep duration (inclusive).
        :param max_attempts: Total attempts passed to STEPPED_RANDOM_DELAY.
        """
        pvm_retry.STEPPED_RANDOM_DELAY(attempt, max_attempts)
        # Sleep was called once.
        self.assertEqual(1, mock_sleep.call_count)
        args, kwargs = mock_sleep.call_args
        # Called with one arg
        self.assertEqual(1, len(args))
        # ...and no kwargs
        self.assertEqual({}, kwargs)
        # Extract the arg
        slept_with = args[0]
        # It should be at least 'start'
        self.assertGreaterEqual(slept_with, start)
        # ...and at most 'end'.
        self.assertLessEqual(slept_with, end)
        mock_sleep.reset_mock()

    @mock.patch('time.sleep')
    def test_stepped_random_delay(self, mock_sleep):
        """Test STEPPED_RANDOM_DELAY."""
        # These ranges from RANDOM_DELAY_STEPS
        self._validate_stepped_random_range(mock_sleep, 1, 0, 0)
        self._validate_stepped_random_range(mock_sleep, 2, 0, 1)
        self._validate_stepped_random_range(mock_sleep, 3, 0.5, 4)
        self._validate_stepped_random_range(mock_sleep, 4, 2, 13)
        self._validate_stepped_random_range(mock_sleep, 5, 6.5, 30)
        # Use a range beyond the max attempts.  Make sure it does not fail.
        for att in (6, 7, 8, 9, 10):
            self._validate_stepped_random_range(mock_sleep, att, 0, 60)

    @mock.patch('time.sleep')
    def test_stepped_random_delay_large(self, mock_sleep):
        """Test STEPPED_RANDOM_DELAY with a very large range."""
        max_attempts = 60
        # These ranges from RANDOM_DELAY_STEPS
        for i in range(1, 10):
            self._validate_stepped_random_range(
                mock_sleep, i, 0, 0, max_attempts=max_attempts)
        for i in range(11, 20):
            self._validate_stepped_random_range(
                mock_sleep, i, 0, 1, max_attempts=max_attempts)
        for i in range(21, 30):
            self._validate_stepped_random_range(
                mock_sleep, i, 0.5, 4, max_attempts=max_attempts)
        for i in range(31, 40):
            self._validate_stepped_random_range(
                mock_sleep, i, 2, 13, max_attempts=max_attempts)
        for i in range(41, 50):
            self._validate_stepped_random_range(
                mock_sleep, i, 6.5, 30, max_attempts=max_attempts)
        for i in range(51, 60):
            self._validate_stepped_random_range(
                mock_sleep, i, 0, 60, max_attempts=max_attempts)

    @mock.patch('time.sleep')
    def test_stepped_random_delay_small(self, mock_sleep):
        """Test STEPPED_RANDOM_DELAY with a very small range.

        Uses a 'max_attempts' whose range is less than the size of
        RANDOM_DELAY_STEPS
        """
        # Run this a few times so as to make sure the ranges are fully
        # adhered to.
        i = 0
        while i < 50:
            i += 1
            self._validate_stepped_random_range(mock_sleep, 1, 0, 0,
                                                max_attempts=3)
            self._validate_stepped_random_range(mock_sleep, 2, .5, 4,
                                                max_attempts=3)
            self._validate_stepped_random_range(mock_sleep, 3, 6.5, 60,
                                                max_attempts=3)
pypowervm-1.1.24/pypowervm/tests/utils/__init__.py0000664000175000017500000000000013571367171021721 0ustar  neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/utils/test_transaction.py0000664000175000017500000011300513571367171023560 0ustar  neoneo00000000000000# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for pypowervm.utils.transaction."""

import copy

import mock
import oslo_concurrency.lockutils as lock
import oslo_context.context as ctx
from taskflow import engines as tf_eng
from taskflow import exceptions as tf_ex
from taskflow.patterns import unordered_flow as tf_uf
from taskflow import task as tf_task
import unittest

import pypowervm.const as c
import pypowervm.exceptions as ex
import pypowervm.tests.test_fixtures as fx
import pypowervm.tests.test_utils.test_wrapper_abc as twrap
from pypowervm.utils import retry
import pypowervm.utils.transaction as tx
import pypowervm.wrappers.entry_wrapper as ewrap
import pypowervm.wrappers.logical_partition as lpar


class TestWrapperTask(twrap.TestWrapper):
    """Tests for tx.WrapperTask, tx.Subtask and @tx.entry_transaction."""

    file = 'lpar.txt'
    wrapper_class_to_test = lpar.LPAR

    def setUp(self):
        super(TestWrapperTask, self).setUp()
        self.useFixture(fx.SleepFx())
        self.getter = lpar.LPAR.getter(self.adpt, 'getter_uuid')
        # Set this up for getter.get()
        self.adpt.read.return_value = self.dwrap.entry
        # Counter shared with retry_twice to simulate etag-mismatch retries.
        self.tracker = mock.Mock(counter=0)

    class LparNameAndMem(tx.Subtask):
        """Subtask modifying an LPAR's name and desired memory."""
        def execute(self, lpar_wrapper, new_name, des_mem=None, logger=None):
            """Modify an LPAR's name and desired memory.

            :param lpar_wrapper: The LPAR EntryWrapper to update.
            :param new_name: The new name to give the LPAR.
            :param des_mem: (Optional) The new desired memory value, an int.
            :param logger: (Optional) If specified, "log" the class name for
                           test inspection purposes.
            :return: The (possibly modified) lpar_wrapper.
            """
            update_needed = False
            if logger:
                logger.log('LparNameAndMem_%s' % new_name)
            old_name = lpar_wrapper.name
            if old_name != new_name:
                lpar_wrapper.name = new_name
                update_needed = True
            if des_mem is not None:
                orig_mem = lpar_wrapper.mem_config.desired
                if des_mem != orig_mem:
                    lpar_wrapper.mem_config.desired = des_mem
                    update_needed = True
            return update_needed

    @staticmethod
    def retry_twice(wrapper, tracker, logger):
        # Force a couple of retries
        tracker.counter += 1
        logger.log('update %d' % tracker.counter)
        if tracker.counter < 3:
            raise ex.HttpError(mock.Mock(status=c.HTTPStatus.ETAG_MISMATCH))
        return wrapper

    @mock.patch('oslo_concurrency.lockutils.Semaphores.get')
    def test_synchronized_called_with_uuid(self, mock_semget):
        """Ensure the synchronizer is locking with the first arg's .uuid."""
        @tx.entry_transaction
        def blacklist_this(wrapper_or_getter):
            pass

        # At this point, the outer decorator has been invoked, but the
        # synchronizing decorator has not.
        self.assertEqual(0, mock_semget.call_count)

        # If we call the decorated method with an EntryWrapper, synchronize
        # should be invoked with the EntryWrapper's UUID
        blacklist_this(self.dwrap)
        self.assertEqual(1, mock_semget.call_count)
        mock_semget.assert_called_with('089FFB20-5D19-4A8C-BB80-13650627D985')

        # Calling with an EntryWrapperGetter should synchronize on the getter's
        # registered UUID.  (IRL, this will match the wrapper's UUID.  Here we
        # are making sure the right code path is being taken.)
        mock_semget.reset_mock()
        blacklist_this(self.getter)
        self.assertEqual(1, mock_semget.call_count)
        mock_semget.assert_called_with('getter_uuid')

    def test_sequence(self):
        """Prove the sequence of events on a transaction-decorated method.

        We expect it to look like:
        lock
        get the wrapper if necessary
        invoke the method
        while the method raises etag error, refresh the wrapper and re-invoke
        unlock
        """
        txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))

        @tx.entry_transaction
        def blacklist_this(wrapper_or_getter):
            # Always converted by now
            self.assertIsInstance(wrapper_or_getter, ewrap.EntryWrapper)
            return self.retry_twice(wrapper_or_getter, self.tracker, txfx)

        # With an EntryWrapperGetter, get() is invoked
        self.assertEqual(self.dwrap, blacklist_this(self.getter))
        self.assertEqual(['lock', 'get', 'update 1', 'refresh', 'update 2',
                          'refresh', 'update 3', 'unlock'], txfx.get_log())

        # With an EntryWrapper, get() is not invoked
        self.tracker.counter = 0
        txfx.reset_log()
        self.assertEqual(self.dwrap, blacklist_this(self.dwrap))
        self.assertEqual(['lock', 'update 1', 'refresh', 'update 2',
                          'refresh', 'update 3', 'unlock'], txfx.get_log())

    @mock.patch('pypowervm.utils.retry.retry')
    def test_retry_args(self, mock_retry):
        """Ensure the correct arguments are passed to @retry."""
        @tx.entry_transaction
        def blacklist_this(wrapper_or_getter):
            pass
        blacklist_this(mock.Mock())
        # Stepped random delay func was invoked
        mock_retry.assert_called_once_with(
            argmod_func=retry.refresh_wrapper, tries=60,
            delay_func=retry.STEPPED_RANDOM_DELAY)

    @staticmethod
    def tx_subtask_invoke(tst, wrapper):
        """Simulates how Subtasks are invoked by WrapperTask.

        :param tst: A Subtask
        :param wrapper: The wrapper with which to invoke execute()
        :return: The value returned by execute()
        """
        return tst.execute(wrapper, *tst.save_args, **tst.save_kwargs)

    def test_wrapper_task_subtask(self):
        """Tests around Subtask."""
        # Same name, should result in no changes and no update_needed
        txst1 = self.LparNameAndMem('z3-9-5-126-127-00000001')
        self.assertFalse(self.tx_subtask_invoke(txst1, self.dwrap))
        self.assertEqual('z3-9-5-126-127-00000001', self.dwrap.name)
        self.assertEqual(512, self.dwrap.mem_config.desired)
        # New name should prompt update_needed.  Specified-but-same des_mem.
        txst2 = self.LparNameAndMem('new-name', des_mem=512)
        self.assertTrue(self.tx_subtask_invoke(txst2, self.dwrap))
        self.assertEqual('new-name', self.dwrap.name)
        self.assertEqual(512, self.dwrap.mem_config.desired)
        # New name and mem should prompt update_needed
        txst3 = self.LparNameAndMem('newer-name', des_mem=1024)
        self.assertTrue(self.tx_subtask_invoke(txst3, self.dwrap))
        self.assertEqual('newer-name', self.dwrap.name)
        self.assertEqual(1024, self.dwrap.mem_config.desired)
        # Same name and explicit same mem - no update_needed
        txst4 = self.LparNameAndMem('newer-name', des_mem=1024)
        self.assertFalse(self.tx_subtask_invoke(txst4, self.dwrap))
        self.assertEqual('newer-name', self.dwrap.name)
        self.assertEqual(1024, self.dwrap.mem_config.desired)

    def test_wrapper_task_subtask_returns(self):
        """Test that execute methods' return values are processed properly."""
        # Use internal _FunctorSubtask to make this easier.  Bonus: testing
        # _FunctorSubtask at the same time.

        def returns_second_arg(wrapper, boolable):
            """Used to test various boolable single returns."""
            return boolable

        # Various valid 'False' boolables - update not needed
        falseables = (0, '', [], {}, False)
        for falseable in falseables:
            txst = tx._FunctorSubtask(returns_second_arg, falseable)
            self.assertFalse(self.tx_subtask_invoke(txst, self.dwrap))

        # Various valid 'True' boolables - update needed
        trueables = (1, 'string', [0], {'k': 'v'}, True)
        for trueable in trueables:
            txst = tx._FunctorSubtask(returns_second_arg, trueable)
            self.assertTrue(self.tx_subtask_invoke(txst, self.dwrap))

    def test_wrapper_task_allow_empty(self):
        """Test the allow_empty=True condition."""
        # No mocks - no REST calls should be run.
        tx1 = tx.WrapperTask('tx1', self.getter, allow_empty=True)
        # Does not raise, returns None
        self.assertIsNone(tx1.execute())

    def test_wrapper_task1(self):
        """WrapperTask lifecycle: validation, deferred GET, subtask order."""
        txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))

        # Must supply a wrapper or getter to instantiate
        self.assertRaises(ValueError, tx.WrapperTask, 'foo', 'bar')

        # Create a valid WrapperTask
        tx1 = tx.WrapperTask('tx1', self.getter)
        self.assertEqual('tx1', tx1.name)
        self.assertIn('wrapper_getter_uuid', tx1.provides)
        self.assertIn('subtask_rets_getter_uuid', tx1.provides)
        # Nothing has been run yet
        self.assertEqual([], txfx.get_log())
        # Try running with no subtasks
        self.assertRaises(ex.WrapperTaskNoSubtasks, tx1.execute)
        # Try adding something that isn't a Subtask
        self.assertRaises(ValueError, tx1.add_subtask, 'Not a Subtask')
        # Error paths don't run anything.
        self.assertEqual([], txfx.get_log())

        # Add a subtask that doesn't change anything
        tx1.add_subtask(self.LparNameAndMem('z3-9-5-126-127-00000001',
                                            logger=txfx))
        # Adding a subtask does not run anything
        self.assertEqual([], txfx.get_log())

        # Get the wrapper - this should invoke GET, but *not* under lock
        self.assertEqual(self.dwrap, tx1.wrapper)
        self.assertEqual(['get'], txfx.get_log())

        # Run the transaction
        lwrap, subtask_rets = tx1.execute()
        # The name should be unchanged
        self.assertEqual('z3-9-5-126-127-00000001', lwrap.name)
        # And update should not have been called, which should be reflected in
        # the log.  Note that 'get' is NOT called a second time.
        self.assertEqual(['get', 'lock',
                          'LparNameAndMem_z3-9-5-126-127-00000001', 'unlock'],
                         txfx.get_log())
        self.assertEqual({}, subtask_rets)

        txfx.reset_log()
        # These subtasks do change the name.
        tx1.add_subtask(self.LparNameAndMem('new_name', logger=txfx))
        tx1.add_subtask(self.LparNameAndMem('newer_name', logger=txfx))
        # But this one doesn't.  We're making sure the last 'no update needed'
        # doesn't make the overall update_needed status False.
        tx1.add_subtask(self.LparNameAndMem('newer_name', logger=txfx))
        # Get the wrapper - this should *not* reinvoke GET
        self.assertEqual(self.dwrap, tx1.wrapper)
        self.assertEqual([], txfx.get_log())
        # Now execute the transaction
        lwrap, subtask_rets = tx1.execute()
        # The last change should be the one that stuck
        self.assertEqual('newer_name', lwrap.name)
        # Check the overall order.  Update was called.
        self.assertEqual([
            'lock', 'LparNameAndMem_z3-9-5-126-127-00000001',
            'LparNameAndMem_new_name', 'LparNameAndMem_newer_name',
            'LparNameAndMem_newer_name', 'update', 'unlock'], txfx.get_log())
        self.assertEqual({}, subtask_rets)

        # Test 'cloning' the subtask list
        txfx.reset_log()
        tx2 = tx.WrapperTask('tx2', self.getter, subtasks=tx1.subtasks)
        # Add another one to make sure it goes at the end
        tx2.add_subtask(self.LparNameAndMem('newest_name', logger=txfx))
        # Add one to the original transaction to make sure it doesn't affect
        # this one.
        tx1.add_subtask(self.LparNameAndMem('bogus_name', logger=txfx))
        lwrap, subtask_rets = tx2.execute()
        # The last change should be the one that stuck
        self.assertEqual('newest_name', lwrap.name)
        # Check the overall order.  This one GETs under lock.  Update called.
        self.assertEqual([
            'lock', 'get', 'LparNameAndMem_z3-9-5-126-127-00000001',
            'LparNameAndMem_new_name', 'LparNameAndMem_newer_name',
            'LparNameAndMem_newer_name', 'LparNameAndMem_newest_name',
            'update', 'unlock'], txfx.get_log())
        self.assertEqual({}, subtask_rets)

    def test_logspec(self):
        """Validation and execution of add_functor_subtask's logspec."""
        txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))
        tx1 = tx.WrapperTask('tx1', self.getter)

        mock_log = mock.Mock()
        mock_log.side_effect = lambda *args: txfx.log('log')

        def functor(wrp):
            txfx.log('functor')

        # "False" logspec ignored
        tx1.add_functor_subtask(functor, logspec=[])
        # logspec must have at least two args
        self.assertRaises(ValueError, tx1.add_functor_subtask, functor,
                          logspec=[1])
        # First arg must be callable
        self.assertRaises(ValueError, tx1.add_functor_subtask, functor,
                          logspec=[1, 2])
        # Valid call with just a string
        tx1.add_functor_subtask(functor, logspec=[mock_log, "string"])
        # Valid call with a format string and args
        tx1.add_functor_subtask(functor, logspec=[
            mock_log, "one %s two %s", 1, 2])
        # Valid call with named args
        tx1.add_functor_subtask(functor, logspec=[
            mock_log, "three %(three)s four %(four)s", {'three': 3,
                                                        'four': 4}])

        tx1.execute()
        # The log message fires *before* each functor that carries a logspec.
        self.assertEqual([
            'lock', 'get',
            'functor', 'log', 'functor', 'log', 'functor', 'log', 'functor',
            'unlock'], txfx.get_log())
        mock_log.assert_has_calls([
            mock.call("string"),
            mock.call("one %s two %s", 1, 2),
            mock.call("three %(three)s four %(four)s", {'three': 3,
                                                        'four': 4})
        ])

    def test_flag_update(self):
        """flag_update=False avoids update even if Subtask returns True."""
        txfx = self.useFixture(fx.WrapperTaskFx(self.dwrap))
        tx1 = tx.WrapperTask('tx1', self.getter)
        tx1.add_functor_subtask(lambda x: True, flag_update=False)
        tx1.execute()
        self.assertEqual(0, txfx.patchers['update'].mock.call_count)
        # But if there's another Subtask that returns True without
        # flag_update=False, it does trigger an update.
        tx1.add_functor_subtask(lambda x: True)
        tx1.execute()
        self.assertEqual(1, txfx.patchers['update'].mock.call_count)

    def test_wrapper_task2(self):
        # Now:
        # o Fake like update forces retry
        # o Test add_functor_subtask, including chaining
        # o Ensure GET is deferred when .wrapper() is not called ahead of time.
        # o Make sure subtask args are getting to the subtask.
        txfx = fx.WrapperTaskFx(self.dwrap)

        def _update_retries_twice(timeout=-1):
            # update_timeout=123 below must flow through to update().
            self.assertEqual(123, timeout)
            return self.retry_twice(self.dwrap, self.tracker, txfx)
        txfx.patchers['update'].side_effect = _update_retries_twice

        self.useFixture(txfx)

        def functor(wrapper, arg1, arg2, kwarg3=None, kwarg4=None):
            txfx.log('functor')
            # Make sure args are getting here
            self.assertEqual(['arg', 1], arg1)
            self.assertEqual('arg2', arg2)
            self.assertIsNone(kwarg3)
            self.assertEqual('kwarg4', kwarg4)
            return wrapper, True

        # Instantiate-add-execute chain
        tx.WrapperTask(
            'tx2', self.getter,
            update_timeout=123).add_functor_subtask(functor, ['arg', 1],
                                                    'arg2',
                                                    kwarg4='kwarg4').execute()
        # Check the overall order.  Update should have been called thrice (two
        # retries)
        self.assertEqual(3, txfx.patchers['update'].mock.call_count)
        self.assertEqual(['lock', 'get', 'functor', 'update 1', 'refresh',
                          'functor', 'update 2', 'refresh', 'functor',
                          'update 3', 'unlock'], txfx.get_log())

    def test_subtask_provides(self):
        """Chaining of Subtask return values via 'provides'/'provided'."""
        self.useFixture(fx.WrapperTaskFx(self.dwrap))
        test_case = self

        class ChainSubtask(tx.Subtask):
            def __init__(self, val, *args, **kwargs):
                self.val = val
                super(ChainSubtask, self).__init__(*args, **kwargs)

            def execute(self, *args, **kwargs):
                test_case.assertEqual(test_case.dwrap, args[0])
                # If execute accepts **kwargs, 'provided' is provided.
                test_case.assertIn('provided', kwargs)
                test_case.assertEqual(kwargs['expected_provided'],
                                      kwargs['provided'])
                return self.val

        class ChainSubtask2(tx.Subtask):
            def execute(self, wrp, provided, expected_provided):
                test_case.assertEqual(test_case.dwrap, wrp)
                # Able to get 'provided' as a named parameter
                test_case.assertEqual(expected_provided, provided)

        wtsk = tx.WrapperTask('name', self.getter)
        wtsk.add_subtask(ChainSubtask(1, provides='one', expected_provided={}))
        # Can't add another Subtask with the same 'provides'
        self.assertRaises(ValueError, wtsk.add_subtask,
                          ChainSubtask(2, provides='one'))
        # Next subtask should see the result from the first.
        wtsk.add_subtask(ChainSubtask(2, provides='two', expected_provided={
            'one': 1}))
        # Add one that doesn't provide.  Its return shouldn't show up in
        # 'provided'.
        wtsk.add_subtask(ChainSubtask(3, expected_provided={
            'one': 1, 'two': 2}))
        # 'provided' works implicitly when it's a named parameter on execute
        wtsk.add_subtask(ChainSubtask2(expected_provided={'one': 1, 'two': 2}))
        # Even when execute doesn't return anything, we 'provide' that None
        wtsk.add_subtask(ChainSubtask2(provides='four', expected_provided={
            'one': 1, 'two': 2}))

        # Make sure the same stuff works for functors
        def ret_val_kwargs(*args, **kwargs):
            self.assertEqual(self.dwrap, args[0])
            self.assertIn('provided', kwargs)
            self.assertEqual(kwargs['expected_provided'], kwargs['provided'])
            return args[1]

        def ret_val_explicit(wrp, val, provided, expected_provided):
            self.assertEqual(self.dwrap, wrp)
            self.assertEqual(expected_provided, provided)
            return val

        self.assertRaises(ValueError, wtsk.add_functor_subtask, int,
                          provides='one')
        wtsk.add_functor_subtask(
            ret_val_kwargs, 5, provides='five',
            expected_provided={'one': 1, 'two': 2, 'four': None})
        wtsk.add_functor_subtask(
            ret_val_kwargs, 6,
            expected_provided={'one': 1, 'two': 2, 'four': None, 'five': 5})
        wtsk.add_functor_subtask(
            ret_val_explicit, 7, provides='seven',
            expected_provided={'one': 1, 'two': 2, 'four': None, 'five': 5})
        wtsk.add_functor_subtask(
            ret_val_explicit, 8,
            expected_provided={'one': 1, 'two': 2, 'four': None, 'five': 5,
                               'seven': 7})

        # Execute the WrapperTask, verifying assertions in ChainSubtask[2] and
        # ret_val_{kwargs|explicit}
        wrapper, subtask_rets = wtsk.execute()
        self.assertEqual(self.dwrap, wrapper)
        # Verify final form of subtask_rets returned from WrapperTask.execute()
        self.assertEqual(
            {'one': 1, 'two': 2, 'four': None, 'five': 5, 'seven': 7},
            subtask_rets)


class TestFeedTask(twrap.TestWrapper):
    file = 'lpar.txt'
    wrapper_class_to_test = lpar.LPAR

    def setUp(self):
        super(TestFeedTask, self).setUp()
        self.getter = lpar.LPAR.getter(self.adpt)
        # Set this up for getter.get()
        self.adpt.read.return_value = self.resp
        self.feed_task = tx.FeedTask('name', lpar.LPAR.getter(self.adpt))

    def test_invalid_feed_or_getter(self):
        """Various evil inputs to FeedTask.__init__'s feed_or_getter."""
        self.assertRaises(ValueError, tx.FeedTask, 'name', 'something bogus')
        # A "feed" of things that aren't EntryWrappers
        self.assertRaises(ValueError, tx.FeedTask, 'name', [1, 2, 3])
        # This one fails because .getter(..., uuid) produces
        # EntryWrapperGetter
        self.assertRaises(ValueError, tx.FeedTask, 'name',
                          lpar.LPAR.getter(self.adpt, 'a_uuid'))
        # Init with explicit empty feed tested below in test_empty_feed

    @mock.patch('pypowervm.wrappers.entry_wrapper.FeedGetter.get')
    def test_empty_feed(self, mock_get):
        mock_get.return_value = []
        # We're allowed to initialize it with a FeedGetter
        fm = tx.FeedTask('name', ewrap.FeedGetter('mock', ewrap.EntryWrapper))
        # But as soon as we call a 'greedy' method, which does a .get, we raise
        self.assertRaises(ex.FeedTaskEmptyFeed, fm.get_wrapper, 'uuid')
        # Init with an explicit empty feed (list) raises right away
        self.assertRaises(ex.FeedTaskEmptyFeed, tx.FeedTask, 'name', [])

    def test_wrapper_task_adds_and_replication(self):
        """Deferred replication of individual WrapperTasks with adds.

        Covers:
        - wrapper_tasks
        - get_wrapper
        - add_subtask
        - add_functor_subtask
        """
        def wt_check(wt1, wt2, len1, len2=None, upto=None):
            """Assert that two WrapperTasks have the same Subtasks.

            :param wt1, wt2: The WrapperTask instances to compare.
            :param len1, len2: The expected lengths of the
                               WrapperTask.subtasks of wt1 and wt2,
                               respectively.  If len2 is None, it is assumed
                               to be the same as len1.
            :param upto: (Optional, int) If specified, only the first 'upto'
                         Subtasks are compared.  Otherwise, the subtask lists
                         are compared up to the lesser of len1 and len2.
            """
            if len2 is None:
                len2 = len1
            self.assertEqual(len1, len(wt1.subtasks))
            self.assertEqual(len2, len(wt2.subtasks))
            if upto is None:
                upto = min(len1, len2)
            for i in range(upto):
                self.assertIs(wt1.subtasks[i], wt2.subtasks[i])

        # "Functors" for easy subtask creation.  Named so we can identify
        # them.
        foo = lambda: None
        bar = lambda: None
        baz = lambda: None
        xyz = lambda: None
        abc = lambda: None

        # setUp's initialization of feed_task creates empty dict and common_tx
        self.assertEqual({}, self.feed_task._tx_by_uuid)
        self.assertEqual(0, len(self.feed_task._common_tx.subtasks))
        # Asking for the feed does *not* replicate the WrapperTasks
        feed = self.feed_task.feed
        self.assertEqual({}, self.feed_task._tx_by_uuid)
        self.assertEqual(0, len(self.feed_task._common_tx.subtasks))
        # Add to the FeedTask
        self.feed_task.add_subtask(tx._FunctorSubtask(foo))
        self.feed_task.add_functor_subtask(bar)
        # Still does not replicate
        self.assertEqual({}, self.feed_task._tx_by_uuid)
        subtasks = self.feed_task._common_tx.subtasks
        # Make sure the subtasks are legit and in order
        self.assertEqual(2, len(subtasks))
        self.assertIsInstance(subtasks[0], tx.Subtask)
        self.assertIsInstance(subtasks[1], tx.Subtask)
        # Yes, these are both _FunctorSubtasks, but the point is verifying that
        # they are in the right order.
self.assertIs(foo, subtasks[0]._func) self.assertIs(bar, subtasks[1]._func) # Now call something that triggers replication wrap10 = self.feed_task.get_wrapper(feed[10].uuid) self.assertEqual(feed[10], wrap10) self.assertNotEqual({}, self.feed_task._tx_by_uuid) self.assertEqual({lwrap.uuid for lwrap in feed}, set(self.feed_task.wrapper_tasks.keys())) # Pick a couple of wrapper tasks at random. wt5, wt8 = (self.feed_task.wrapper_tasks[feed[i].uuid] for i in (5, 8)) # They should not be the same self.assertNotEqual(wt5, wt8) # Their subtasks should not refer to the same lists self.assertIsNot(wt5.subtasks, wt8.subtasks) # But they should have the same Subtasks (the same actual instances) wt_check(wt5, wt8, 2) # Adding more subtasks to the feed manager adds to all (and by the way, # we don't have to refetch the WrapperTasks). self.feed_task.add_functor_subtask(baz) wt_check(wt5, wt8, 3) self.assertIs(baz, wt5.subtasks[2]._func) # Adding to an individual WrapperTask just adds to that one wt5.add_functor_subtask(xyz) wt_check(wt5, wt8, 4, 3) self.assertIs(xyz, wt5.subtasks[3]._func) # And we can still add another to both afterward self.feed_task.add_functor_subtask(abc) wt_check(wt5, wt8, 5, 4, upto=3) # Check the last couple by hand self.assertIs(xyz, wt5.subtasks[3]._func) self.assertIs(wt5.subtasks[4], wt8.subtasks[3]) self.assertIs(abc, wt5.subtasks[4]._func) def test_deferred_feed_get(self): """Test deferred and unique GET of the internal feed.""" # setUp inits self.feed_task with FeedGetter. This doesn't call read. self.assertEqual(0, self.adpt.read.call_count) lfeed = self.feed_task.feed self.assertEqual(1, self.adpt.read.call_count) self.adpt.read.assert_called_with( 'LogicalPartition', None, child_id=None, child_type=None, xag=None) self.assertEqual(21, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) # Getting feed again doesn't invoke GET again. 
lfeed = self.feed_task.feed self.assertEqual(1, self.adpt.read.call_count) self.assertEqual(21, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) # Init with a feed - read is never called self.adpt.read.reset_mock() ftsk = tx.FeedTask('name', lfeed) self.assertEqual(0, self.adpt.read.call_count) nfeed = ftsk.feed self.assertEqual(0, self.adpt.read.call_count) self.assertEqual(lfeed, nfeed) def test_rebuild_feed(self): """Feed gets rebuilt when transactions exist and an etag mismatches.""" # Populate and retrieve the feed lfeed = self.feed_task.feed # Pick out a wrapper UUID to use, from somewhere in the middle uuid = lfeed[13].uuid # Populate etags for i in range(len(lfeed)): lfeed[i]._etag = i + 100 # This get_wrapper will replicate the UUID-to-WrapperTask dict. # Create a *copy* of the wrapper so that changing it will simulate how # a WrapperTask modifies its internal EntryWrapper on update() without # that change being reflected back to the FeedTask's _feed. (Current # mocks are just returning the same wrapper all the time.) lpar13 = copy.deepcopy(self.feed_task.get_wrapper(uuid)) self.assertNotEqual({}, self.feed_task._tx_by_uuid) # Set unique etag. lpar13._etag = 42 # And stuff it back in the WrapperTask self.feed_task.wrapper_tasks[uuid]._wrapper = lpar13 # Now we're set up. First prove that the feed (as previously grabbed) # isn't already reflecting the new entry. self.assertNotEqual(lpar13.etag, lfeed[13].etag) # Ask for the feed again and this should change # The feed may have been reshuffled, so we have to find our LPAR again. lfind = None for entry in self.feed_task.feed: if entry.uuid == uuid: lfind = entry break self.assertEqual(lpar13.etag, lfind.etag) # And it is in fact the new one now in the feed. 
self.assertEqual(42, lfind.etag) def test_execute(self): """Execute a 'real' FeedTask.""" feed = self.feed_task.feed # Initialize expected/actual flags dicts: # {uuid: [ordered, list, of, flags]} # The list of flags for a given UUID should be ordered the same as the # subtasks, though they may get shotgunned to the dict via parallel # execution of the WrapperTasks. exp_flags = {ent.uuid: [] for ent in feed} act_flags = {ent.uuid: [] for ent in feed} # A function that we can run within a Subtask. No triggering update # since we're just making sure the Subtasks run. def func(wrapper, flag): with lock.lock('act_flags'): act_flags[wrapper.uuid].append(flag) return False # Start with a subtask common to all self.feed_task.add_functor_subtask(func, 'common1') for ent in feed: exp_flags[ent.uuid].append('common1') # Add individual separate subtasks to a few of the WrapperTasks for i in range(5, 15): self.feed_task.wrapper_tasks[ feed[i].uuid].add_functor_subtask(func, i) exp_flags[feed[i].uuid].append(i) # Add another common subtask self.feed_task.add_functor_subtask(func, 'common2') for ent in feed: exp_flags[ent.uuid].append('common2') # Run it! self.feed_task.execute() self.assertEqual(exp_flags, act_flags) @mock.patch('taskflow.patterns.unordered_flow.Flow.__init__') def test_no_subtasks(self, mock_flow): """Ensure that a FeedTask with no Subtasks is a no-op.""" # No REST mocks - any REST calls will blow up. # Mocking Flow initializer to fail, ensuring it doesn't get called. 
mock_flow.side_effect = self.fail tx.FeedTask('feed_task', lpar.LPAR.getter(None)).execute() def test_post_exec(self): def log_func(msg): def _log(*a, **k): ftfx.log(msg) return _log def log_task(msg): return tf_task.FunctorTask(log_func(msg), name='functor_%s' % msg) # Limit the feed to two to keep the logging sane ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2])) # Make the logging predictable by limiting to one thread ftsk = tx.FeedTask('post_exec', lpar.LPAR.getter(None), max_workers=1) # First prove that a FeedTask with *only* post-execs can run. ftsk.add_post_execute(log_task('post1')) ftsk.add_post_execute(log_task('post2')) ftsk.execute() # Note that no GETs or locks happen self.assertEqual(['post1', 'post2'], ftfx.get_log()) # Now add regular subtasks ftfx.reset_log() ftsk.add_functor_subtask(log_func('main1')) ftsk.add_functor_subtask(log_func('main2')) ftsk.execute() # One GET, up front. Posts happen at the end. self.assertEqual(['get', 'lock', 'main1', 'main2', 'unlock', 'lock', 'main1', 'main2', 'unlock', 'post1', 'post2'], ftfx.get_log()) def test_wrapper_task_rets(self): # Limit the feed to two to keep the return size sane ftfx = self.useFixture(fx.FeedTaskFx(self.entries[:2])) ftsk = tx.FeedTask('subtask_rets', lpar.LPAR.getter(None), update_timeout=123) exp_wtr = { wrp.uuid: { 'wrapper': wrp, 'the_id': wrp.id, 'the_name': wrp.name} for wrp in ftsk.feed} called = [] def return_wrapper_name(wrapper): return wrapper.name def return_wrapper_id(wrapper): return wrapper.id def verify_rets_implicit(wrapper_task_rets): called.append('implicit') self.assertEqual(exp_wtr, wrapper_task_rets) return 'verify_rets_implicit_return' def verify_rets_explicit(**kwargs): called.append('explicit') self.assertEqual(exp_wtr, kwargs['wrapper_task_rets']) return 'verify_rets_explicit_return' ftsk.add_functor_subtask(return_wrapper_name, provides='the_name') ftsk.add_functor_subtask(return_wrapper_id, provides='the_id') # Execute once here to make sure the return is 
in the right shape when # there are no post-execs self.assertEqual({ 'wrapper_task_rets': { self.entries[0].uuid: {'the_name': self.entries[0].name, 'the_id': self.entries[0].id, 'wrapper': self.entries[0]}, self.entries[1].uuid: {'the_name': self.entries[1].name, 'the_id': self.entries[1].id, 'wrapper': self.entries[1]}}}, ftsk.execute()) ftsk.add_post_execute(tf_task.FunctorTask( verify_rets_implicit, provides='post_exec_implicit')) ftsk.add_post_execute(tf_task.FunctorTask( verify_rets_explicit, requires='wrapper_task_rets', provides='post_exec_explicit')) ret = ftsk.execute() # Make sure the post-execs actually ran (to guarantee their internal # assertions passed). self.assertEqual(['implicit', 'explicit'], called) ftfx.patchers['update'].mock.assert_called_with(mock.ANY, timeout=123) # Verify that we got the returns from the subtasks AND the post-execs self.assertEqual({ 'wrapper_task_rets': { self.entries[0].uuid: {'the_name': self.entries[0].name, 'the_id': self.entries[0].id, 'wrapper': self.entries[0]}, self.entries[1].uuid: {'the_name': self.entries[1].name, 'the_id': self.entries[1].id, 'wrapper': self.entries[1]}}, 'post_exec_implicit': 'verify_rets_implicit_return', 'post_exec_explicit': 'verify_rets_explicit_return'}, ret) def test_subtask_thread_local(self): """Security context and locks, if set, propagates to WrapperTasks.""" def verify_no_ctx(wrapper): self.assertIsNone(ctx.get_current()) tx.FeedTask('test_no_context', lpar.LPAR.getter( self.adpt)).add_functor_subtask(verify_no_ctx).execute() def verify_ctx(wrapper): _context = ctx.get_current() self.assertIsNotNone(_context) self.assertEqual('123', _context.request_id) # Copy the base set of locks to expect our_locks = list(locks) # Add our wrappers uuid since that will be set also. 
our_locks.append(wrapper.uuid) self.assertEqual(set(our_locks), set(tx._get_locks())) ctx.RequestContext(request_id='123') locks = ['L123', 'L456', 'L789'] tx._set_locks(locks) tx.FeedTask('test_set_context', lpar.LPAR.getter( self.adpt)).add_functor_subtask(verify_ctx).execute() # Context propagates even if FeedTask is executed in a subthread, as # long as our executor is used. # Make two to ensure they're run in separate threads ft1 = tx.FeedTask('subthread1', lpar.LPAR.getter( self.adpt)).add_functor_subtask(verify_ctx) ft2 = tx.FeedTask('subthread2', lpar.LPAR.getter( self.adpt)).add_functor_subtask(verify_ctx) self.assertRaises(tf_ex.WrappedFailure, tf_eng.run, tf_uf.Flow('subthread_flow').add(ft1, ft2), engine='parallel') tf_eng.run( tf_uf.Flow('subthread_flow').add(ft1, ft2), engine='parallel', executor=tx.ContextThreadPoolExecutor(2)) class TestExceptions(unittest.TestCase): def test_exceptions(self): def bad1(wrapper, s): bad2(wrapper) def bad2(wrapper): bad3(wrapper.field) def bad3(tag): raise IOError("this is an exception on %s!" % tag) # With one entry in the feed, one exception should be raised, and it # should bubble up as normal. feed = [mock.Mock(spec=lpar.LPAR, field='lpar1')] ft = tx.FeedTask('ft', feed).add_functor_subtask(bad1, 'this is bad') flow = tf_uf.Flow('the flow') flow.add(ft) self.assertRaises(IOError, tf_eng.run, flow) # With multiple entries in the feed, TaskFlow will wrap the exceptions # in a WrappedFailure. We should repackage it, and the message in the # resulting MultipleExceptionsInFeedTask should contain all the # exception messages. feed.append(mock.Mock(spec=lpar.LPAR, field='lpar2')) ft = tx.FeedTask('ft', feed).add_functor_subtask(bad1, 'this is bad') flow = tf_uf.Flow('the flow') flow.add(ft) with self.assertRaises(ex.MultipleExceptionsInFeedTask) as mult_ex: tf_eng.run(flow) # Make sure the wrapped exception messages show up in the exception. 
self.assertIn('exception on lpar1!', mult_ex.exception.args[0]) self.assertIn('exception on lpar2!', mult_ex.exception.args[0]) pypowervm-1.1.24/pypowervm/tests/utils/test_validation.py0000664000175000017500000005601413571367171023373 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools from pypowervm.utils import validation as vldn from pypowervm.wrappers import base_partition as bp from pypowervm.wrappers import logical_partition as lpar from pypowervm.wrappers import managed_system as mgd_sys class TestValidator(testtools.TestCase): """Unit tests for validation.""" def setUp(self): super(TestValidator, self).setUp() def _bld_mgd_sys(proc_units_avail=20.0, mem_free=32768, system_name='default_sys_name', max_procs_per_aix_linux_lpar=10, max_sys_procs_limit=15, max_vcpus_per_aix_linux_lpar=10, max_sys_vcpus_limit=15, dynamic_srr_capable=True): # Build a fake managed system wrapper mngd_sys = mock.MagicMock(spec=mgd_sys.System) mngd_sys.system_name = system_name mngd_sys.proc_units_avail = proc_units_avail mngd_sys.memory_free = mem_free mngd_sys.max_procs_per_aix_linux_lpar = ( max_procs_per_aix_linux_lpar) mngd_sys.max_sys_procs_limit = max_sys_procs_limit mngd_sys.max_vcpus_per_aix_linux_lpar = ( max_vcpus_per_aix_linux_lpar) mngd_sys.max_sys_vcpus_limit = max_sys_vcpus_limit mngd_sys.get_capability.return_value = dynamic_srr_capable return mngd_sys def 
_bld_lpar(proc_units=1.0, min_mem=512, des_mem=2048, max_mem=4096, has_dedicated=False, name='default', rmc_state='active', mem_dlpar=True, proc_dlpar=True, state='running', env='AIX/Linux', proc_compat='Default', srr_enabled=True, min_vcpus=1, des_vcpus=2, max_vcpus=4, min_proc_units=0.1, max_proc_units=1.0, pool_id=None, exp_factor=0.0, ame_enabled=False, ppt_ratio=None): lpar_w = mock.MagicMock() # name, states, env, etc. lpar_w.name = name lpar_w.state = state lpar_w.rmc_state = rmc_state lpar_w.env = env lpar_w.proc_compat_mode = proc_compat lpar_w.srr_enabled = srr_enabled # Proc lpar_w.proc_config.has_dedicated = has_dedicated if has_dedicated: lpar_w.proc_config.dedicated_proc_cfg.desired = proc_units lpar_w.proc_config.dedicated_proc_cfg.max = max_vcpus lpar_w.proc_config.dedicated_proc_cfg.min = min_vcpus else: lpar_w.proc_config.shared_proc_cfg.desired_units = proc_units lpar_w.proc_config.shared_proc_cfg.desired_virtual = des_vcpus lpar_w.proc_config.shared_proc_cfg.max_virtual = max_vcpus lpar_w.proc_config.shared_proc_cfg.min_virtual = min_vcpus lpar_w.proc_config.shared_proc_cfg.pool_id = ( pool_id if pool_id else 0) lpar_w.proc_config.shared_proc_cfg.min_units = min_proc_units lpar_w.proc_config.shared_proc_cfg.max_units = max_proc_units # Mem lpar_w.mem_config.desired = des_mem lpar_w.mem_config.min = min_mem lpar_w.mem_config.max = max_mem lpar_w.mem_config.exp_factor = exp_factor lpar_w.mem_config.ppt_ratio = ppt_ratio # Can Modify if (state != bp.LPARState.NOT_ACTIVATED and rmc_state != bp.RMCState.ACTIVE): lpar_w.can_modify_proc.return_value = (False, 'Bad RMC') lpar_w.can_modify_mem.return_value = (False, 'Bad RMC') else: # Doesn't matter what the message is unless it's bad # so always make it bad lpar_w.can_modify_proc.return_value = (proc_dlpar, 'Bad proc DLPAR') lpar_w.can_modify_mem.return_value = (mem_dlpar, 'Bad mem DLPAR') mocked = mock.MagicMock(spec_set=lpar.LPAR, return_value=lpar_w) return mocked() self.mngd_sys = _bld_mgd_sys() 
self.mngd_sys_no_dyn_srr = _bld_mgd_sys(dynamic_srr_capable=False) self.lpar_21_procs = _bld_lpar(proc_units=21.0, name='lpar_21_procs') self.lpar_1_proc = _bld_lpar() self.lpar_11_vcpus = _bld_lpar(des_vcpus=11, name='11_vcpus') self.lpar_16_max_vcpus = _bld_lpar(max_vcpus=16, name='16_max_vcpus') self.lpar_1_proc_ded = _bld_lpar(has_dedicated=True, name='1_proc_ded') self.lpar_11_proc_ded = _bld_lpar(proc_units=11, has_dedicated=True, name='11_proc_ded') self.lpar_16_proc_max_ded = _bld_lpar(max_vcpus=16, has_dedicated=True, name='16_proc_max_ded') self.lpar_21_proc_ded = _bld_lpar(proc_units=21, has_dedicated=True, name='21_proc_ded') self.lpar_no_rmc = _bld_lpar(rmc_state='inactive') self.lpar_bad_mem_dlpar = _bld_lpar(mem_dlpar=False) self.lpar_bad_proc_dlpar = _bld_lpar(proc_dlpar=False) self.lpar_48g_mem = _bld_lpar(des_mem=48000, name='lpar_48g_mem') self.lpar_1_min_vcpus = _bld_lpar(min_vcpus=1, name='1_min_vcpus') self.lpar_2_min_vcpus = _bld_lpar(min_vcpus=2, name='2_min_vcpus') self.lpar_1_min_proc_units = _bld_lpar(min_proc_units=0.1, name='0.1_min_procs') self.lpar_3_min_proc_units = _bld_lpar(min_proc_units=0.3, name='0.3_min_procs') self.lpar_6_max_proc_units = _bld_lpar(max_proc_units=0.6, name='0.6_max_procs') self.lpar_9_max_proc_units = _bld_lpar(max_proc_units=0.9, name='0.9_max_procs') self.lpar_6_max_vcpus = _bld_lpar(max_vcpus=6, name='6_max_vcpus') self.lpar_8_max_vcpus = _bld_lpar(max_vcpus=8, name='8_max_vcpus') self.lpar_512mb_min_mem = _bld_lpar(min_mem=512, name='512_min_mem') self.lpar_1gb_min_mem = _bld_lpar(min_mem=1024, name='1gb_min_mem') self.lpar_6g_max_mem = _bld_lpar(max_mem=6144, name='6gb_max_mem') self.lpar_8g_max_mem = _bld_lpar(max_mem=8192, name='8gb_max_mem') self.lpar_default_spp = _bld_lpar(pool_id=0, name='default_spp') self.lpar_non_default_spp = _bld_lpar(pool_id=2, name='non_default_spp') self.lpar_power8_proc_compat = _bld_lpar(proc_compat="POWER8", name='power8_compat_mode') self.lpar_srr_disabled = 
_bld_lpar(srr_enabled=False, name='srr_disabled') self.lpar_1_proc_ded_inactive = _bld_lpar(has_dedicated=True, name='1_proc_ded_inactive', state='not activated') self.lpar_22_procs = _bld_lpar(proc_units=22.0, name='lpar_22_procs') self.lpar_4_proc_ded = _bld_lpar(proc_units=4.0, has_dedicated=True, name='4_proc_ded') self.lpar_22_proc_ded = _bld_lpar(proc_units=22, has_dedicated=True, name='21_proc_ded') self.lpar_4g_mem = _bld_lpar(des_mem=4096, name='4gb_mem') self.lpar_6g_mem = _bld_lpar(des_mem=6144, name='6gb_mem') self.lpar_1dot6_proc_units = _bld_lpar(proc_units=1.6, name='1.6_procs') self.lpar_2dot2_proc_units = _bld_lpar(proc_units=2.2, name='2.2_procs') self.lpar_1_vcpus = _bld_lpar(des_vcpus=1, name='lpar_1_vcpus') self.lpar_not_activated = _bld_lpar(name='lpar_not_activated', state='not activated') self.lpar_running = _bld_lpar(name='lpar_running', state='running') self.lpar_starting = _bld_lpar(name='lpar_starting', state='starting') self.lpar_ame_2 = _bld_lpar(name='ame_2', exp_factor=2.0, ame_enabled=True) self.lpar_ame_3 = _bld_lpar(name='ame_3', exp_factor=3.0, ame_enabled=True) self.lpar_ppt_1 = _bld_lpar(name='ppt_1', ppt_ratio=4) self.lpar_ppt_2 = _bld_lpar(name='ppt_2', ppt_ratio=2) def test_validator(self): # Test desired proc units > host avail proc units fails for shared vldr = vldn.LPARWrapperValidator(self.lpar_21_procs, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test desired proc units < host avail proc units passes for shared vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys).validate_all() # Test desired proc units > host avail proc units fails for dedicated vldr = vldn.LPARWrapperValidator(self.lpar_21_proc_ded, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test desired proc units < host avail proc units passes for dedicated vldn.LPARWrapperValidator(self.lpar_1_proc_ded, self.mngd_sys).validate_all() # Test resize fails with inactive rmc vldr = 
vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys, cur_lpar_w=self.lpar_no_rmc) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test resize fails with no mem dlpar vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys, cur_lpar_w=self.lpar_bad_mem_dlpar) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test resize fails with no proc dlpar vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys, cur_lpar_w=self.lpar_bad_proc_dlpar) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test dedicated procs > host max allowed procs per lpar fails vldr = vldn.LPARWrapperValidator(self.lpar_11_proc_ded, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test dedicated max procs > host max sys procs limit fails vldr = vldn.LPARWrapperValidator(self.lpar_16_proc_max_ded, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test shared desired vcpus > host max allowed vcpus per lpar fails vldr = vldn.LPARWrapperValidator(self.lpar_11_vcpus, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test shared desired max vcpus > host max sys vcpus limit fails vldr = vldn.LPARWrapperValidator(self.lpar_16_max_vcpus, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test desired memory > host available memory fails vldr = vldn.LPARWrapperValidator(self.lpar_48g_mem, self.mngd_sys) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing min vcpus fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_1_min_vcpus, self.mngd_sys, cur_lpar_w=self.lpar_2_min_vcpus) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing max vcpus fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_8_max_vcpus, self.mngd_sys, cur_lpar_w=self.lpar_6_max_vcpus) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing min proc units fails 
for active resize vldr = vldn.LPARWrapperValidator(self.lpar_3_min_proc_units, self.mngd_sys, cur_lpar_w=self.lpar_1_min_proc_units) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing max proc units fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_9_max_proc_units, self.mngd_sys, cur_lpar_w=self.lpar_6_max_proc_units) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing min memory fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_512mb_min_mem, self.mngd_sys, cur_lpar_w=self.lpar_1gb_min_mem) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing max memory fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_8g_max_mem, self.mngd_sys, cur_lpar_w=self.lpar_6g_max_mem) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing AME expansion factor from 2 to 3 fails active resize vldr = vldn.LPARWrapperValidator(self.lpar_ame_3, self.mngd_sys, cur_lpar_w=self.lpar_ame_2) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test toggling AME fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_ame_2, self.mngd_sys, cur_lpar_w=self.lpar_1_proc) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing PPT ratio fails during active resize vldr = vldn.LPARWrapperValidator(self.lpar_ppt_1, self.mngd_sys, cur_lpar_w=self.lpar_ppt_2) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test not changing PPT ratio passes during active resize vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys, cur_lpar_w=self.lpar_ppt_2) vldr.validate_all() # Test resizing lpar from defaultSPP to non-defaultSPP passes vldr = vldn.LPARWrapperValidator(self.lpar_non_default_spp, self.mngd_sys, cur_lpar_w=self.lpar_default_spp) vldr.validate_all() # Test resizing lpar from non-defaultSPP to defaultSPP passes vldr = vldn.LPARWrapperValidator(self.lpar_default_spp, self.mngd_sys, 
cur_lpar_w=self.lpar_non_default_spp) vldr.validate_all() # Test changing from dedicated to non-defaultSPP passes vldr = vldn.LPARWrapperValidator(self.lpar_non_default_spp, self.mngd_sys, self.lpar_1_proc_ded_inactive) vldr.validate_all() # Test changing processor mode (shared -> ded) fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_1_proc_ded, self.mngd_sys, cur_lpar_w=self.lpar_1_proc) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing processor mode (ded to shared) fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys, cur_lpar_w=self.lpar_1_proc_ded) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing processor compatibility mode fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_power8_proc_compat, self.mngd_sys, cur_lpar_w=self.lpar_1_proc) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test changing SRR capabilty fails for active resize vldr = vldn.LPARWrapperValidator(self.lpar_srr_disabled, self.mngd_sys_no_dyn_srr, cur_lpar_w=self.lpar_1_proc) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # ...unless dynamic_srr_capable vldr = vldn.LPARWrapperValidator(self.lpar_srr_disabled, self.mngd_sys, cur_lpar_w=self.lpar_1_proc) vldr.validate_all() # Test desired delta proc units > host avail proc units fails # during resize (shared -> shared) vldr = vldn.LPARWrapperValidator(self.lpar_22_procs, self.mngd_sys, cur_lpar_w=self.lpar_1_proc) self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test desired delta proc units <= host avail proc units passes # during resize (shared -> shared) vldn.LPARWrapperValidator(self.lpar_21_procs, self.mngd_sys, cur_lpar_w=self.lpar_1_proc).validate_all() # Test desired delta proc units > host avail proc units fails # during resize (dedicated -> dedicated) vldr = vldn.LPARWrapperValidator(self.lpar_22_proc_ded, self.mngd_sys, cur_lpar_w=self.lpar_1_proc_ded) 
self.assertRaises(vldn.ValidatorException, vldr.validate_all) # Test desired delta proc units <= host avail proc units passes # during resize (dedicated -> dedicated) vldn.LPARWrapperValidator(self.lpar_4_proc_ded, self.mngd_sys, self.lpar_1_proc_ded).validate_all() # Test resize delta mem mem_vldr = vldn.MemValidator(self.lpar_6g_mem, self.mngd_sys, cur_lpar_w=self.lpar_4g_mem) mem_vldr._populate_new_values() mem_vldr._populate_resize_diffs() self.assertEqual(2048, mem_vldr.delta_des_mem, 'Incorrect resize delta memory calculation') # Test resize delta procs proc_vldr = vldn.ProcValidator(self.lpar_4_proc_ded, self.mngd_sys, cur_lpar_w=self.lpar_1_proc_ded) proc_vldr._populate_new_values() proc_vldr._populate_resize_diffs() self.assertEqual(3, proc_vldr.delta_des_vcpus, 'Incorrect resize delta proc calculation' ' in dedicated mode') proc_vldr = vldn.ProcValidator(self.lpar_2dot2_proc_units, self.mngd_sys, cur_lpar_w=self.lpar_1dot6_proc_units) proc_vldr._populate_new_values() proc_vldr._populate_resize_diffs() self.assertEqual(0.60, proc_vldr.delta_des_vcpus, 'Incorrect resize delta proc calculation in' ' shared mode') proc_vldr = vldn.ProcValidator(self.lpar_1dot6_proc_units, self.mngd_sys, cur_lpar_w=self.lpar_1_proc_ded) proc_vldr._populate_new_values() proc_vldr._populate_resize_diffs() self.assertEqual(0.60, proc_vldr.delta_des_vcpus, 'Incorrect delta proc calculation while resizing ' 'from dedicated to shared mode') proc_vldr = vldn.ProcValidator(self.lpar_4_proc_ded, self.mngd_sys, cur_lpar_w=self.lpar_1dot6_proc_units) proc_vldr._populate_new_values() proc_vldr._populate_resize_diffs() self.assertEqual(2.40, proc_vldr.delta_des_vcpus, 'Incorrect delta proc calculation while resizing ' 'from shared to dedicated mode') # Test resizing not activated state lpar makes inactive_resize_checks with mock.patch('pypowervm.utils.validation.ProcValidator.' 
'_validate_inactive_resize') as inactive_resize_checks: proc_vldr = vldn.ProcValidator(self.lpar_not_activated, self.mngd_sys, cur_lpar_w=self.lpar_not_activated) proc_vldr.validate() self.assertTrue(inactive_resize_checks.called, 'Inactive resize validations not performed.') # Test resizing running state lpar makes active_resize_checks with mock.patch('pypowervm.utils.validation.ProcValidator.' '_validate_active_resize') as active_resize_checks: proc_vldr = vldn.ProcValidator(self.lpar_running, self.mngd_sys, cur_lpar_w=self.lpar_running) proc_vldr.validate() self.assertTrue(active_resize_checks.called, 'Active resize validations not performed.') # Test resizing starting state lpar makes active_resize_checks with mock.patch('pypowervm.utils.validation.ProcValidator.' '_validate_active_resize') as active_resize_checks: proc_vldr = vldn.ProcValidator(self.lpar_starting, self.mngd_sys, cur_lpar_w=self.lpar_starting) proc_vldr.validate() self.assertTrue(active_resize_checks.called, 'Active resize validations not performed.') @mock.patch('pypowervm.utils.validation.ProcValidator.validate') @mock.patch('pypowervm.utils.validation.MemValidator.validate') def test_validator_check_dlpar(self, mem_val_validate, proc_val_validate): vldr = vldn.LPARWrapperValidator(self.lpar_1_proc, self.mngd_sys, cur_lpar_w=self.lpar_no_rmc) vldr.validate_all(check_dlpar=False) mem_val_validate.assert_called_once_with(check_dlpar=False) proc_val_validate.assert_called_once_with(check_dlpar=False) mem_val_validate.reset_mock() proc_val_validate.reset_mock() vldr = vldn.LPARWrapperValidator(self.lpar_running, self.mngd_sys, cur_lpar_w=self.lpar_running) vldr.validate_all() mem_val_validate.assert_called_once_with(check_dlpar=True) proc_val_validate.assert_called_once_with(check_dlpar=True) pypowervm-1.1.24/pypowervm/tests/utils/test_uuid.py0000664000175000017500000000400113571367171022174 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. 
class TestUUID(unittest.TestCase):
    """Unit tests for pypowervm.utils.uuid."""

    def test_uuid_conversion(self):
        """convert_uuid_to_pvm remaps the UUID's leading nibble.

        PowerVM requires the most significant bit of the UUID to be zero,
        so only the first hex digit can change; the remainder must be
        preserved verbatim.
        """
        tail = '89ffb20-5d19-4a8c-bb80-13650627d985'
        # (input lead nibble, expected lead nibble) after conversion.
        for src_lead, exp_lead in (('0', '0'), ('9', '1'), ('c', '4')):
            converted = uuid_utils.convert_uuid_to_pvm(src_lead + tail)
            self.assertEqual(exp_lead + tail, converted)

    def test_id_or_uuid(self):
        """id_or_uuid distinguishes short partition IDs from UUID strings."""
        self.assertEqual((False, 123), uuid_utils.id_or_uuid(123))
        # The same behavior must hold for native str and unicode inputs.
        for conv in (lambda val: val, six.text_type):
            self.assertEqual((False, 123),
                             uuid_utils.id_or_uuid(conv('123')))
            # Both hyphenated and bare 32-digit UUID forms are recognized.
            for cand in ('12345678-abcd-ABCD-0000-0a1B2c3D4e5F',
                         '12345678abcdABCD00000a1B2c3D4e5F'):
                cand = conv(cand)
                self.assertEqual((True, cand), uuid_utils.id_or_uuid(cand))
            # One hex digit too many: neither an ID nor a UUID.
            self.assertRaises(
                ValueError, uuid_utils.id_or_uuid,
                conv('12345678-abcd-ABCD-0000-0a1B2c3D4e5F0'))
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import mock import six import testtools from pypowervm.tests import test_fixtures as fx from pypowervm.tests.test_utils import xml_sections from pypowervm.utils import lpar_builder as lpar_bldr import pypowervm.utils.uuid as pvm_uuid from pypowervm.wrappers import base_partition as bp from pypowervm.wrappers import logical_partition as lpar LPAR_BLDR_DATA = 'lpar_builder.txt' class TestLPARBuilder(testtools.TestCase): """Unit tests for the lpar builder.""" def setUp(self): super(TestLPARBuilder, self).setUp() self.sections = xml_sections.load_xml_sections(LPAR_BLDR_DATA) self.adpt = self.useFixture(fx.AdapterFx()).adpt def _bld_mgd_sys(proc_units, mem_reg, srr, pcm, ame, ppt, affinity=False, psbc=False): # Build a fake managed system wrapper mngd_sys = mock.Mock() type(mngd_sys).proc_units_avail = ( mock.PropertyMock(return_value=proc_units)) type(mngd_sys).memory_region_size = ( mock.PropertyMock(return_value=mem_reg)) def get_cap(cap): capabilities = { 'simplified_remote_restart_capable': srr, 'ibmi_restrictedio_capable': True, 'active_memory_expansion_capable': ame, 'physical_page_table_ratio_capable': ppt, 'affinity_check_capable': affinity, 'partition_secure_boot_capable': psbc } return capabilities[cap] mngd_sys.get_capability.side_effect = get_cap type(mngd_sys).proc_compat_modes = ( mock.PropertyMock(return_value=pcm)) return mngd_sys self.mngd_sys = _bld_mgd_sys(20.0, 128, True, bp.LPARCompat.ALL_VALUES, False, 
False) self.mngd_sys_no_srr = _bld_mgd_sys(20.0, 128, False, ['POWER6'], False, False) self.mngd_sys_ame = _bld_mgd_sys(20.0, 128, True, bp.LPARCompat.ALL_VALUES, True, False) self.mngd_sys_ppt = _bld_mgd_sys(20.0, 128, True, bp.LPARCompat.ALL_VALUES, False, True) self.mngd_sys_affinity = _bld_mgd_sys(20.0, 128, True, bp.LPARCompat.ALL_VALUES, False, True, affinity=True) self.mngd_sys_secure_boot = _bld_mgd_sys(20.0, 128, True, bp.LPARCompat.ALL_VALUES, False, True, psbc=True) self.stdz_sys1 = lpar_bldr.DefaultStandardize(self.mngd_sys) self.stdz_sys2 = lpar_bldr.DefaultStandardize(self.mngd_sys_no_srr) self.stdz_sys3 = lpar_bldr.DefaultStandardize(self.mngd_sys_ame) self.stdz_sys4 = lpar_bldr.DefaultStandardize(self.mngd_sys_ppt) self.stdz_sys5 = lpar_bldr.DefaultStandardize(self.mngd_sys_affinity) self.stdz_sys6 = lpar_bldr.DefaultStandardize( self.mngd_sys_secure_boot) def assert_xml(self, entry, string): self.assertEqual(six.b(string.rstrip('\n')), entry.element.toxmlstring()) def test_proc_modes(self): # Base minimum attrs attr = dict(name='TheName', memory=1024, vcpu=1) # No proc keys specified bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertFalse(bldr._shared_proc_keys_specified()) self.assertFalse(bldr._dedicated_proc_keys_specified()) # Default is shared self.assertTrue(bldr._shared_procs_specified()) # Shared proc keys specified for key in ('proc_units', 'max_proc_units', 'min_proc_units', 'uncapped_weight'): bldr = lpar_bldr.LPARBuilder(self.adpt, dict(attr, **{key: 1}), self.stdz_sys1) self.assertTrue(bldr._shared_proc_keys_specified()) self.assertFalse(bldr._dedicated_proc_keys_specified()) self.assertTrue(bldr._shared_procs_specified()) # Shared modes specified for mode in ('capped', 'uncapped'): bldr = lpar_bldr.LPARBuilder( self.adpt, dict(attr, sharing_mode=mode), self.stdz_sys1) self.assertTrue(bldr._shared_proc_keys_specified()) self.assertFalse(bldr._dedicated_proc_keys_specified()) 
self.assertTrue(bldr._shared_procs_specified()) # Dedicated modes specified for mode in ('sre idle proces', 'sre idle procs active', 'sre idle procs always', 'keep idle procs'): bldr = lpar_bldr.LPARBuilder( self.adpt, dict(attr, sharing_mode=mode), self.stdz_sys1) self.assertFalse(bldr._shared_proc_keys_specified()) self.assertTrue(bldr._dedicated_proc_keys_specified()) self.assertFalse(bldr._shared_procs_specified()) # Dedicated proc explicitly true bldr = lpar_bldr.LPARBuilder( self.adpt, dict(attr, dedicated_proc='TRUE'), self.stdz_sys1) self.assertFalse(bldr._shared_proc_keys_specified()) self.assertFalse(bldr._dedicated_proc_keys_specified()) self.assertFalse(bldr._shared_procs_specified()) # Dedicated proc explicitly false bldr = lpar_bldr.LPARBuilder( self.adpt, dict(attr, dedicated_proc='NO'), self.stdz_sys1) self.assertFalse(bldr._shared_proc_keys_specified()) self.assertFalse(bldr._dedicated_proc_keys_specified()) self.assertTrue(bldr._shared_procs_specified()) def test_builder(self): # Build the minimum attributes, Shared Procs # shared_lpar test file uses non-default max I/O slots attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, max_io_slots=2000) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertIsNotNone(bldr) new_lpar = bldr.build() self.assertIsNotNone(new_lpar) self.assert_xml(new_lpar, self.sections['shared_lpar']) self.assertEqual('TheName', new_lpar.name) # Rebuild the same lpar with a different name attr['name'] = 'NewName' bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) rbld_lpar = bldr.rebuild(new_lpar) self.assertEqual('NewName', rbld_lpar.name) # Build the minimum attributes, Dedicated Procs attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, dedicated_proc=True) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertIsNotNone(bldr) new_lpar = bldr.build() self.assertIsNotNone(new_lpar) self.assert_xml(new_lpar.entry, 
self.sections['dedicated_lpar']) # Build the minimum attributes, Dedicated Procs = 'true' attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, dedicated_proc='true') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assert_xml(new_lpar.entry, self.sections['dedicated_lpar']) # Leave out memory attr = dict(name=lpar, env=bp.LPARType.AIXLINUX, vcpu=1) self.assertRaises( lpar_bldr.LPARBuilderException, lpar_bldr.LPARBuilder, self.adpt, attr, self.stdz_sys1) # Bad memory lmb multiple attr = dict(name='lpar', memory=3333, env=bp.LPARType.AIXLINUX, vcpu=1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Check the validation of the LPAR type when not specified attr = dict(name='TheName', memory=1024, vcpu=1, max_io_slots=2000) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['shared_lpar']) # Check the PPT ratio element builds correctly attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, ppt_ratio='1:512') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys4) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['ppt_lpar']) # Ensure secure boot is set properly attr = dict(name='SecureBoot', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, secure_boot=2) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys6) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['secure_boot_lpar']) # Ensure secure boot disabled works for IBMi attr = dict(name='SecureBoot', memory=1024, env=bp.LPARType.OS400, vcpu=1, secure_boot=0) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys6) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['secure_boot_ibmi_lpar']) # LPAR name too long attr = dict(name='lparlparlparlparlparlparlparlparlparlparlparlpar' 'lparlparlparlparlparlparlparlparlparlparlparlparlparlpar', memory=1024, 
env=bp.LPARType.AIXLINUX, vcpu=1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(lpar_bldr.LPARBuilderException, bldr.build) # Test setting uuid uuid1 = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4())) attr = dict(name='lpar', memory=1024, uuid=uuid1, vcpu=1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) lpar_w = bldr.build() self.assertEqual(uuid1.upper(), lpar_w.uuid) # Test setting id id1 = 1234 attr = dict(name='lpar', memory=1024, uuid=uuid1, vcpu=1, id=id1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) lpar_w = bldr.build() self.assertEqual(id1, lpar_w.id) # Bad LPAR type attr = dict(name='lpar', memory=1024, env='BADLPARType', vcpu=1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Bad IO Slots attr = dict(name='lpar', memory=1024, max_io_slots=0, env=bp.LPARType.AIXLINUX, vcpu=1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) attr = dict(name='lpar', memory=1024, max_io_slots=(65534+1), env=bp.LPARType.AIXLINUX, vcpu=1) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Good non-default IO Slots and SRR attr = dict(name='TheName', memory=1024, max_io_slots=2000, env=bp.LPARType.AIXLINUX, vcpu=1, srr_capability=False) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['shared_lpar']) # Bad SRR value. 
attr = dict(name='lpar', memory=1024, max_io_slots=64, env=bp.LPARType.AIXLINUX, vcpu=1, srr_capability='Frog') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Uncapped / capped shared procs and enabled lpar metrics attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, sharing_mode=bp.SharingMode.CAPPED, srr_capability='true', enable_lpar_metric=True) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['capped_lpar']) # Uncapped and no SRR capability attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, sharing_mode=bp.SharingMode.UNCAPPED, uncapped_weight=100, processor_compatibility='POWER6') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys2) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['uncapped_lpar']) # Build dedicated but only via dedicated attributes m = bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ALWAYS attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, sharing_mode=m, processor_compatibility='PoWeR7') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assert_xml(new_lpar.entry, self.sections['ded_lpar_sre_idle_procs_always']) # Desired mem outside min attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, min_mem=2048) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Desired mem outside max attr = dict(name='lpar', memory=5000, env=bp.LPARType.AIXLINUX, vcpu=1, max_mem=2048) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # AME not supported on host attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, ame_factor='1.5') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # AME outside valid range 
attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, ame_factor='0.5') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys3) self.assertRaises(ValueError, bldr.build) # PPT not supported on host attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, ppt_ratio='1:64') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys3) self.assertRaises(ValueError, bldr.build) # PPT ratio not a valid choice attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, ppt_ratio='1:76') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys3) self.assertRaises(ValueError, bldr.build) # Affinity unsupported host attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, enforce_affinity_check='true') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys4) self.assertRaises(ValueError, bldr.build) # Enforce affinity score check for Lpar with incorrect value attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, enforce_affinity_check='BADVALUE') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys5) self.assertRaises(ValueError, bldr.build) # Secure boot on unsupported host attr = dict(name='SecureBoot', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, secure_boot=2) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys5) self.assertRaises(ValueError, bldr.build) # Secure boot of IBMi LPAR attr = dict(name='SecureBoot', memory=1024, env=bp.LPARType.OS400, vcpu=1, secure_boot=2) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys6) self.assertRaises(ValueError, bldr.build) # Secure boot bad value attr = dict(name='SecureBoot', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1, secure_boot=10) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys6) self.assertRaises(ValueError, bldr.build) # Secure boot as 0 on unsupported host # This dictionary should equate to the 'dedicated_lpar' XML on an # unsupported host. 
attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024, vcpu=1, dedicated_proc=True, secure_boot=0) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assert_xml(new_lpar, self.sections['dedicated_lpar']) # Desired vcpu outside min attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=1, min_vcpu=2) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys5) self.assertRaises(ValueError, bldr.build) # Desired vcpu outside max attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, max_vcpu=2) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Ensure the calculated procs are not below the min attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, min_proc_units=3) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() procs = new_lpar.proc_config.shared_proc_cfg self.assertEqual(3.0, procs.min_units) # Ensure the calculated procs are all 0.5 attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=1, proc_units=0.5) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() procs = new_lpar.proc_config.shared_proc_cfg self.assertEqual(0.5, procs.min_units) self.assertEqual(0.5, procs.max_units) self.assertEqual(0.5, procs.desired_units) # Create a temp standardizer with a smaller proc units factor stdz = lpar_bldr.DefaultStandardize(self.mngd_sys, proc_units_factor=0.1) # Ensure the min, max, and desired proc units works as VCPU is scaled. 
for x in [1, 5, 10, 17, 20]: attr = dict(name='lpar', memory=2048, vcpu=x) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, stdz) new_lpar = bldr.build() procs = new_lpar.proc_config.shared_proc_cfg self.assertEqual(round(0.1 * x, 2), procs.min_units) self.assertEqual(round(0.1 * x, 2), procs.max_units) self.assertEqual(round(0.1 * x, 2), procs.desired_units) # Ensure the calculated procs are below the max attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, max_proc_units=2.1) stdz = lpar_bldr.DefaultStandardize( self.mngd_sys, proc_units_factor=0.9) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, stdz) new_lpar = bldr.build() procs = new_lpar.proc_config.shared_proc_cfg self.assertEqual(2.1, procs.max_units) # Ensure proc units factor is between 0.1 and 1.0 self.assertRaises( lpar_bldr.LPARBuilderException, lpar_bldr.DefaultStandardize, self.mngd_sys, proc_units_factor=1.01) self.assertRaises( lpar_bldr.LPARBuilderException, lpar_bldr.DefaultStandardize, self.mngd_sys, proc_units_factor=0.01) # Avail priority outside max attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, avail_priority=332) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Avail priority bad parm attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, avail_priority='BADVALUE') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Avail priority at min value attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, avail_priority=0) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assertEqual(new_lpar.avail_priority, 0) # Avail priority at max value attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, avail_priority=255) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assertEqual(new_lpar.avail_priority, 255) # Enable Lpar 
metric with correct value as true attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, enable_lpar_metric='true') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assertEqual(new_lpar.allow_perf_data_collection, True) # Enable Lpar metric with correct value as false attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, enable_lpar_metric='false') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assertEqual(new_lpar.allow_perf_data_collection, False) # Enable Lpar Metric with bad parm other than true or false attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3, enable_lpar_metric='BADVALUE') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertRaises(ValueError, bldr.build) # Proc compat for pc in bp.LPARCompat.ALL_VALUES: attr = dict(name='name', memory=1024, vcpu=1, processor_compatibility=pc) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) new_lpar = bldr.build() self.assertEqual(new_lpar.pending_proc_compat_mode, pc) attr = dict(name='name', memory=1024, vcpu=1, processor_compatibility='POWER6') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys2) new_lpar = bldr.build() self.assertEqual(new_lpar.pending_proc_compat_mode, 'POWER6') # Ensure failure occurs on validation after the host supported # proc modes are loaded and not on convert_value which converts # across all acceptable proc modes. 
# This works because 'POWER8' is in LPARCompat.ALL_VALUES attr = dict(name='name', memory=1024, vcpu=1, processor_compatibility='POWER8') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys2) exp_msg = ("Value 'POWER8' is not valid for field 'Processor " "Compatability Mode' with acceptable choices: ['POWER6']") try: bldr.build() except Exception as e: self.assertEqual(six.text_type(e), exp_msg) # Build a VIOS with I/O slots slots = [bp.IOSlot.bld(self.adpt, True, 12345), bp.IOSlot.bld(self.adpt, False, 54321)] attr = dict(name='TheName', env=bp.LPARType.VIOS, memory=1024, vcpu=1, dedicated_proc=True, phys_io_slots=slots) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertIsNotNone(bldr) new_lpar = bldr.build() self.assertIsNotNone(new_lpar) self.assert_xml(new_lpar.entry, self.sections['vios']) def test_IBMi(self): attr = dict(name='TheName', env=bp.LPARType.OS400, memory=1024, vcpu=1, ame_factor=False) bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertIsNotNone(bldr) new_lpar = bldr.build() self.assertIsNotNone(new_lpar) self.assertTrue(new_lpar.restrictedio) tag_io = new_lpar.io_config.tagged_io self.assertEqual('HMC', tag_io.console) self.assertEqual('0', tag_io.load_src) self.assertEqual('NONE', tag_io.alt_load_src) attr = dict(name='OS400LPAR', env=bp.LPARType.OS400, memory=1024, vcpu=1, console='CONSOLE', load_src='9', alt_load_src='9') bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1) self.assertIsNotNone(bldr) new_lpar = bldr.build() self.assertIsNotNone(new_lpar) self.assertTrue(new_lpar.restrictedio) tag_io = new_lpar.io_config.tagged_io self.assertEqual('CONSOLE', tag_io.console) self.assertEqual('9', tag_io.load_src) self.assertEqual('9', tag_io.alt_load_src) def test_io_slots(self): attr = dict(name='TheName', memory=1024, vcpu=1) nlpar = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1).build() self.assertEqual([], nlpar.io_config.io_slots) attr = dict(name='TheName', memory=1024, 
vcpu=1, phys_io_slots=[]) nlpar = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1).build() self.assertEqual([], nlpar.io_config.io_slots) slots = [bp.IOSlot.bld(self.adpt, True, 12345), bp.IOSlot.bld(self.adpt, False, 54321)] attr = dict(name='TheName', memory=1024, vcpu=1, phys_io_slots=slots) nlpar = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1).build() self.assertEqual(len(slots), len(nlpar.io_config.io_slots)) for exp, act in zip(slots, nlpar.io_config.io_slots): self.assertEqual(exp.drc_index, act.drc_index) self.assertEqual(exp.bus_grp_required, act.bus_grp_required) pypowervm-1.1.24/pypowervm/tests/wrappers/0000775000175000017500000000000013571367172020326 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/wrappers/test_cluster.py0000664000175000017500000001673613571367171023434 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest import pypowervm.const as pc import pypowervm.entities as ent import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.cluster as clust import pypowervm.wrappers.mtms as mtmwrap import pypowervm.wrappers.storage as stor CLUSTER_RESP = ('' 'neotest' '' 'hdisk123' '' '' 'a.example.com' '') class TestCluster(twrap.TestWrapper): file = 'cluster.txt' wrapper_class_to_test = clust.Cluster def test_name(self): self.assertEqual(self.dwrap.name, 'neoclust1') def test_id(self): self.assertEqual(self.dwrap.id, '22cfc907d2abf511e4b2d540f2e95daf30') def test_ssp_uri(self): self.assertEqual(self.dwrap.ssp_uri, 'https://9.1.2.3:12443/rest/api' '/uom/SharedStoragePool/e357a79a-7a3d-35b6-8405-55ab' '6a2d0de7') def test_ssp_uuid(self): self.assertEqual(self.dwrap.ssp_uuid.lower(), 'e357a79a-7a3d-35b6-8405-55ab6a2d0de7') def test_repos_pv(self): repos = self.dwrap.repos_pv # PV is tested elsewhere. Minimal verification here. self.assertEqual(repos.name, 'hdisk2') # Test setter newrepos = stor.PV.wrap( ent.Element( "PhysicalVolume", None, attrib={'schemaVersion': 'V1_2_0'}, children=[ ent.Element('Metadata', None, children=[ent.Element('Atom', None)]), ent.Element('VolumeName', None, text='hdisk99')])) self.dwrap.repos_pv = newrepos self.assertEqual(self.dwrap.repos_pv.name, 'hdisk99') # Now try the same thing, but using factory constructor to build PV newrepos = stor.PV.bld(None, name='hdisk123') self.dwrap.repos_pv = newrepos self.assertAlmostEqual(self.dwrap.repos_pv.name, 'hdisk123') def test_nodes(self): """Tests the Node and MTMS wrappers as well.""" nodes = self.dwrap.nodes self.assertEqual(3, len(nodes)) node = nodes[0] self.assertEqual(node.hostname, 'foo.example.com') self.assertEqual(node.lpar_id, 2) self.assertEqual( node.vios_uri, 'https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' '98498bed-c78a-3a4f-b90a-4b715418fcb6/VirtualIOServer/58C9EB1D-' '7213-4956-A011-77D43CC4ACCC') self.assertEqual( node.vios_uuid, 
'58C9EB1D-7213-4956-A011-77D43CC4ACCC') self.assertEqual(clust.NodeState.UP, nodes[0].state) # Validate other NodeState enum values self.assertEqual(clust.NodeState.DOWN, nodes[1].state) self.assertEqual(clust.NodeState.UNKNOWN, nodes[2].state) # Make sure the different Node entries are there self.assertEqual(nodes[1].hostname, 'bar.example.com') # Test MTMS mtms = node.mtms self.assertEqual(mtms.machine_type, '8247') self.assertEqual(mtms.model, '22L') self.assertEqual(mtms.serial, '2125D1A') # Test nodes setters node2 = nodes[1] nodes.remove(node) self.assertEqual(2, len(self.dwrap.nodes)) node._hostname('blah.example.com') node._lpar_id(9) node._vios_uri('https://foo') self.dwrap.nodes = [node2, node] self.assertEqual(len(self.dwrap.nodes), 2) node = self.dwrap.nodes[1] self.assertEqual(node.hostname, 'blah.example.com') self.assertEqual(node.lpar_id, 9) self.assertEqual(node.vios_uri, 'https://foo') # MTMS needs a little more depth node._mtms('1234-567*ABCDEF0') mtms = node.mtms self.assertEqual(mtms.machine_type, '1234') self.assertEqual(mtms.model, '567') self.assertEqual(mtms.serial, 'ABCDEF0') # Now try with a MTMS ElementWrapper node._mtms(mtmwrap.MTMS.bld(None, '4321-765*0FEDCBA')) mtms = node.mtms self.assertEqual(mtms.machine_type, '4321') self.assertEqual(mtms.model, '765') self.assertEqual(mtms.serial, '0FEDCBA') def test_wrapper_classes(self): # Cluster self.assertEqual(clust.Cluster.schema_type, 'Cluster') self.assertEqual(clust.Cluster.schema_ns, pc.UOM_NS) self.assertTrue(clust.Cluster.has_metadata) self.assertEqual(clust.Cluster.default_attrib, pc.DEFAULT_SCHEMA_ATTR) # Node self.assertEqual(clust.Node.schema_type, 'Node') self.assertEqual(clust.Node.schema_ns, pc.UOM_NS) self.assertTrue(clust.Node.has_metadata) self.assertEqual(clust.Node.default_attrib, pc.DEFAULT_SCHEMA_ATTR) def test_bld_cluster(self): n1 = clust.Node.bld(None, hostname='a.example.com') repos = stor.PV.bld(None, name='hdisk123') cl = clust.Cluster.bld(None, 'foo', repos, n1) 
self.assertEqual(cl.name, 'foo') self.assertEqual(cl.repos_pv.name, 'hdisk123') self.assertEqual(cl.schema_type, 'Cluster') self.assertEqual(cl.schema_ns, pc.UOM_NS) nodes = cl.nodes self.assertEqual(len(nodes), 1) node = nodes[0] self.assertEqual(node.hostname, 'a.example.com') self.assertEqual(node.schema_type, clust.Node.schema_type) self.assertEqual(node.schema_ns, pc.UOM_NS) # Node.bld() n2 = clust.Node.bld(None, hostname='b.example.com', lpar_id=2, mtms='ABCD-XYZ*1234567', vios_uri='https://foo') nodes.append(n2) self.assertEqual(len(nodes), 2) node = nodes[1] self.assertEqual(node.hostname, 'b.example.com') self.assertEqual(node.lpar_id, 2) self.assertEqual(node.mtms.mtms_str, 'ABCD-XYZ*1234567') self.assertEqual(node.vios_uri, 'https://foo') self.assertEqual(node.schema_type, clust.Node.schema_type) self.assertEqual(node.schema_ns, pc.UOM_NS) def test_cluster_ordering(self): node = clust.Node.bld(None, hostname='a.example.com') repos = stor.PV.bld(None, name='hdisk123') cl = clust.Cluster.bld(None, 'neotest', repos, node) self.assertEqual(cl.toxmlstring(), CLUSTER_RESP.encode('utf-8')) if __name__ == "__main__": unittest.main() pypowervm-1.1.24/pypowervm/tests/wrappers/test_monitor.py0000664000175000017500000001062313571367171023427 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class TestPCM(twrap.TestWrapper):
    """Tests for PcmPref, the Performance & Capacity Monitoring wrapper."""

    file = 'pcm_pref.txt'
    wrapper_class_to_test = monitor.PcmPref

    # The four monitoring-enablement flag properties exercised below.
    _FLAGS = ('ltm_enabled', 'aggregation_enabled', 'stm_enabled',
              'compute_ltm_enabled')

    def test_pcm(self):
        """Every monitoring flag loads as False and can be flipped to True."""
        pcm_wrap = self.entries[0]
        self.assertEqual('dev-system-6', pcm_wrap.system_name)
        # All collection types are disabled in the canned payload.
        for flag in self._FLAGS:
            self.assertFalse(getattr(pcm_wrap, flag))
        # Each setter round-trips a True back through its getter.
        for flag in self._FLAGS:
            setattr(pcm_wrap, flag, True)
            self.assertTrue(getattr(pcm_wrap, flag))

    def test_str_to_datetime(self):
        """_str_to_datetime collapses any UTC-offset spelling to aware UTC."""
        expected = datetime.datetime(
            year=2015, month=4, day=30, hour=6, minute=11,
            second=35).replace(tzinfo=pytz.utc)
        convert = monitor.MonitorMetrics._str_to_datetime
        # Four spellings of the same instant, with different offsets.
        for stamp in ('2015-04-30T11:11:35.000-05:00',
                      '2015-04-30T01:11:35.000+05:00',
                      '2015-04-30T06:11:35.000-00:00',
                      '2015-04-30T06:11:35.000Z'):
            self.assertEqual(expected, convert(stamp))
class TestSTMMetrics(twrap.TestWrapper):
    """Tests for STMMetrics, the Short Term Monitor raw-metrics feed."""

    file = 'stm_feed.txt'
    wrapper_class_to_test = monitor.STMMetrics

    def test_stm_metrics(self):
        """Verify atom getters and datetime parsing on the first entry."""
        link = ('https://9.1.2.3:12443/rest/api/pcm/ManagedSystem/98498bed'
                '-c78a-3a4f-b90a-4b715418fcb6/RawMetrics/ShortTermMonitor/'
                'STM_8247-22L*1111111_phyp_20150430T061135+0000.json')
        entry_wrap = self.entries[0]
        self.assertEqual('28cb2328-ca14-48ef-a3bd-691debef53dd', entry_wrap.id)
        # Raw atom timestamps keep their original offsets...
        self.assertEqual('2015-04-30T06:11:35.000-05:00', entry_wrap.published)
        self.assertEqual('2015-04-30T06:11:35.002Z', entry_wrap.updated)
        # ...while the *_datetime properties are normalized to UTC.
        self.assertEqual(
            '2015-04-30T01:11:35.000000UTC',
            entry_wrap.published_datetime.strftime(_DATETIME_FORMAT))
        self.assertEqual(
            '2015-04-30T06:11:35.002000UTC',
            entry_wrap.updated_datetime.strftime(_DATETIME_FORMAT))
        self.assertEqual(
            'STM_8247-22L*1111111_phyp_20150430T061135+0000.json',
            entry_wrap.title)
        self.assertEqual('phyp', entry_wrap.category)
        self.assertEqual(link, entry_wrap.link)
        # Wrapping one bare entry must produce the same data as the feed.
        single = monitor.STMMetrics.wrap(self.entries[0].entry)
        self.assertEqual(link, single.link)
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools import pypowervm.const as pc import pypowervm.exceptions as ex import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.storage as stor import pypowervm.wrappers.virtual_io_server as vios class TestVolumeGroup(twrap.TestWrapper): file = 'fake_volume_group.txt' wrapper_class_to_test = stor.VG def test_base(self): """Tests baseline function within the Volume Group.""" self.assertEqual('image_pool', self.dwrap.name) self.assertEqual(1063.3, self.dwrap.capacity) self.assertEqual(1051.1, self.dwrap.available_size) self.assertEqual(1051.2, self.dwrap.free_space) self.assertEqual('00f8d6de00004b000000014a54555cd9', self.dwrap.serial_id) def test_vmedia_repos(self): """Tests the virtual media repositories.""" repos = self.dwrap.vmedia_repos self.assertEqual(1, len(repos)) self.assertEqual('VMLibrary', repos[0].name) self.assertEqual(11, repos[0].size) # Optical media vopts = repos[0].optical_media self.assertEqual(2, len(vopts)) self.assertEqual('blank_media1', vopts[0].media_name) self.assertEqual('blank_media1', vopts[0].name) self.assertEqual(0.0977, vopts[0].size) self.assertEqual('0eblank_media1', vopts[0].udid) self.assertEqual('rw', vopts[0].mount_type) def test_physical_volumes(self): """Tests the physical volumes in the VG.""" pvs = self.dwrap.phys_vols self.assertEqual(1, len(pvs)) pv = pvs[0] self.assertEqual('01MUlCTSAgICAgSVBSLTAgICA1RDgyODMwMDAwMDAwMDQw', pv.udid) self.assertEqual(1089592, pv.capacity) self.assertEqual('hdisk1', pv.name) self.assertEqual('active', pv.state) 
self.assertFalse(pv.is_fc_backed) self.assertTrue(pv.avail_for_use) self.assertEqual('SAS RAID 0 Disk Array', pv.description) self.assertEqual('U78C9.001.WZS0095-P1-C14-R1-L405D828300-L0', pv.loc_code) self.assertEqual(22, pv.read_iops_limit) self.assertEqual(33, pv.write_iops_limit) def test_virtual_disk(self): """Tests the virtual disk gets.""" vdisks = self.dwrap.virtual_disks self.assertEqual(1, len(vdisks)) vdisk = vdisks[0] self.assertEqual('asdf', vdisk.name) self.assertEqual('None', vdisk.label) self.assertEqual(1, vdisk.capacity) self.assertEqual('0300f8d6de00004b000000014a54555cd9.1', vdisk.udid) self.assertEqual( "https://9.1.2.3:12443/rest/api/uom/VirtualIOServer/14B854F7-42CE-" "4FF0-BD57-1D117054E701/VolumeGroup/b6bdbf1f-eddf-3c81-8801-9859eb" "6fedcb", vdisk.vg_uri) self.assertEqual(44, vdisk.read_iops_limit) self.assertEqual(55, vdisk.write_iops_limit) self.assertEqual('Unlocked', vdisk._encryption_state) self.assertIsNone(vdisk._encryption_key) agent = vdisk._encryption_agent self.assertIsInstance(agent, stor._LUKSEncryptor) self.assertEqual(agent.key_size, 256) self.assertEqual(agent.cipher, 'aes-xts-plain64') self.assertEqual(agent.hash_spec, 'sha1') # Test setters vdisk.capacity = 2 self.assertEqual(2, vdisk.capacity) vdisk.name = 'new_name' self.assertEqual('new_name', vdisk.name) vdisk._base_image('base_image') self.assertEqual('base_image', vdisk._get_val_str(stor._DISK_BASE)) vdisk._encryption_state = 'Formatted' self.assertEqual('Formatted', vdisk._encryption_state) vdisk._encryption_key = 'blahblahblah' self.assertEqual('blahblahblah', vdisk._encryption_key) vdisk._encryption_agent = None self.assertIsNone(vdisk._encryption_agent) agent.key_size = 512 self.assertEqual(512, agent.key_size) agent.cipher = 'aes-cbc-essiv:sha256' self.assertEqual('aes-cbc-essiv:sha256', agent.cipher) agent.hash_spec = 'sha256' self.assertEqual('sha256', agent.hash_spec) def test_add_vdisk(self): """Performs a test flow that adds a virtual disk.""" vdisks = 
self.dwrap.virtual_disks self.assertEqual(1, len(vdisks)) encryptor = stor._LUKSEncryptor.bld(None, cipher='aes-cbc-essiv:sha256', key_size=512, hash_spec='sha256') disk = stor.VDisk.bld( None, 'disk_name', 10.9876543, label='label', base_image='cache', file_format=stor.FileFormatType.RAW) disk._encryption_agent = encryptor disk._encryption_key = 'password' disk._encryption_state = 'Unlocked' self.assertIsNotNone(disk) vdisks.append(disk) self.dwrap.virtual_disks = vdisks self.assertEqual(2, len(self.dwrap.virtual_disks)) # make sure the second virt disk matches what we put in vdisk = self.dwrap.virtual_disks[1] self.assertEqual('disk_name', vdisk.name) self.assertEqual(10.987654, vdisk.capacity) self.assertEqual('label', vdisk.label) self.assertEqual(None, vdisk.udid) self.assertEqual('cache', vdisk._get_val_str(stor._DISK_BASE)) self.assertEqual(stor.FileFormatType.RAW, vdisk.file_format) self.assertEqual(stor.VDiskType.LV, vdisk.vdtype) self.assertEqual('password', vdisk._encryption_key) self.assertEqual('Unlocked', vdisk._encryption_state) self.assertIsInstance(vdisk._encryption_agent, stor._LUKSEncryptor) self.assertEqual('aes-cbc-essiv:sha256', vdisk._encryption_agent.cipher) self.assertEqual(512, vdisk._encryption_agent.key_size) self.assertEqual('sha256', vdisk._encryption_agent.hash_spec) # Try a remove self.dwrap.virtual_disks.remove(vdisk) self.assertEqual(1, len(self.dwrap.virtual_disks)) def test_add_lv(self): """Duplicate of above with the LV alias.""" vdisks = self.dwrap.virtual_disks self.assertEqual(1, len(vdisks)) disk = stor.LV.bld( None, 'disk_name', 10.9876543, label='label', base_image='cache', file_format=stor.FileFormatType.RAW) self.assertIsNotNone(disk) vdisks.append(disk) self.dwrap.virtual_disks = vdisks self.assertEqual(2, len(self.dwrap.virtual_disks)) # make sure the second virt disk matches what we put in vdisk = self.dwrap.virtual_disks[1] self.assertEqual('disk_name', vdisk.name) self.assertEqual(10.987654, vdisk.capacity) 
self.assertEqual('label', vdisk.label) self.assertEqual(None, vdisk.udid) self.assertEqual('cache', vdisk._get_val_str(stor._DISK_BASE)) self.assertEqual(stor.FileFormatType.RAW, vdisk.file_format) self.assertEqual(stor.VDiskType.LV, vdisk.vdtype) # Try a remove self.dwrap.virtual_disks.remove(vdisk) self.assertEqual(1, len(self.dwrap.virtual_disks)) def test_add_phys_vol(self): """Performs a test flow that adds a physical volume to the vol grp.""" phys_vols = self.dwrap.phys_vols self.assertEqual(1, len(phys_vols)) phys_vol = stor.PV.bld(None, 'disk1') self.assertIsNotNone(phys_vol) phys_vols.append(phys_vol) self.dwrap.phys_vols = phys_vols self.assertEqual(2, len(self.dwrap.phys_vols)) # Make sure that the second physical volume matches pvol = self.dwrap.phys_vols[1] self.assertEqual('disk1', pvol.name) def test_add_media_repo(self): """Performs a simple add to the volume group of a new media repo.""" media_repos = self.dwrap.vmedia_repos self.assertEqual(1, len(media_repos)) vmedia_repo = stor.VMediaRepos.bld(None, 'repo', 10.12345) self.assertIsNotNone(vmedia_repo) media_repos.append(vmedia_repo) self.dwrap.vmedia_repos = media_repos self.assertEqual(2, len(self.dwrap.vmedia_repos)) # Make sure that the second media repo matches repo = self.dwrap.vmedia_repos[1] self.assertEqual('repo', repo.name) self.assertEqual(10.12345, repo.size) self.assertEqual(0, len(repo.optical_media)) def test_update_media_repo(self): """Performs a simple test to add optical media to an existing repo.""" media_repos = self.dwrap.vmedia_repos vopt_medias = media_repos[0].optical_media self.assertEqual(2, len(vopt_medias)) new_media = stor.VOptMedia.bld(None, 'name', 0.123, 'r') self.assertIsNotNone(new_media) vopt_medias.append(new_media) media_repos[0].optical_media = vopt_medias self.assertEqual(3, len(media_repos[0].optical_media)) # Check the attributes media = media_repos[0].optical_media[2] self.assertEqual('name', media.media_name) self.assertEqual('name', media.name) 
    def test_ordering(self):
        """Set fields out of order; ensure they end up in the right order."""
        # Build a VG from scratch and populate its child elements in a
        # deliberately scrambled order; toxmlstring() must still serialize
        # them in schema-defined order.
        vg = stor.VG._bld(None)
        vg.virtual_disks = []
        vg.name = 'vgname'
        vg.vmedia_repos = []
        vg.set_parm_value(stor._VG_CAPACITY, 123)
        vg.phys_vols = []
        # NOTE(review): the expected literal below appears to have had its
        # XML markup stripped (no element tags remain, only the text nodes
        # '123' and 'vgname') -- presumably an artifact of how this file was
        # extracted. Confirm the full expected XML against upstream before
        # relying on this assertion.
        self.assertEqual(
            vg.toxmlstring(),
            '123vgname'.
            encode('utf-8'))
encode('utf-8')) lu = stor.LUEnt.bld(None, 'lu_name', 1.2345678, thin=True, typ=stor.LUType.IMAGE, tag='my_tag', emulate_model=False) self.assertEqual( lu.toxmlstring(), '' '' 'my_tag' 'false' 'true' '1.234568' 'VirtualIO_Image' 'lu_name'.encode( 'utf-8')) lu = stor.LUEnt.bld(None, 'lu_name', .12300019999, thin=False, clone=mock.Mock(capacity=1.23, udid='cloned_from_udid'), emulate_model=True) self.assertEqual( lu.toxmlstring(), '' '' 'true' 'false' '1.230000' 'cloned_from_udid' 'lu_name'.encode( 'utf-8')) def test_lu_bld_ref(self): lu = stor.LU.bld_ref(None, 'name', 'udid') self.assertEqual('name', lu.name) self.assertEqual('udid', lu.udid) self.assertIsNone(lu.tag) self.assertTrue(lu.emulate_model) lu = stor.LU.bld_ref(None, 'name', 'udid', tag='tag', emulate_model=False) self.assertEqual('name', lu.name) self.assertEqual('udid', lu.udid) self.assertEqual('tag', lu.tag) self.assertFalse(lu.emulate_model) def test_lu_ordering(self): lu = stor.LUEnt._bld(None) lu._name('lu_name') lu._udid('lu_udid') lu._cloned_from_udid('cloned_from') lu._capacity(123) lu.set_parm_value(stor._LU_THIN, 'true') self.assertEqual( lu.toxmlstring(), 'truelu_udid123' '.000000cloned_fromlu_name'. encode('utf-8')) def test_lu_equality(self): # We can equate LUEnt and LU that represent the same LogicalUnit lu1 = stor.LUEnt.bld(None, 'mylu', 1) lu2 = stor.LU.bld(None, 'mylu', 2) self.assertEqual(lu1, lu2) lu1._udid('lu_udid') lu2._udid('lu_udid') self.assertEqual(lu1, lu2) lu2._udid('another_udid') self.assertNotEqual(lu1, lu2) lu2._udid('lu_udid') lu1._name('another_lu') self.assertNotEqual(lu1, lu2) def test_lu_hash(self): udid1 = ('27cfc907d2abf511e4b2d540f2e95daf3' '01a02b0904778d755df5a46fe25e500d8') # Only prefix differs. 
class TestTier(twrap.TestWrapper):
    """Tests for the Tier wrapper (an SSP storage tier)."""

    file = 'tier.txt'
    wrapper_class_to_test = stor.Tier

    def test_props(self):
        """Simple getters on the single Tier in the canned feed."""
        self.assertEqual(1, len(self.entries))
        tier_wrap = self.dwrap
        self.assertEqual('SYSTEM', tier_wrap.name)
        self.assertEqual(
            '256c097502d44311e58004000040f2e95d7d95846d854f9f38',
            tier_wrap.udid)
        self.assertTrue(tier_wrap.is_default)
        self.assertAlmostEqual(3071.25, tier_wrap.capacity)
        # The tier links back to its owning SharedStoragePool.
        self.assertEqual('535e1e51-50a6-3722-b6ed-907ff011a535',
                         tier_wrap.ssp_uuid)
[] pvs = self.dwrap.physical_volumes self.assertEqual(0, len(pvs)) self.dwrap.physical_volumes = [stor.PV.bld(None, 'pv1', udid='udid1'), stor.PV.bld(None, 'pv2', udid='udid2')] pvs = self.dwrap.physical_volumes self.assertEqual(2, len(pvs)) self.assertEqual(dict(pv1='udid1', pv2='udid2'), {pv.name: pv.udid for pv in pvs}) def test_logical_units(self): lus = self.dwrap.logical_units self.assertEqual(len(lus), 1) lu = lus[0] self.assertEqual(lu.udid, '27cfc907d2abf511e4b2d540f2e95daf301a02b090' '4778d755df5a46fe25e500d8') self.assertEqual(lu.name, 'neolu1') self.assertTrue(lu.is_thin) self.assertEqual(lu.lu_type, 'VirtualIO_Disk') self.assertAlmostEqual(lu.capacity, 1, 1) self.assertTrue(lu.in_use) # Test setter self.dwrap.logical_units = [] lus = self.dwrap.logical_units self.assertEqual(0, len(lus)) self.dwrap.logical_units = [stor.LU.bld(None, 'lu1', 1), stor.LU.bld(None, 'lu2', 2)] lus = self.dwrap.logical_units self.assertEqual(2, len(lus)) self.assertEqual(dict(lu1=1, lu2=2), {lu.name: lu.capacity for lu in lus}) def test_fresh_ssp(self): ssp = stor.SSP.bld(None, 'myssp', [ stor.PV.bld(None, name=n) for n in ( 'hdisk123', 'hdisk132', 'hdisk213', 'hdisk231', 'hdisk312', 'hdisk321')]) self.assertEqual(ssp.name, 'myssp') self.assertEqual(ssp.schema_type, stor.SSP.schema_type) self.assertEqual(ssp.schema_ns, pc.UOM_NS) pvs = ssp.physical_volumes self.assertEqual(len(pvs), 6) pv = pvs[3] # hdisk231 self.assertEqual(pv.schema_type, stor.PV.schema_type) self.assertEqual(pv.schema_ns, pc.UOM_NS) self.assertEqual(pv.name, 'hdisk231') def test_lu_bld(self): lu = stor.LU.bld(None, 'lu_name', 123) self.assertEqual( lu.toxmlstring(), '123.000000lu_name'. 
encode('utf-8')) lu = stor.LU.bld(None, 'lu_name', 1.2345678, thin=True) self.assertEqual( lu.toxmlstring(), 'true1.234568lu_name'.encode('utf-8')) lu = stor.LU.bld(None, 'lu_name', .12300019999, thin=False) self.assertEqual( lu.toxmlstring(), 'false0.123000lu_name'.encode('utf-8')) def test_lu_ordering(self): lu = stor.LU._bld(None) lu._name('lu_name') lu._udid('lu_udid') lu.set_parm_value(stor._LU_CLONED_FROM, 'cloned_from') lu._capacity(123) lu.set_parm_value(stor._LU_THIN, 'true') self.assertEqual( lu.toxmlstring(), 'truelu_udid123' '.000000cloned_fromlu_name'. encode('utf-8')) def test_lu_equality(self): lu1 = stor.LU.bld(None, 'mylu', 1) lu2 = stor.LU.bld(None, 'mylu', 2) self.assertEqual(lu1, lu2) lu1._udid('lu_udid') lu2._udid('lu_udid') self.assertEqual(lu1, lu2) lu2._udid('another_udid') self.assertNotEqual(lu1, lu2) lu2._udid('lu_udid') lu1._name('another_lu') self.assertNotEqual(lu1, lu2) def test_lu_hash(self): udid1 = ('27cfc907d2abf511e4b2d540f2e95daf3' '01a02b0904778d755df5a46fe25e500d8') # Only prefix differs. Should fail == but hash equal udid2 = ('29cfc907d2abf511e4b2d540f2e95daf3' '01a02b0904778d755df5a46fe25e500d8') # Last bit differs udid3 = ('27cfc907d2abf511e4b2d540f2e95daf3' '01a02b0904778d755df5a46fe25e500d9') # First bit differs udid4 = ('274fc907d2abf511e4b2d540f2e95daf3' '01a02b0904778d755df5a46fe25e500d8') lu1 = stor.LU.bld(None, 'mylu', 1) lu2 = stor.LU.bld(None, 'mylu', 2) lu1._udid(udid1) lu2._udid(udid1) self.assertEqual({lu1}, {lu2}) lu2._udid(udid2) self.assertNotEqual({lu1}, {lu2}) self.assertEqual(hash(lu1), hash(lu2)) lu2._udid(udid3) self.assertNotEqual({lu1}, {lu2}) lu2._udid(udid4) self.assertNotEqual({lu1}, {lu2}) class TestVFCClientAdapter(twrap.TestWrapper): file = 'vfc_client_adapter_feed.txt' wrapper_class_to_test = stor.VFCClientAdapter def test_vfc_client_adapter(self): """Check getters on VFCClientAdapter. The hard part - the wrapping - was done by TestWrapper. 
""" self.assertEqual('U8247.21L.212A64A-V25-C4', self.dwrap.loc_code) self.assertEqual(25, self.dwrap.lpar_id) self.assertEqual(2, self.dwrap.vios_id) self.assertEqual('Client', self.dwrap.side) self.assertEqual(4, self.dwrap.lpar_slot_num) self.assertEqual(10, self.dwrap.vios_slot_num) self.assertEqual(['C05076087CBA0169', 'C05076087CBA0168'], self.dwrap.wwpns) class TestVIOS(twrap.TestWrapper): file = 'vio_multi_vscsi_mapping.txt' wrapper_class_to_test = vios.VIOS def test_pg83_in_pv(self): """Legitimate pg83 data from the .""" self.assertEqual('600507680282861D88000000000000B5', self.dwrap.phys_vols[1].pg83) # TODO(efried): reinstate when VIOS supports pg83 descriptor in Events # def test_pg83_absent_from_pv(self): # """No pg83 data in .""" # self.assertIsNone(self.dwrap.phys_vols[0].pg83) @mock.patch('pypowervm.wrappers.job.Job.wrap') def test_pg83_absent_from_pv(self, mock_wrap): """LUARecovery.QUERY_INVENTORY when no pg83 in .""" # TODO(efried): remove this method once VIOS supports pg83 in Events mock_jwrap = mock.Mock() mock_jwrap.get_job_results_as_dict.return_value = { 'OutputXML': '' '' '' '' '' '' ''} mock_wrap.return_value = mock_jwrap self.assertEqual('I_am_a_pg83_NAA_descriptor', self.dwrap.phys_vols[0].pg83) mock_jwrap.run_job.assert_called_with( '3443DB77-AED1-47ED-9AA5-3DB9C6CF7089', job_parms=[mock.ANY]) def test_pg83_raises_if_no_parent_entry(self): """Raise attempting to get pg83 if PV has no parent_entry.""" # TODO(efried): remove this method once VIOS supports pg83 in Events pv = stor.PV.bld(self.adpt, 'name', 'udid') self.assertRaises(ex.UnableToBuildPG83EncodingMissingParent, lambda: pv.pg83) def test_bogus_pg83_in_pv(self): """Bogus pg83 data in the doesn't trigger the Job.""" with self.assertLogs(stor.__name__, 'WARNING'): self.assertIsNone(self.dwrap.phys_vols[2].pg83) class TestTargetDevs(twrap.TestWrapper): """Tests for VSCSIMapping.target_dev and {storage_type}TargetDev wrappers. 
SCSI mapping target devices in the test file are laid out as follows: index type LUA 0 LUTargetDev 0x8200000000000000 1 None - 2 None - 3 LUTargetDev 0x8400000000000000 4 LUTargetDev 0x8500000000000000 5 VOptTargetDev 0x8100000000000000 6 LUTargetDev 0x8300000000000000 7 PVTargetDev 0x8600000000000000 8 VDiskTargetDev 0x8700000000000000 """ file = 'fake_vios_ssp_npiv.txt' wrapper_class_to_test = vios.VIOS def test_subtypes_props(self): """Right subtypes and LUA gets/sets from VSCSIMapping.target_dev.""" smaps = self.dwrap.scsi_mappings # LU self.assertIsInstance(smaps[0].target_dev, stor.LUTargetDev) self.assertEqual('0x8200000000000000', smaps[0].target_dev.lua) smaps[0].target_dev._lua('lu_lua') self.assertEqual('lu_lua', smaps[0].target_dev.lua) # No TargetDevice self.assertIsNone(smaps[1].target_dev) # VOpt self.assertIsInstance(smaps[5].target_dev, stor.VOptTargetDev) self.assertEqual('0x8100000000000000', smaps[5].target_dev.lua) smaps[5].target_dev._lua('vopt_lua') self.assertEqual('vopt_lua', smaps[5].target_dev.lua) # PV self.assertIsInstance(smaps[7].target_dev, stor.PVTargetDev) self.assertEqual('0x8600000000000000', smaps[7].target_dev.lua) smaps[7].target_dev._lua('pv_lua') self.assertEqual('pv_lua', smaps[7].target_dev.lua) # LV/VDisk self.assertIsInstance(smaps[8].target_dev, stor.VDiskTargetDev) self.assertEqual('0x8700000000000000', smaps[8].target_dev.lua) smaps[8].target_dev._lua('lv_lua') self.assertEqual('lv_lua', smaps[8].target_dev.lua) def test_bld_set(self): """Test *TargetDev.bld(lua) and setting VSCSIMapping.target_dev.""" smaps = self.dwrap.scsi_mappings for klass in (stor.LUTargetDev, stor.VOptTargetDev, stor.PVTargetDev, stor.VDiskTargetDev): lua_tag = klass.__class__.__name__ + "_lua" # Build LU target dev vtd = klass.bld('adap', lua_tag) self.assertEqual('adap', vtd.adapter) self.assertEqual(lua_tag, vtd.lua) # Assign to a target_dev in the SCSI mappings. 
Pick one that's # initially empty: the first iteration will prove we can assign to # an empty one; subsequent iterations will prove we can overwrite. smaps[1]._target_dev(vtd) self.assertIsInstance(smaps[1].target_dev, klass) self.assertEqual(lua_tag, smaps[1].target_dev.lua) class TestStorageTypes(testtools.TestCase): def test_fileio(self): fio = stor.FileIO.bld_ref('adap', 'path') self.assertEqual('path', fio.label) self.assertEqual('path', fio.path) self.assertEqual('path', fio.name) self.assertIsNone(fio.capacity) self.assertIsNone(fio.udid) self.assertIsNone(fio.tag) self.assertTrue(fio.emulate_model) self.assertEqual('File', fio.vdtype) self.assertIsNone(fio.backstore_type) # Explicit backstore, legacy bld method fio = stor.FileIO.bld( 'adap', 'path', backstore_type=stor.BackStoreType.FILE_IO, tag='tag', emulate_model=False) self.assertEqual('path', fio.label) self.assertEqual('path', fio.path) self.assertEqual('path', fio.name) self.assertIsNone(fio.capacity) self.assertIsNone(fio.udid) self.assertEqual('tag', fio.tag) self.assertFalse(fio.emulate_model) self.assertEqual('File', fio.vdtype) self.assertEqual('fileio', fio.backstore_type) def test_rbd(self): rbd = stor.RBD.bld_ref('adap', 'pool/volume') self.assertEqual('pool/volume', rbd.name) self.assertEqual('pool/volume', rbd.label) self.assertEqual('RBD', rbd.vdtype) self.assertEqual('user:rbd', rbd.backstore_type) self.assertIsNone(rbd.tag) self.assertTrue(rbd.emulate_model) rbd = stor.RBD.bld_ref('adap', 'pool/volume', tag='tag', emulate_model=False, user='tester') self.assertEqual('pool/volume', rbd.name) self.assertEqual('pool/volume', rbd.label) self.assertEqual('RBD', rbd.vdtype) self.assertEqual('user:rbd', rbd.backstore_type) self.assertEqual('tag', rbd.tag) self.assertFalse(rbd.emulate_model) self.assertEqual('tester', rbd._get_val_str('RbdUser')) def test_vdisk(self): vdisk = stor.VDisk.bld('adap', 'name', 10) self.assertEqual('name', vdisk.name) self.assertEqual(10, vdisk.capacity) 
self.assertEqual('None', vdisk.label) self.assertIsNone(vdisk._get_val_str('BaseImage')) self.assertIsNone(vdisk.file_format) self.assertIsNone(vdisk.tag) self.assertTrue(vdisk.emulate_model) vdisk = stor.VDisk.bld( 'adap', 'name', 10, label='label', base_image='img', file_format='format', tag='tag', emulate_model=False) self.assertEqual('name', vdisk.name) self.assertEqual(10, vdisk.capacity) self.assertEqual('label', vdisk.label) self.assertEqual('img', vdisk._get_val_str('BaseImage')) self.assertEqual('format', vdisk.file_format) self.assertEqual('tag', vdisk.tag) self.assertFalse(vdisk.emulate_model) vdisk = stor.VDisk.bld_ref('adap', 'name') self.assertEqual('name', vdisk.name) self.assertIsNone(vdisk.tag) self.assertTrue(vdisk.emulate_model) vdisk = stor.VDisk.bld_ref('adap', 'name', tag='tag', emulate_model=False) self.assertEqual('name', vdisk.name) self.assertEqual('tag', vdisk.tag) self.assertFalse(vdisk.emulate_model) def test_pv(self): pv = stor.PV.bld('adap', 'name') self.assertEqual('name', pv.name) self.assertIsNone(pv.udid) self.assertIsNone(pv.tag) self.assertTrue(pv.emulate_model) pv = stor.PV.bld('adap', 'name', udid='udid', tag='tag', emulate_model=False) self.assertEqual('name', pv.name) self.assertEqual('udid', pv.udid) self.assertEqual('tag', pv.tag) self.assertFalse(pv.emulate_model) pypowervm-1.1.24/pypowervm/tests/wrappers/test_iocard.py0000664000175000017500000004171113571367171023203 0ustar neoneo00000000000000# Copyright 2016, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import unittest import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.util as u import pypowervm.wrappers.iocard as card import pypowervm.wrappers.managed_system as ms import pypowervm.wrappers.network as net class TestSRIOVAdapter(twrap.TestWrapper): file = 'sys_with_sriov.txt' wrapper_class_to_test = ms.System def setUp(self): super(TestSRIOVAdapter, self).setUp() self.sriovs = self.dwrap.asio_config.sriov_adapters def test_list(self): self.assertEqual(4, len(self.sriovs)) for sriov in self.sriovs: self.assertIsInstance(sriov, card.SRIOVAdapter) def test_attrs(self): desc = 'PCIe2 4-port (10Gb FCoE & 1GbE) SR&RJ45 Adapter' self.assertEqual('553713696', self.sriovs[0].id) self.assertEqual(desc, self.sriovs[0].description) self.assertEqual('U78C7.001.RCH0004-P1-C8', self.sriovs[0].phys_loc_code) def test_ids(self): """Test .id and .sriov_adap_id.""" # AdapterID inherited from IOAdapter self.assertEqual('553713696', self.sriovs[0].id) self.assertEqual(1, self.sriovs[0].sriov_adap_id) self.assertEqual('553713680', self.sriovs[2].id) self.assertIsNone(self.sriovs[2].sriov_adap_id) def test_mode(self): self.assertEqual('Sriov', self.sriovs[0].mode) # Test setter self.sriovs[0].mode = 'unknown' self.assertEqual('unknown', self.sriovs[0].mode) def test_personality(self): self.assertEqual(card.SRIOVAdapterPersonality.MIN_CAPACITY, self.sriovs[0].personality) self.assertEqual(card.SRIOVAdapterPersonality.MAX_MIN_CAPACITY, self.sriovs[1].personality) def test_state(self): self.assertEqual('Running', self.sriovs[0].state) def test_physical_ports(self): adapter = self.sriovs[0] phyports = adapter.phys_ports self.assertEqual(4, len(phyports)) # Get converged and ethernet physical ports each conv_port, eth_port = self.sriovs[0].phys_ports[:3:2] # Converged physical ports test self.assertEqual(self.sriovs[0], conv_port.sriov_adap) 
self.assertEqual(self.sriovs[0].sriov_adap_id, conv_port.sriov_adap_id) self.assertEqual(None, conv_port.label) conv_port.label = 'updatedlabel' self.assertEqual('updatedlabel', conv_port.label) self.assertEqual('U78C7.001.RCH0004-P1-C8-T1', conv_port.loc_code) self.assertEqual(0, conv_port.port_id) self.assertEqual(None, conv_port.sublabel) conv_port.sublabel = 'updatedsublabel' self.assertEqual('updatedsublabel', conv_port.sublabel) self.assertEqual(True, conv_port.link_status) self.assertEqual(20, conv_port.cfg_max_lps) conv_port.cfg_max_lps = 40 self.assertEqual(40, conv_port.cfg_max_lps) self.assertEqual(2, conv_port.cfg_lps) self.assertEqual(0.02, conv_port.min_granularity) self.assertEqual(20, conv_port.supp_max_lps) self.assertEqual(0.02, conv_port.allocated_capacity) # Ethernet physical ports test self.assertEqual(self.sriovs[0], eth_port.sriov_adap) self.assertEqual(self.sriovs[0].sriov_adap_id, eth_port.sriov_adap_id) self.assertEqual(None, eth_port.label) eth_port.label = 'updatedlabel' self.assertEqual('updatedlabel', eth_port.label) self.assertEqual('U78C7.001.RCH0004-P1-C8-T3', eth_port.loc_code) self.assertEqual(2, eth_port.port_id) self.assertEqual(None, eth_port.sublabel) eth_port.sublabel = 'updatedsublabel' self.assertEqual('updatedsublabel', eth_port.sublabel) self.assertEqual(True, eth_port.link_status) self.assertEqual(4, eth_port.cfg_max_lps) eth_port.cfg_max_lps = 40 self.assertEqual(40, eth_port.cfg_max_lps) self.assertEqual(0.02, eth_port.min_granularity) self.assertEqual(4, eth_port.supp_max_lps) self.assertEqual(0.02, eth_port.allocated_capacity) self.assertEqual(card.SRIOVSpeed.E1G, eth_port.curr_speed) self.assertEqual(card.SRIOVPPMTU.E1500, eth_port.mtu) eth_port.mtu = card.SRIOVPPMTU.E9000 self.assertEqual(card.SRIOVPPMTU.E9000, eth_port.mtu) self.assertFalse(eth_port.flow_ctl) eth_port.flow_ctl = True self.assertTrue(eth_port.flow_ctl) self.assertEqual(net.VSwitchMode.VEB, eth_port.switch_mode) eth_port.switch_mode = 
net.VSwitchMode.VEPA self.assertEqual(net.VSwitchMode.VEPA, eth_port.switch_mode) def test_physical_ports_no_vivify(self): """Don't accidentally vivify [Converged]EthernetPhysicalPorts. See https://bugs.launchpad.net/pypowervm/+bug/1617050 This test case has to prove that, when EthernetPhysicalPorts doesn't exist in the XML, asking for phys_ports doesn't create it. """ # 2nd and 3rd SRIOV adapters have no pports adp = self.sriovs[2] self.assertNotIn('= 1) for vswitch in vswitches: self.assertIsNotNone(vswitch.etag) def test_data(self): self.assertEqual('ETHERNET0', self.dwrap.name) self.assertEqual(0, self.dwrap.switch_id) self.assertEqual('Veb', self.dwrap.mode) self.assertEqual('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' '4abca7ff-3710-3160-b9e4-cb4456c33f43/VirtualSwitch/' '4d9735ae-feaf-32c2-a1bc-102026df9168?group=None', self.dwrap.href) self.assertEqual('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' '4abca7ff-3710-3160-b9e4-cb4456c33f43/VirtualSwitch/' '4d9735ae-feaf-32c2-a1bc-102026df9168', self.dwrap.related_href) def test_wrapper_class(self): self.assertEqual(net.VSwitch.schema_type, 'VirtualSwitch') self.assertEqual(net.VSwitch.schema_ns, pc.UOM_NS) self.assertTrue(net.VSwitch.has_metadata) self.assertEqual(net.VSwitch.default_attrib, pc.DEFAULT_SCHEMA_ATTR) def test_set_mode(self): """Tests that the vSwitch element can have the mode set.""" vs = net.VSwitch.bld(None, 'Test') self.assertEqual(net.VSwitchMode.VEB, vs.mode) vs.mode = net.VSwitchMode.VEPA self.assertEqual(net.VSwitchMode.VEPA, vs.mode) vs.mode = net.VSwitchMode.VEB self.assertEqual(net.VSwitchMode.VEB, vs.mode) class TestLoadGroup(unittest.TestCase): def test_wrapper_class(self): self.assertEqual(net.LoadGroup.schema_type, 'LoadGroup') self.assertEqual(net.LoadGroup.schema_ns, pc.UOM_NS) self.assertTrue(net.LoadGroup.has_metadata) self.assertEqual(net.LoadGroup.default_attrib, pc.DEFAULT_SCHEMA_ATTR) class TestTrunkAdapter(unittest.TestCase): def test_wrapper_class(self): 
        # (continuation of TestTrunkAdapter.test_wrapper_class)
        self.assertEqual(net.TrunkAdapter.schema_type, 'TrunkAdapter')
        self.assertEqual(net.TrunkAdapter.schema_ns, pc.UOM_NS)
        self.assertFalse(net.TrunkAdapter.has_metadata)
        self.assertEqual(net.TrunkAdapter.default_attrib,
                         pc.DEFAULT_SCHEMA_ATTR)


class TestSEA(unittest.TestCase):
    """Schema metadata checks for the SharedEthernetAdapter wrapper."""

    def test_wrapper_class(self):
        self.assertEqual(net.SEA.schema_type, 'SharedEthernetAdapter')
        self.assertEqual(net.SEA.schema_ns, pc.UOM_NS)
        self.assertTrue(net.SEA.has_metadata)
        self.assertEqual(net.SEA.default_attrib, pc.DEFAULT_SCHEMA_ATTR)


class TestNetBridge(unittest.TestCase):
    """Schema metadata checks for the NetworkBridge wrapper."""

    def test_wrapper_class(self):
        self.assertEqual(net.NetBridge.schema_type, 'NetworkBridge')
        self.assertEqual(net.NetBridge.schema_ns, pc.UOM_NS)
        self.assertTrue(net.NetBridge.has_metadata)
        self.assertEqual(net.NetBridge.default_attrib, pc.DEFAULT_SCHEMA_ATTR)


class TestNetwork(twrap.TestWrapper):
    """Functional tests for NetBridge loaded from fake_network_bridge.txt."""

    file = 'fake_network_bridge.txt'
    wrapper_class_to_test = net.NetBridge
    # Start each test with local PVM traits; set_vnet flips them per test.
    mock_adapter_fx_args = dict(traits=fx.LocalPVMTraits)

    def set_vnet(self, aware):
        # Since they're all references through the same adapter, setting traits
        # on dwrap's element's adapter ought to affect all sub-elements, etc.
        # (body of set_vnet) Swap the adapter fixture's traits so every
        # wrapper that shares the adapter sees vnet-aware (HMC) vs
        # vnet-unaware (PVM) behavior.
        self.adptfx.set_traits(fx.RemoteHMCTraits if aware
                               else fx.RemotePVMTraits)

    def test_pvid(self):
        self.assertEqual(1, self.dwrap.pvid)

    def test_configuration_state(self):
        self.assertEqual(net.SEAState.CONFIGURED,
                         self.dwrap.seas[0].configuration_state)

    def test_load_balance(self):
        self.assertTrue(self.dwrap.load_balance)

    def test_uuid(self):
        self.assertEqual(
            '764f3423-04c5-3b96-95a3-4764065400bd', self.dwrap.uuid)

    def test_vnet_uri_list(self):
        uri_list = self.dwrap.vnet_uri_list
        self.assertEqual(13, len(uri_list))
        self.assertEqual('http', uri_list[0][:4])

    def test_contrl_channel(self):
        # Control channel comes off a VIOS feed fixture, not self.dwrap.
        vios_file = pvmhttp.PVMFile('fake_vios_feed.txt')
        vios_resp = pvmhttp.PVMResp(pvmfile=vios_file).get_response()
        vios_wrap = vios.VIOS.wrap(vios_resp.feed.entries[0])

        self.assertEqual('ent5', vios_wrap.seas[0].control_channel)

    def test_contrl_channel_id(self):
        self.assertEqual(99, self.dwrap.control_channel_id)

    def test_crt_net_bridge(self):
        # Build a NetBridge from scratch via bld() and validate its parts.
        vswitch_file = pvmhttp.PVMFile('fake_vswitch_feed.txt')
        vswitch_resp = pvmhttp.PVMResp(pvmfile=vswitch_file).get_response()
        vsw_wrap = net.VSwitch.wrap(vswitch_resp.feed.entries[0])

        # Create mocked data
        nb = net.NetBridge.bld(self.adpt, pvid=1,
                               vios_to_backing_adpts=[('vio_href1', 'ent0'),
                                                      ('vio_href2', 'ent2')],
                               vswitch=vsw_wrap, load_balance=True)

        self.assertIsNotNone(nb)
        self.assertEqual(1, nb.pvid)
        # One SEA per (vios, backing adapter) pair; no load groups yet.
        self.assertEqual(2, len(nb.seas))
        self.assertEqual(0, len(nb.load_grps))
        self.assertTrue(nb.load_balance)

        # First SEA. Should be the primary
        sea1 = nb.seas[0]
        self.assertIsNotNone(sea1)
        self.assertEqual(1, sea1.pvid)
        self.assertEqual('vio_href1', sea1.vio_uri)
        self.assertEqual('ent0', sea1.backing_device.dev_name)
        self.assertTrue(sea1.is_primary)

        # Validate the trunk.
        # (continuation of test_crt_net_bridge) Primary SEA's trunk adapter.
        ta = sea1.primary_adpt
        self.assertTrue(ta._required)
        self.assertEqual(1, ta.pvid)
        self.assertFalse(ta.has_tag_support)
        self.assertEqual(vsw_wrap.switch_id, ta.vswitch_id)
        # Primary SEA gets trunk priority 1.
        self.assertEqual(1, ta.trunk_pri)
        self.assertEqual(vsw_wrap.related_href, ta.associated_vswitch_uri)

        # Check that the second SEA is similar but not primary.
        sea2 = nb.seas[1]
        self.assertIsNotNone(sea2)
        self.assertEqual(1, sea2.pvid)
        self.assertEqual('vio_href2', sea2.vio_uri)
        self.assertEqual('ent2', sea2.backing_device.dev_name)
        self.assertFalse(sea2.is_primary)
        self.assertIsNone(sea2.ha_mode)

        # Validate the second SEA trunk.
        ta = sea2.primary_adpt
        self.assertTrue(ta._required)
        self.assertEqual(1, ta.pvid)
        self.assertFalse(ta.has_tag_support)
        self.assertEqual(vsw_wrap.switch_id, ta.vswitch_id)
        # Non-primary SEA gets trunk priority 2.
        self.assertEqual(2, ta.trunk_pri)
        self.assertEqual(vsw_wrap.related_href, ta.associated_vswitch_uri)

    def test_crt_sea(self):
        # Build a single SEA via bld() and validate it and its trunk.
        vswitch_file = pvmhttp.PVMFile('fake_vswitch_feed.txt')
        vswitch_resp = pvmhttp.PVMResp(pvmfile=vswitch_file).get_response()
        vsw_wrap = net.VSwitch.wrap(vswitch_resp.feed.entries[0])

        # Create mocked data
        sea = net.SEA.bld(self.adpt, pvid=1, vios_href='127.0.0.1',
                          adpt_name='ent0', vswitch=vsw_wrap)

        self.assertIsNotNone(sea)
        self.assertEqual(1, sea.pvid)
        self.assertEqual('127.0.0.1', sea.vio_uri)
        self.assertEqual('ent0', sea.backing_device.dev_name)

        ta = sea.primary_adpt
        self.assertTrue(ta._required)
        self.assertEqual(1, ta.pvid)
        self.assertFalse(ta.has_tag_support)
        self.assertEqual(vsw_wrap.switch_id, ta.vswitch_id)
        self.assertEqual(1, ta.trunk_pri)
        self.assertEqual(vsw_wrap.related_href, ta.associated_vswitch_uri)

    def test_crt_trunk_adapter(self):
        # Build a standalone TrunkAdapter via bld() and validate it.
        vswitch_file = pvmhttp.PVMFile('fake_vswitch_feed.txt')
        vswitch_resp = pvmhttp.PVMResp(pvmfile=vswitch_file).get_response()
        vsw_wrap = net.VSwitch.wrap(vswitch_resp.feed.entries[0])

        # Create mocked data
        ta = net.TrunkAdapter.bld(self.adpt, pvid=1, vlan_ids=[1, 2, 3],
                                  vswitch=vsw_wrap)

        self.assertIsNotNone(ta)
        self.assertTrue(ta._required)
        # (continuation of test_crt_trunk_adapter)
        self.assertEqual(1, ta.pvid)
        self.assertEqual([1, 2, 3], ta.tagged_vlans)
        # Tag support is on because vlan_ids were supplied.
        self.assertTrue(ta.has_tag_support)
        self.assertEqual(vsw_wrap.switch_id, ta.vswitch_id)
        self.assertEqual(1, ta.trunk_pri)
        self.assertEqual(vsw_wrap.related_href, ta.associated_vswitch_uri)

        # Try adding a VLAN to the trunk adapter.
        ta.tagged_vlans.append(4)
        self.assertEqual([1, 2, 3, 4], ta.tagged_vlans)

    def test_crt_load_group(self):
        # Create my mocked data
        uri_list = ['a', 'b', 'c']
        pvid = 1
        lg = net.LoadGroup.bld(self.adpt, pvid, uri_list)

        # Validate the data back
        self.assertIsNotNone(lg)
        self.assertEqual(1, lg.pvid)
        self.assertEqual(3, len(lg.vnet_uri_list))
        self.assertEqual('a', lg.vnet_uri_list[0])
        self.assertEqual('b', lg.vnet_uri_list[1])
        self.assertEqual('c', lg.vnet_uri_list[2])

    def test_load_groups(self):
        # First load group in the fixture is the primary one.
        prim_ld_grp = self.dwrap.load_grps[0]
        self.assertIsNotNone(prim_ld_grp)
        self.assertEqual(1, prim_ld_grp.pvid)
        self.assertEqual(1, len(prim_ld_grp.trunk_adapters))
        self.assertEqual('U8246.L2C.0604C7A-V4-C2',
                         prim_ld_grp.trunk_adapters[0].loc_code)
        self.assertEqual(4, prim_ld_grp.trunk_adapters[0].vios_id)

        # Exactly one additional load group; its vnet list is mutable
        # in place (append/remove round-trip).
        addl_ld_grps = self.dwrap.load_grps[1:]
        self.assertIsNotNone(addl_ld_grps)
        self.assertEqual(1, len(addl_ld_grps))
        self.assertEqual(
            12, len(addl_ld_grps[0].vnet_uri_list))
        addl_ld_grps[0].vnet_uri_list.append('fake_uri')
        self.assertEqual(
            13, len(addl_ld_grps[0].vnet_uri_list))
        addl_ld_grps[0].vnet_uri_list.remove('fake_uri')
        self.assertEqual(
            12, len(addl_ld_grps[0].vnet_uri_list))

        # Make sure that the reference to the Network Bridge is there.
        self.assertEqual(self.dwrap, prim_ld_grp._nb_root)

    def test_load_group_modification(self):
        """Verifies that the callbacks to the Network Bridge work.

        When modifying the Virtual Network list in the Load Group, those
        updates should be reflected back into the Network Bridge.
""" orig_len = len(self.dwrap.vnet_uri_list) ld_grp = self.dwrap.load_grps[0] lg_vnets = ld_grp.vnet_uri_list first_vnet = lg_vnets[0] lg_vnets.remove(first_vnet) self.assertEqual(orig_len - 1, len(self.dwrap.vnet_uri_list)) def test_sea_modification(self): """Verifies that the SEA can have a Trunk Adapter added to it.""" vswitch_file = pvmhttp.PVMFile('fake_vswitch_feed.txt') vswitch_resp = pvmhttp.PVMResp(pvmfile=vswitch_file).get_response() vsw_wrap = net.VSwitch.wrap(vswitch_resp.feed.entries[0]) # Create mocked data ta = net.TrunkAdapter.bld( self.adpt, pvid=1, vlan_ids=[1, 2, 3], vswitch=vsw_wrap) self.assertEqual(1, len(self.dwrap.seas[0].addl_adpts)) self.dwrap.seas[0].addl_adpts.append(ta) self.assertEqual(2, len(self.dwrap.seas[0].addl_adpts)) # Check that the total trunks is now three elements self.assertEqual(3, len(self.dwrap.seas[0]._get_trunks())) def test_supports_vlan(self): """Tests the supports_vlan method.""" # Both styles should produce similar results. vnet_paths = [False, True] for use_vnet in vnet_paths: self.set_vnet(use_vnet) # PVID of primary adapter self.assertTrue(self.dwrap.supports_vlan(1)) self.assertTrue(self.dwrap.supports_vlan("1")) # PVID of second adapter self.assertFalse(self.dwrap.supports_vlan(4094)) self.assertFalse(self.dwrap.supports_vlan("4094")) # Additional VLAN of second adapter. 
self.assertTrue(self.dwrap.supports_vlan(100)) self.assertTrue(self.dwrap.supports_vlan("100")) self.assertTrue(self.dwrap.supports_vlan(2228)) self.assertTrue(self.dwrap.supports_vlan("2228")) self.assertTrue(self.dwrap.supports_vlan(2227)) self.assertTrue(self.dwrap.supports_vlan("2227")) # A VLAN that isn't anywhere self.assertFalse(self.dwrap.supports_vlan(123)) def test_supports_vlan_no_vnet(self): """Tests that a VLAN change affects trunks, not vnets.""" self.dwrap.seas[0].primary_adpt.tagged_vlans.append(128) self.set_vnet(False) self.assertTrue(self.dwrap.supports_vlan(128)) self.set_vnet(True) self.assertFalse(self.dwrap.supports_vlan(128)) def test_no_primary_adpt(self): """Tests rare case that SEA has no primary adapter.""" # Test to make sure None reference error is not hit self.assertIsNone(self.dwrap.seas[1].primary_adpt) self.assertEqual(self.dwrap.seas[1].addl_adpts, []) self.assertFalse(self.dwrap.seas[1].contains_device('abcd')) ct_ch = self.dwrap.seas[1].control_channel self.assertTrue(self.dwrap.seas[1].contains_device(ct_ch)) def test_vswitch_id(self): """Tests that the pass thru of the vswitch id works.""" self.assertEqual(2, self.dwrap.vswitch_id) def test_arbitrary_pvids(self): self.set_vnet(False) self.assertEqual([4094], self.dwrap.arbitrary_pvids) self.set_vnet(True) self.assertEqual([4094], self.dwrap.arbitrary_pvids) def test_list_vlans(self): # Both styles should produce similar results. vnet_paths = [False, True] for use_vnet in vnet_paths: self.set_vnet(use_vnet) # 1 is the PVID. 
4094 is the arbitrary (only one arbitrary) val = set(self.dwrap.list_vlans()) self.assertEqual({100, 150, 175, 200, 250, 300, 333, 350, 900, 1001, 2227, 2228, 1}, val) val = set(self.dwrap.list_vlans(pvid=False, arbitrary=True)) self.assertEqual({4094, 100, 150, 175, 200, 250, 300, 333, 350, 900, 1001, 2227, 2228}, val) val = set(self.dwrap.list_vlans(pvid=False)) self.assertEqual({100, 150, 175, 200, 250, 300, 333, 350, 900, 1001, 2227, 2228}, val) val = set(self.dwrap.list_vlans(arbitrary=True)) self.assertEqual({1, 4094, 100, 150, 175, 200, 250, 300, 333, 350, 900, 1001, 2227, 2228}, val) def test_list_vlan_no_vnet(self): """Tests that a VLAN change affects trunks, not vnets.""" self.dwrap.seas[0].primary_adpt.tagged_vlans.append(128) self.set_vnet(False) self.assertIn(128, self.dwrap.list_vlans()) self.set_vnet(True) self.assertNotIn(128, self.dwrap.list_vlans()) def test_seas(self): self.assertEqual(2, len(self.dwrap.seas)) sea = self.dwrap.seas[0] # Test some properties self.assertEqual(1, sea.pvid) self.assertEqual('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' '726e9cb3-6576-3df5-ab60-40893d51d074/VirtualIOServer' '/691019AF-506A-4896-AADE-607E21FA93EE', sea.vio_uri) self.assertEqual('ent8', sea.dev_name) self.assertEqual(net.HAMode.DISABLED, sea.ha_mode) new_sea = copy.deepcopy(sea) self.dwrap.seas.append(new_sea) self.assertEqual(3, len(self.dwrap.seas)) sea_copy = copy.copy(self.dwrap.seas) sea_copy.remove(new_sea) self.dwrap.seas = sea_copy self.assertEqual(2, len(self.dwrap.seas)) # Test the 'contains_device' method within the SEA. 
self.assertTrue(new_sea.contains_device('ent5')) self.assertFalse(new_sea.contains_device('ent2')) def test_sea_trunks(self): """Tests the trunk adapters on the SEA.""" sea = self.dwrap.seas[0] # The primary adapter testing prim_t = sea.primary_adpt self.assertIsNotNone(prim_t) self.assertEqual(1, prim_t.pvid) self.assertFalse(prim_t.has_tag_support) self.assertEqual(0, len(prim_t.tagged_vlans)) self.assertEqual(2, prim_t.vswitch_id) self.assertEqual('ent4', prim_t.dev_name) self.assertEqual(1, prim_t.trunk_pri) # The secondary adapter. addl_adpts = sea.addl_adpts self.assertIsNotNone(addl_adpts) self.assertEqual(1, len(addl_adpts)) addl_adpt = addl_adpts[0] self.assertEqual(4094, addl_adpt.pvid) self.assertTrue(addl_adpt.has_tag_support) self.assertEqual(12, len(addl_adpt.tagged_vlans)) self.assertEqual(2, addl_adpt.vswitch_id) self.assertEqual('ent5', addl_adpt.dev_name) self.assertEqual(1, addl_adpt.trunk_pri) # Try setting the tagged vlans orig_vlans = copy.copy(addl_adpt.tagged_vlans) addl_adpt.tagged_vlans.append(5) self.assertEqual(13, len(addl_adpt.tagged_vlans)) addl_adpt.tagged_vlans = [1] self.assertEqual(1, len(addl_adpt.tagged_vlans)) addl_adpt.tagged_vlans = orig_vlans self.assertEqual(12, len(addl_adpt.tagged_vlans)) # Modify the tag support addl_adpt.has_tag_support = False self.assertFalse(addl_adpt.has_tag_support) addl_adpt.has_tag_support = True self.assertTrue(addl_adpt.has_tag_support) def test_varied_on(self): self.assertEqual(2, len(self.dwrap.seas)) sea = self.dwrap.seas[0] # Try the varied_on property prim_t = sea.primary_adpt self.assertTrue(prim_t.varied_on) class TestCNAWrapper(twrap.TestWrapper): file = 'fake_cna.txt' wrapper_class_to_test = net.CNA mock_adapter_fx_args = dict(traits=fx.LocalPVMTraits) def setUp(self): super(TestCNAWrapper, self).setUp() self.assertIsNotNone(self.entries.etag) def test_standard_crt(self): """Tests a standard create of the CNA.""" test = net.CNA.bld(self.adpt, 1, "fake_vs") self.assertEqual('fake_vs', 
test.vswitch_uri) self.assertFalse(test.is_tagged_vlan_supported) self.assertEqual([], test.tagged_vlans) self.assertIsNotNone(test._use_next_avail_slot_id) self.assertTrue(test._use_next_avail_slot_id) self.assertIsNone(test.mac) self.assertIsNone(test.vsi_type_id) self.assertIsNone(test.vsi_type_version) self.assertIsNone(test.vsi_type_manager_id) self.assertIsNone(test.vswitch_id) self.assertEqual(1, test.pvid) self.assertNotIn(net._TA_TRUNK_PRI, str(test.toxmlstring())) self.assertFalse(test.is_trunk) self.assertIsNone(test.trunk_pri) self.assertFalse(test.enabled) def test_trunk_crt(self): """Tests a standard create of the CNA.""" test = net.CNA.bld(self.adpt, 1, "fake_vs", trunk_pri=2) self.assertEqual('fake_vs', test.vswitch_uri) self.assertFalse(test.is_tagged_vlan_supported) self.assertEqual([], test.tagged_vlans) self.assertIsNotNone(test._use_next_avail_slot_id) self.assertTrue(test._use_next_avail_slot_id) self.assertIsNone(test.mac) self.assertIsNone(test.vsi_type_id) self.assertIsNone(test.vsi_type_version) self.assertIsNone(test.vsi_type_manager_id) self.assertIsNone(test.vswitch_id) self.assertEqual(1, test.pvid) self.assertIn(net._TA_TRUNK_PRI, str(test.toxmlstring())) self.assertTrue(test.is_trunk) self.assertEqual(test.trunk_pri, 2) def test_unique_crt(self): """Tests the create path with a non-standard flow for the CNA.""" test = net.CNA.bld( self.adpt, 5, "fake_vs", mac_addr="aa:bb:cc:dd:ee:ff", slot_num=5, addl_tagged_vlans=[6, 7, 8, 9]) self.assertEqual('fake_vs', test.vswitch_uri) self.assertTrue(test.is_tagged_vlan_supported) self.assertEqual([6, 7, 8, 9], test.tagged_vlans) self.assertEqual(5, test.slot) self.assertFalse(test._use_next_avail_slot_id) self.assertIsNotNone(test.mac) self.assertEqual("AABBCCDDEEFF", test.mac) self.assertEqual(5, test.pvid) self.assertIsNone(test.vsi_type_id) self.assertIsNone(test.vsi_type_version) self.assertIsNone(test.vsi_type_manager_id) self.assertIsNone(test.vswitch_id) 
self.assertNotIn(net._TA_TRUNK_PRI, str(test.toxmlstring())) self.assertFalse(test.is_trunk) self.assertIsNone(test.trunk_pri) def test_unasi_field(self): """UseNextAvailable(High)SlotID field is (not) used, as appropriate.""" mock_vswitch = mock.Mock() mock_vswitch.related_href = 'href' # Do TrunkAdapter as well as CNA here # Traits fixture starts off "PVM" - should use High cna = net.CNA.bld(self.adpt, 1, "fake_vs") self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertIsNotNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertTrue(cna._use_next_avail_slot_id) ta = net.TrunkAdapter.bld(self.adpt, 1, [], mock_vswitch) self.assertIsNone(ta._find(net._USE_NEXT_AVAIL_SLOT)) self.assertIsNotNone(ta._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertEqual('Unknown', ta.dev_name) # When slot specified, no UseNextAvailable(High)SlotID cna = net.CNA.bld(self.adpt, 1, "fake_vs", slot_num=1) self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertFalse(cna._use_next_avail_slot_id) # Swap to HMC - should *not* use High self.adptfx.set_traits(fx.RemoteHMCTraits) cna = net.CNA.bld(self.adpt, 1, "fake_vs") self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertIsNotNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertTrue(cna._use_next_avail_slot_id) ta = net.TrunkAdapter.bld(self.adpt, 1, [], mock_vswitch) self.assertIsNone(ta._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertIsNotNone(ta._find(net._USE_NEXT_AVAIL_SLOT)) self.assertEqual('Unknown', cna.dev_name) # When slot specified, no UseNextAvailable(High)SlotID cna = net.CNA.bld(self.adpt, 1, "fake_vs", slot_num=1) self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertFalse(cna._use_next_avail_slot_id) @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.create') @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get') 
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') def test_cna_create(self, mock_vget, mock_lget, mock_ewrap_create): """CNA.create hack that mucks with UseNextAvailable(High)SlotID.""" lpar_parent = mock.Mock(env=bp.LPARType.AIXLINUX, is_mgmt_partition=False) vios_parent = mock.Mock(env=bp.LPARType.VIOS, is_mgmt_partition=False) mgmt_parent = mock.Mock(env=bp.LPARType.AIXLINUX, is_mgmt_partition=True) sde_parent = mock.Mock(env=bp.LPARType.VIOS, is_mgmt_partition=True) # Exception paths for invalid parent spec cna = net.CNA.bld(self.adpt, 1, 'href') self.assertRaises(ValueError, cna.create) self.assertRaises(ValueError, cna.create, parent_type='foo') self.assertRaises(ValueError, cna.create, parent_uuid='foo') mock_ewrap_create.assert_not_called() mock_vget.assert_not_called() mock_lget.assert_not_called() # No parent, string parent_type gets converted. Validate element # twiddling for VIOS and mgmt mock_lget.return_value = mgmt_parent mock_vget.return_value = vios_parent for ptyp, mck in ((lpar.LPAR, mock_lget), (vios.VIOS, mock_vget)): cna = net.CNA.bld(self.adpt, 1, 'href') self.assertEqual( mock_ewrap_create.return_value, cna.create(parent_type=ptyp.schema_type, parent_uuid='puuid')) mock_ewrap_create.assert_called_once_with( parent_type=ptyp, parent_uuid='puuid', timeout=-1, parent=mck.return_value) # One mck should get called in each loop mck.assert_called_once_with(self.adpt, uuid='puuid') # Element should get twiddled each time self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertIsNotNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertTrue(cna._use_next_avail_slot_id) mock_ewrap_create.reset_mock() mock_lget.reset_mock() mock_vget.reset_mock() # No parent, wrapper parent_type, element twiddling for SDE (VIOS+mgmt) mock_vget.return_value = sde_parent cna = net.CNA.bld(self.adpt, 1, 'href') self.assertEqual( mock_ewrap_create.return_value, cna.create(parent_type=vios.VIOS, parent_uuid='puuid')) 
mock_ewrap_create.assert_called_once_with( parent_type=vios.VIOS, parent_uuid='puuid', timeout=-1, parent=mock_vget.return_value) mock_vget.assert_called_once_with(self.adpt, uuid='puuid') mock_lget.assert_not_called() # Element should get twiddled. self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertIsNotNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertTrue(cna._use_next_avail_slot_id) mock_ewrap_create.reset_mock() mock_vget.reset_mock() # Parent specified, no element twiddling for plain LPAR cna = net.CNA.bld(self.adpt, 1, 'href') self.assertEqual(mock_ewrap_create.return_value, cna.create(parent=lpar_parent)) mock_ewrap_create.assert_called_once_with( parent_type=None, parent_uuid=None, timeout=-1, parent=lpar_parent) mock_vget.assert_not_called() mock_lget.assert_not_called() # Element should not get twiddled. self.assertIsNotNone(cna._find(net._USE_NEXT_AVAIL_HIGH_SLOT)) self.assertIsNone(cna._find(net._USE_NEXT_AVAIL_SLOT)) self.assertTrue(cna._use_next_avail_slot_id) mock_ewrap_create.reset_mock() # If slot specified, we skip the whole hack self.adptfx.set_traits(fx.RemoteHMCTraits) cna = net.CNA.bld(self.adpt, 1, 'href', slot_num=1) self.assertEqual(mock_ewrap_create.return_value, cna.create(parent_type='ptyp', parent_uuid='puuid')) mock_ewrap_create.assert_called_once_with( parent_type='ptyp', parent_uuid='puuid', timeout=-1, parent=None) mock_vget.assert_not_called() mock_lget.assert_not_called() mock_ewrap_create.reset_mock() # For HMC, we skip the whole hack self.adptfx.set_traits(fx.RemoteHMCTraits) cna = net.CNA.bld(self.adpt, 1, 'href') self.assertEqual(mock_ewrap_create.return_value, cna.create(parent_type='ptyp', parent_uuid='puuid')) mock_ewrap_create.assert_called_once_with( parent_type='ptyp', parent_uuid='puuid', timeout=-1, parent=None) mock_vget.assert_not_called() mock_lget.assert_not_called() def test_attrs(self): """Test getting the attributes.""" self.assertEqual(32, self.dwrap.slot) 
self.assertEqual("FAD4433ED120", self.dwrap.mac) self.assertEqual(100, self.dwrap.pvid) self.assertEqual('https://9.1.2.3:12443/rest/api/uom/LogicalPartition/' '0A68CFAB-F62B-46D4-A6A0-F4EBE0264AD5/' 'ClientNetworkAdapter/' '6445b54b-b9dc-3bc2-b1d3-f8cc22ba95b8', self.dwrap.href) self.assertEqual('U8246.L2C.0604C7A-V24-C32', self.dwrap.loc_code) self.assertEqual([53, 54, 55], self.dwrap.tagged_vlans) self.assertTrue(self.dwrap.is_tagged_vlan_supported) self.assertEqual('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' '726e9cb3-6576-3df5-ab60-40893d51d074/VirtualSwitch/' '9e42d4a9-9725-3007-9932-d85374ebf5cf', self.dwrap.vswitch_uri) self.assertEqual(0, self.dwrap.vswitch_id) self.assertEqual('VSITID', self.dwrap.vsi_type_id) self.assertEqual('77.99', self.dwrap.vsi_type_version) self.assertEqual('VSIMID', self.dwrap.vsi_type_manager_id) self.assertEqual("192.168.2.6", self.dwrap.ip_address) self.assertIsNotNone("192.168.2.0", self.dwrap.gateway) self.assertIsNotNone("255.255.255.0", self.dwrap.subnet_mask) def test_tagged_vlan_modification(self): """Tests that the tagged vlans can be modified.""" # Update via getter and Actionable List tags = self.dwrap.tagged_vlans tags.append(56) self.assertEqual(4, len(self.dwrap.tagged_vlans)) tags.remove(56) self.assertEqual(3, len(self.dwrap.tagged_vlans)) # Update via setter self.dwrap.tagged_vlans = [1, 2, 3] self.assertEqual([1, 2, 3], self.dwrap.tagged_vlans) self.dwrap.tagged_vlans = [] self.assertEqual([], self.dwrap.tagged_vlans) self.dwrap.tagged_vlans = [53, 54, 55] # Try the tagged vlan support self.dwrap.is_tagged_vlan_supported = False self.assertFalse(self.dwrap.is_tagged_vlan_supported) self.dwrap.is_tagged_vlan_supported = True def test_mac_set(self): orig_mac = self.dwrap.mac mac = "AA:bb:CC:dd:ee:ff" self.dwrap.mac = mac self.assertEqual("AABBCCDDEEFF", self.dwrap.mac) self.dwrap.mac = orig_mac def test_get_slot(self): """Test getting the VirtualSlotID.""" self.assertEqual(32, self.dwrap.slot) def 
test_get_mac(self): """Test that we can get the mac address.""" self.assertEqual("FAD4433ED120", self.dwrap.mac) def test_pvid(self): """Test that the PVID returns properly.""" self.assertEqual(100, self.dwrap.pvid) self.dwrap.pvid = 101 self.assertEqual(101, self.dwrap.pvid) self.dwrap.pvid = 100 def test_vswitch_uri(self): orig_uri = self.dwrap.vswitch_uri self.dwrap.vswitch_uri = 'test' self.assertEqual('test', self.dwrap.vswitch_uri) self.dwrap.vswitch_uri = orig_uri def test_wrapper_class(self): self.assertEqual(net.CNA.schema_type, 'ClientNetworkAdapter') self.assertEqual(net.CNA.schema_ns, pc.UOM_NS) self.assertTrue(net.CNA.has_metadata) self.assertEqual(net.CNA.default_attrib, pc.DEFAULT_SCHEMA_ATTR) def test_get_trunk_pri(self): """Test that we can get the trunk priority.""" self.assertEqual(1, self.dwrap.trunk_pri) def test_set_trunk_pri(self): """Test that we can set the trunk priority.""" self.assertEqual(1, self.dwrap.trunk_pri) self.dwrap._trunk_pri(2) self.assertEqual(2, self.dwrap.trunk_pri) def test_is_trunk(self): """Test that we can get if this adapter is a trunk.""" self.assertTrue(self.dwrap.is_trunk) self.dwrap._trunk_pri(None) self.assertFalse(self.dwrap.is_trunk) def test_lpar_id(self): """Test that we can get the local partition id.""" self.assertEqual(3, self.dwrap.lpar_id) def test_set_dev_name(self): """Test that we can set the device name.""" self.assertEqual('Unknown', self.dwrap.dev_name) self.dwrap._dev_name('tap-01234') self.assertEqual('tap-01234', self.dwrap.dev_name) def test_enabled(self): """Test that we disable/enable.""" self.assertTrue(self.dwrap.enabled) self.dwrap.enabled = False self.assertFalse(self.dwrap.enabled) self.dwrap.enabled = True self.assertTrue(self.dwrap.enabled) def test_ovs_attrs(self): self.assertEqual( 'iface-id=ba9d8ec3-64b2-47fe-9f50-e12ba373814c,' 'iface-status=active,' 'attached-mac=fa:e6:c8:3f:80:20,' 'vm-uuid=64443c49-920d-47d7-9b78-1216845c51f5', self.dwrap.ovs_ext_ids) self.assertEqual('br-int', 
self.dwrap.ovs_bridge) self.dwrap.ovs_ext_ids = 'foo' self.assertEqual('foo', self.dwrap.ovs_ext_ids) self.dwrap.ovs_bridge = 'bar' self.assertEqual('bar', self.dwrap.ovs_bridge) def test_mtu(self): self.assertEqual(1500, self.dwrap.configured_mtu) self.dwrap.configured_mtu = 2000 self.assertEqual(2000, self.dwrap.configured_mtu) pypowervm-1.1.24/pypowervm/tests/wrappers/test_logical_partition.py0000664000175000017500000006766313571367171025463 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest import uuid import mock import testtools import pypowervm.tests.test_fixtures as fx from pypowervm.tests.test_utils import pvmhttp import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.utils.uuid as pvm_uuid import pypowervm.wrappers.base_partition as bp import pypowervm.wrappers.logical_partition as lpar LPAR_HTTPRESP_FILE = "lpar.txt" IBMI_HTTPRESP_FILE = "lpar_ibmi.txt" MC_HTTPRESP_FILE = "managementconsole.txt" DEDICATED_LPAR_NAME = 'z3-9-5-126-168-00000002' SHARED_LPAR_NAME = 'z3-9-5-126-127-00000001' EXPECTED_OPERATING_SYSTEM_VER = 'Linux/Red Hat 2.6.32-358.el6.ppc64 6.4' EXPECTED_ASSOC_SYSTEM_UUID = 'a168a3ec-bb3e-3ead-86c1-7d98b9d50239' class TestLogicalPartition(testtools.TestCase): _skip_setup = False _shared_wrapper = None _dedicated_wrapper = None _bad_wrapper = None _shared_entry = None _dedicated_entry = None def setUp(self): super(TestLogicalPartition, self).setUp() self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.LocalPVMTraits)) self.adpt = self.adptfx.adpt self.TC = TestLogicalPartition lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.adpt) self.assertIsNotNone(lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE) entries = lpar_http.response.feed.findentries( bp._BP_NAME, SHARED_LPAR_NAME) self.assertIsNotNone(entries, "Could not find %s in %s" % (SHARED_LPAR_NAME, LPAR_HTTPRESP_FILE)) self.TC._shared_entry = entries[0] entries = lpar_http.response.feed.findentries( bp._BP_NAME, DEDICATED_LPAR_NAME) self.assertIsNotNone(entries, "Could not find %s in %s" % (DEDICATED_LPAR_NAME, LPAR_HTTPRESP_FILE)) self.TC._dedicated_entry = entries[0] TestLogicalPartition._shared_wrapper = lpar.LPAR.wrap( self.TC._shared_entry) TestLogicalPartition._dedicated_wrapper = lpar.LPAR.wrap( self.TC._dedicated_entry) mc_http = pvmhttp.load_pvm_resp(MC_HTTPRESP_FILE, adapter=self.adpt) self.assertIsNotNone(mc_http, "Could not load %s" % MC_HTTPRESP_FILE) # Create a bad wrapper to use when retrieving properties 
which don't # exist TestLogicalPartition._bad_wrapper = lpar.LPAR.wrap( mc_http.response.feed.entries[0]) TestLogicalPartition._skip_setup = True def verify_equal(self, method_name, returned_value, expected_value): if returned_value is not None and expected_value is not None: returned_type = type(returned_value) expected_type = type(expected_value) self.assertEqual(returned_type, expected_type, "%s: type mismatch. " "Returned %s(%s). Expected %s(%s)" % (method_name, returned_value, returned_type, expected_value, expected_type)) self.assertEqual(returned_value, expected_value, "%s returned %s instead of %s" % (method_name, returned_value, expected_value)) @staticmethod def _get_nested_prop(wrapper, prop_path): value = None for partial in prop_path.split('.'): value = wrapper.__getattribute__(partial) if callable(value): value = value() wrapper = value return value def call_simple_getter(self, method_name, expected_value, expected_bad_value, use_dedicated=False): # Use __getattribute__ to dynamically call the method if use_dedicated: wrapper = TestLogicalPartition._dedicated_wrapper else: wrapper = TestLogicalPartition._shared_wrapper value = self._get_nested_prop(wrapper, method_name) self.verify_equal(method_name, value, expected_value) bad_value = self._get_nested_prop(TestLogicalPartition._bad_wrapper, method_name) self.verify_equal(method_name, bad_value, expected_bad_value) def test_get_val_str(self): expected_value = SHARED_LPAR_NAME value = TestLogicalPartition._shared_wrapper._get_val_str( bp._BP_NAME) self.verify_equal("_get_val_str", value, expected_value) expected_value = None value = TestLogicalPartition._shared_wrapper._get_val_str( 'BogusName') self.verify_equal( "_get_val_str for BogusName ", value, expected_value) def test_get_state(self): self.call_simple_getter("state", bp.LPARState.NOT_ACTIVATED, None) self._shared_wrapper.set_parm_value(bp._BP_STATE, bp.LPARState.RUNNING) self.call_simple_getter("state", bp.LPARState.RUNNING, None) def 
test_get_name(self): self.call_simple_getter("name", SHARED_LPAR_NAME, None) def test_get_id(self): self.call_simple_getter("id", 9, None) def test_get_ref_code(self): self.call_simple_getter("ref_code", "00000000", None) def test_get_ref_code_full(self): self.call_simple_getter( "ref_code_full", ("time_stamp=08/13/2016 23:52:08,refcode=00000000" ",word2=03D00000,fru_call_out_loc_codes=#47-" "Ubuntu SMP Fri Jun 24 10:09:20 UTC 2016"), None) def test_uuid(self): wrapper = self._dedicated_wrapper self.assertEqual('42DF39A2-3A4A-4748-998F-25B15352E8A7', wrapper.uuid) # Test set and retrieve uuid1 = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4())) up_uuid1 = uuid1.upper() wrapper.set_uuid(uuid1) self.assertEqual(up_uuid1, wrapper.uuid) self.assertEqual(up_uuid1, wrapper.partition_uuid) uuid2 = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4())) wrapper.uuid = uuid2 self.assertEqual(uuid2.upper(), wrapper.uuid) def test_rmc_state(self): self.call_simple_getter("rmc_state", bp.RMCState.INACTIVE, None) self._shared_wrapper.set_parm_value(bp._BP_RMC_STATE, bp.RMCState.ACTIVE) self.call_simple_getter("rmc_state", bp.RMCState.ACTIVE, None) def test_avail_priority(self): self.call_simple_getter("avail_priority", 127, 0) self._shared_wrapper.avail_priority = 63 self.call_simple_getter("avail_priority", 63, 0) def test_profile_sync(self): self.call_simple_getter("profile_sync", True, False) self.assertEqual( self._shared_wrapper._get_val_str(bp._BP_PROFILE_SYNC), "On") self._shared_wrapper.profile_sync = False self.call_simple_getter("profile_sync", False, False) self.assertEqual( self._shared_wrapper._get_val_str(bp._BP_PROFILE_SYNC), "Off") self._shared_wrapper.profile_sync = "Off" self.call_simple_getter("profile_sync", False, False) def test_get_operating_system(self): self.call_simple_getter( "operating_system", EXPECTED_OPERATING_SYSTEM_VER, "Unknown") def test_get_pending_secure_boot(self): self.call_simple_getter( "pending_secure_boot", 2, 0) def 
test_get_current_secure_boot(self): self.call_simple_getter( "current_secure_boot", 1, 0) @mock.patch('warnings.warn') def test_rr_off(self, mock_warn): """Remote Restart fields when not RR capable.""" self.call_simple_getter("rr_enabled", None, None) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) mock_warn.reset_mock() self._shared_wrapper.rr_enabled = True mock_warn.assert_called_with(mock.ANY, DeprecationWarning) mock_warn.reset_mock() self.call_simple_getter("rr_enabled", None, None) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) def test_srr(self): self.call_simple_getter("srr_enabled", True, False) self._shared_wrapper.srr_enabled = False self.call_simple_getter("srr_enabled", False, False) def test_get_proc_compat_modes(self): self.call_simple_getter("proc_compat_mode", "POWER6_Plus", None) self.call_simple_getter("pending_proc_compat_mode", "default", None) def test_get_type(self): self.call_simple_getter("env", "AIX/Linux", None) def test_associated_managed_system_uuid(self): self.call_simple_getter("assoc_sys_uuid", EXPECTED_ASSOC_SYSTEM_UUID, None) def test_is_mgmt_partition(self): self.call_simple_getter("is_mgmt_partition", True, False) def test_is_svc_partition(self): self.call_simple_getter("is_service_partition", False, False) self._shared_wrapper.is_service_partition = True self.call_simple_getter("is_service_partition", True, False) def test_keylock_pos(self): self.call_simple_getter("keylock_pos", "normal", None) self._shared_wrapper.keylock_pos = bp.KeylockPos.MANUAL self.call_simple_getter("keylock_pos", "manual", None) with testtools.ExpectedException(ValueError): self._shared_wrapper.keylock_pos = 'frobnicated' def test_bootmode(self): self.call_simple_getter("bootmode", "Normal", None) self._shared_wrapper.bootmode = bp.BootMode.SMS self.call_simple_getter("bootmode", "System_Management_Services", None) with testtools.ExpectedException(ValueError): self._shared_wrapper.bootmode = 'frobnicated' def 
test_disable_secure_boot(self): self.call_simple_getter("disable_secure_boot", False, False) self._shared_wrapper.disable_secure_boot = True self.call_simple_getter("disable_secure_boot", True, False) def test_allow_perf_data_collection(self): self.call_simple_getter("allow_perf_data_collection", False, False) self._shared_wrapper.allow_perf_data_collection = True self.call_simple_getter("allow_perf_data_collection", True, False) def test_subwrapper_getters(self): wrap = self._shared_wrapper self.assertIsInstance(wrap.capabilities, bp.PartitionCapabilities) self.assertIsInstance(wrap.io_config, bp.PartitionIOConfiguration) self.assertIsInstance(wrap.mem_config, bp.PartitionMemoryConfiguration) proc = wrap.proc_config self.assertIsInstance(proc, bp.PartitionProcessorConfiguration) self.assertIsInstance(proc.shared_proc_cfg, bp.SharedProcessorConfiguration) self.assertIsInstance(proc.dedicated_proc_cfg, bp.DedicatedProcessorConfiguration) def test_can_modifies(self): """Simple check on the 'can_modify_xxx' methods.""" wrap = TestLogicalPartition._shared_wrapper wrap.set_parm_value(bp._BP_STATE, bp.LPARState.RUNNING) wrap.set_parm_value(bp._BP_RMC_STATE, bp.RMCState.ACTIVE) self.assertTrue(wrap.can_modify_io()[0]) self.assertFalse(wrap.can_modify_mem()[0]) self.assertTrue(wrap.can_modify_proc()[0]) def test_can_modify(self): """Detailed testing on the _can_modify method.""" wrap = TestLogicalPartition._shared_wrapper # By default, it will return True because it is a non-activated LPAR self.assertTrue(wrap._can_modify(mock.Mock(), '')[0]) # Turn on the LPAR. 
Should fail due to RMC wrap.set_parm_value(bp._BP_MGT_PARTITION, False) wrap.set_parm_value(bp._BP_STATE, bp.LPARState.RUNNING) val, reason = wrap._can_modify(mock.Mock(), '') self.assertFalse(val) self.assertTrue('RMC' in reason) # Turn on Management Partition wrap.set_parm_value(bp._BP_MGT_PARTITION, True) val, reason = wrap._can_modify(mock.Mock(), '') self.assertTrue(val) self.assertIsNone(reason) # Turn on RMC, but have the DLPAR return false. wrap.set_parm_value(bp._BP_RMC_STATE, bp.RMCState.ACTIVE) val, reason = wrap._can_modify(None, 'Testing') self.assertFalse(val) self.assertTrue('DLPAR' in reason) self.assertTrue('Testing' in reason) # Turn on DLPAR val, reason = wrap._can_modify(mock.Mock(), '') self.assertTrue(val) self.assertIsNone(reason) # Now turn off RMC but change the LPAR type to OS400. Should be OK. wrap.set_parm_value(bp._BP_RMC_STATE, bp.RMCState.INACTIVE) wrap.set_parm_value(bp._BP_TYPE, bp.LPARType.OS400) val, reason = wrap._can_modify(mock.Mock(), '') self.assertTrue(val) self.assertIsNone(reason) def test_can_lpm(self): """Tests for the can_lpm method.""" wrap = TestLogicalPartition._shared_wrapper # By default, it will return True because it is a non-activated LPAR val, reason = wrap.can_lpm(mock.ANY) self.assertFalse(val) self.assertTrue('active' in reason) # Turn on the LPAR, but make it RMC inactive wrap.set_parm_value(bp._BP_MGT_PARTITION, False) wrap.set_parm_value(bp._BP_STATE, bp.LPARState.RUNNING) wrap.set_parm_value(bp._BP_RMC_STATE, bp.RMCState.INACTIVE) val, reason = wrap.can_lpm(mock.ANY) self.assertFalse(val) self.assertTrue('RMC' in reason) # Turn on RMC, but by default some of the capabilities are off. wrap.set_parm_value(bp._BP_RMC_STATE, bp.RMCState.ACTIVE) val, reason = wrap.can_lpm(mock.ANY) self.assertFalse(val) self.assertTrue('DLPAR' in reason) # Turn on the DLPAR bits. Mem is the only one required as the others # are on in the root XML. 
wrap.capabilities.set_parm_value(bp._CAP_DLPAR_MEM_CAPABLE, True) val, reason = wrap.can_lpm(mock.ANY) self.assertTrue(val) self.assertIsNone(reason) # Turn on Management Partition wrap.set_parm_value(bp._BP_MGT_PARTITION, True) val, reason = wrap.can_lpm(mock.ANY) self.assertFalse(val) self.assertTrue('management' in reason) def test_can_lpm_ibmi(self): """Tests for the can_lpm method for IBM i branches.""" wrap = TestLogicalPartition._shared_wrapper # Set that it is IBM i wrap.set_parm_value(bp._BP_MGT_PARTITION, False) wrap.set_parm_value(bp._BP_TYPE, bp.LPARType.OS400) wrap.set_parm_value(bp._BP_STATE, bp.LPARState.RUNNING) host_w = mock.MagicMock() # Destination host is not capable for IBMi LPM migr_data = {'ibmi_lpar_mobility_capable': False} val, reason = wrap.can_lpm(host_w, migr_data=migr_data) self.assertFalse(val) self.assertEqual(reason, 'Target system does not have the IBM i ' 'LPAR Mobility Capability.') # Check if restricted I/O is off. migr_data = {'ibmi_lpar_mobility_capable': True} wrap.set_parm_value(lpar._LPAR_RESTRICTED_IO, 'False') val, reason = wrap.can_lpm(host_w, migr_data=migr_data) self.assertFalse(val) self.assertIn('restricted I/O', reason) # Turn restricted I/O on, but get a host without the mobility cap wrap.set_parm_value(lpar._LPAR_RESTRICTED_IO, 'True') host_w = mock.MagicMock() host_w.get_capability.return_value = False val, reason = wrap.can_lpm(host_w, migr_data=migr_data) self.assertFalse(val) self.assertEqual('Source system does not have the IBM i LPAR ' 'Mobility Capability.', reason) # Turn all required capabilities on host_w.get_capability.return_value = True wrap.capabilities.set_parm_value(bp._CAP_DLPAR_MEM_CAPABLE, True) val, reason = wrap.can_lpm(host_w, migr_data=migr_data) self.assertTrue(val) self.assertIsNone(reason) # Turn all required capabilities on but migration data is empty val, reason = wrap.can_lpm(host_w) self.assertTrue(val) self.assertIsNone(reason) # Turn all required capabilities on but migration data 
doesn't contain # the key 'ibmi_lpar_mobility_capable' migr_data = {} val, reason = wrap.can_lpm(host_w, migr_data=migr_data) self.assertTrue(val) self.assertIsNone(reason) def test_capabilities(self): # PartitionCapabilities self.call_simple_getter("capabilities.io_dlpar", True, False) self.call_simple_getter("capabilities.mem_dlpar", False, False) self.call_simple_getter("capabilities.proc_dlpar", True, False) def test_get_proc_mode(self): # PartitionProcessorConfiguration self.call_simple_getter( "proc_config.has_dedicated", False, False) self.call_simple_getter( "proc_config.has_dedicated", True, False, use_dedicated=True) self._dedicated_wrapper.proc_config._has_dedicated(False) self.call_simple_getter( "proc_config.has_dedicated", False, False, use_dedicated=True) def test_get_current_sharing_mode(self): # SharedProcessorConfiguration self.call_simple_getter("proc_config.sharing_mode", "uncapped", None) self._shared_wrapper.proc_config.sharing_mode = "keep idle procs" self.call_simple_getter("proc_config.sharing_mode", "keep idle procs", None) def test_desired_units(self): self.call_simple_getter("proc_config.shared_proc_cfg.desired_units", 1.5, None) self._shared_wrapper.proc_config.shared_proc_cfg.desired_units = 1.75 self.call_simple_getter("proc_config.shared_proc_cfg.desired_units", 1.75, None) def test_max_units(self): self.call_simple_getter("proc_config.shared_proc_cfg.max_units", 2.5, None) self._shared_wrapper.proc_config.shared_proc_cfg.max_units = 1.75 self.call_simple_getter("proc_config.shared_proc_cfg.max_units", 1.75, None) def test_min_units(self): self.call_simple_getter("proc_config.shared_proc_cfg.min_units", 0.5, None) self._shared_wrapper.proc_config.shared_proc_cfg.min_units = 1.75 self.call_simple_getter("proc_config.shared_proc_cfg.min_units", 1.75, None) def test_desired_virtual(self): self.call_simple_getter("proc_config.shared_proc_cfg.desired_virtual", 2, None) self._shared_wrapper.proc_config.shared_proc_cfg.desired_virtual = 5 
self.call_simple_getter("proc_config.shared_proc_cfg.desired_virtual", 5, None) def test_max_virtual(self): self.call_simple_getter("proc_config.shared_proc_cfg.max_virtual", 3, None) self._shared_wrapper.proc_config.shared_proc_cfg.max_virtual = 2 self.call_simple_getter("proc_config.shared_proc_cfg.max_virtual", 2, None) def test_min_virtual(self): self.call_simple_getter("proc_config.shared_proc_cfg.min_virtual", 1, None) self._shared_wrapper.proc_config.shared_proc_cfg.min_virtual = 2 self.call_simple_getter("proc_config.shared_proc_cfg.min_virtual", 2, None) def test_get_shared_proc_pool_id(self): self.call_simple_getter("proc_config.shared_proc_cfg.pool_id", 9, 0) self._shared_wrapper.proc_config.shared_proc_cfg.pool_id = 2 self.call_simple_getter("proc_config.shared_proc_cfg.pool_id", 2, 0) def test_uncapped_weight(self): self.call_simple_getter("proc_config.shared_proc_cfg.uncapped_weight", 128, 0) self._shared_wrapper.proc_config.shared_proc_cfg.uncapped_weight = 100 self.call_simple_getter("proc_config.shared_proc_cfg.uncapped_weight", 100, 0) # DedicatedProcessorConfiguration def test_desired(self): self.call_simple_getter("proc_config.dedicated_proc_cfg.desired", 2, 0, use_dedicated=True) self._dedicated_wrapper.proc_config.dedicated_proc_cfg.desired = 3 self.call_simple_getter("proc_config.dedicated_proc_cfg.desired", 3, 0, use_dedicated=True) def test_max(self): self.call_simple_getter("proc_config.dedicated_proc_cfg.max", 3, 0, use_dedicated=True) self._dedicated_wrapper.proc_config.dedicated_proc_cfg.max = 4 self.call_simple_getter("proc_config.dedicated_proc_cfg.max", 4, 0, use_dedicated=True) def test_min(self): self.call_simple_getter("proc_config.dedicated_proc_cfg.min", 1, 0, use_dedicated=True) self._dedicated_wrapper.proc_config.dedicated_proc_cfg.min = 3 self.call_simple_getter("proc_config.dedicated_proc_cfg.min", 3, 0, use_dedicated=True) def test_nvram(self): self.assertEqual("TheNVRAMis20KofBASE64encodedDATA", 
self._dedicated_wrapper.nvram) self._dedicated_wrapper.nvram = "RRNVRAMis20KofBASE64encodedDATA" self.assertEqual("RRNVRAMis20KofBASE64encodedDATA", self._dedicated_wrapper.nvram) # Test setting one that's absent self.assertIsNone(self._shared_wrapper.nvram) self._shared_wrapper.nvram = 'SomeOtherValue' self.assertEqual('SomeOtherValue', self._shared_wrapper.nvram) def test_uptime(self): self.assertEqual(1185681, self._dedicated_wrapper.uptime) class TestIBMiSpecific(twrap.TestWrapper): """IBMi-specific tests, requiring a test file from an IBMi partition.""" file = IBMI_HTTPRESP_FILE wrapper_class_to_test = lpar.LPAR def test_restricted_io(self): self.dwrap.restrictedio = True self.assertTrue(self.dwrap.restrictedio) def test_desig_ipl_src(self): self.assertEqual('b', self.dwrap.desig_ipl_src) self.dwrap.desig_ipl_src = 'c' self.assertEqual('c', self.dwrap.desig_ipl_src) # Argh, testtools.TestCase overrides assertRaises - can't use 'with' try: self.dwrap.desig_ipl_src = 'q' self.fail() except ValueError: pass def test_tagged_io(self): # Getter tio = self.dwrap.io_config.tagged_io self.assertIsInstance(tio, bp.TaggedIO) # Field getters & setters self.assertEqual('NONE', tio.alt_load_src) tio.alt_load_src = 34 self.assertEqual('34', tio.alt_load_src) self.assertEqual('0', tio.console) tio.console = 'NONE' self.assertEqual('NONE', tio.console) self.assertEqual('NONE', tio.load_src) tio.load_src = '56' self.assertEqual('56', tio.load_src) # _bld honors child ordering new_tio = bp.TaggedIO.bld(self.adpt) new_tio.load_src = 1 new_tio.alt_load_src = 2 new_tio.console = 3 self.assertEqual( '231'.encode('utf-8'), new_tio.toxmlstring()) # Setter self.dwrap.io_config.tagged_io = new_tio self.assertEqual('3', self.dwrap.io_config.tagged_io.console) new_tio = bp.TaggedIO.bld(self.adpt) self.assertEqual( 'NONEHMC0'.encode('utf-8'), new_tio.toxmlstring()) @mock.patch('warnings.warn') def test_rr_real_values(self, mock_warn): """Test Remote Restart fields when RR capable.""" # 
Testing this under IBMi because the IBMi payload file happens to have # real data to use. self.assertIsNone(self.dwrap.rr_enabled) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) mock_warn.reset_mock() self.assertIsNone(self.dwrap.rr_state) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) class TestPartitionIOConfiguration(twrap.TestWrapper): file = LPAR_HTTPRESP_FILE wrapper_class_to_test = lpar.LPAR def setUp(self): super(TestPartitionIOConfiguration, self).setUp() self.io_config = self.entries[0].io_config def test_max_slots(self): self.assertEqual(64, self.io_config.max_virtual_slots) def test_io_slots(self): # IO Slots are typically associated with the VIOS. Further testing # driven there. self.assertIsNotNone(self.io_config.io_slots) self.assertEqual(0, len(self.io_config.io_slots)) class TestMemCfg(twrap.TestWrapper): """Test cases to test the lpar mem operations.""" file = LPAR_HTTPRESP_FILE wrapper_class_to_test = lpar.LPAR def setUp(self): super(TestMemCfg, self).setUp() self.mem_config = self.entries[0].mem_config def test_mem(self): mem_wrap = bp.PartitionMemoryConfiguration.bld( None, 1024, min_mem=512, max_mem=2048) self.assertIsNotNone(mem_wrap) self.assertEqual(512, mem_wrap.min) self.assertEqual(1024, mem_wrap.desired) self.assertEqual(2048, mem_wrap.max) self.assertEqual(0, mem_wrap.exp_factor) self.assertFalse(mem_wrap.ame_enabled) def test_current_mem(self): self.assertEqual(512, self.mem_config.current) class TestIOCfg(twrap.TestWrapper): """Test the lpar I/O configuration.""" file = LPAR_HTTPRESP_FILE wrapper_class_to_test = lpar.LPAR def setUp(self): super(TestIOCfg, self).setUp() self.io_config = self.entries[0].io_config def test_bld(self): # No slots io_wrap = bp.PartitionIOConfiguration.bld(None, 10) self.assertEqual(10, io_wrap.max_virtual_slots) self.assertEqual([], io_wrap.io_slots) # With slots slot_wraps = [bp.IOSlot.bld(None, True, 1234), bp.IOSlot.bld(None, False, 4321)] io_wrap = 
bp.PartitionIOConfiguration.bld(None, 12, slot_wraps) self.assertEqual(len(slot_wraps), len(io_wrap.io_slots)) for exp, act in zip(slot_wraps, io_wrap.io_slots): self.assertEqual(exp.drc_index, act.drc_index) self.assertEqual(exp.bus_grp_required, act.bus_grp_required) def test_data(self): self.assertEqual(64, self.io_config.max_virtual_slots) self.assertEqual([], self.io_config.io_slots) class TestPhysFCPort(unittest.TestCase): def test_bld(self): port = bp.PhysFCPort.bld_ref(None, 'fcs0') self.assertIsNotNone(port) self.assertEqual('fcs0', port.name) if __name__ == "__main__": unittest.main() pypowervm-1.1.24/pypowervm/tests/wrappers/test_cdata.py0000664000175000017500000000522613571367171023017 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.job as jwrap CORRECT_CDATA_PARAMVAL = ( '1' 'trueIBM' '6005076802810B0FD00000000000049F042145' '21000024FF409CD0500507680245CAC0' '6000000000000]]>' '' "\n\n ").encode("utf-8") CORRECT_CDATA_CONTENT = ( '1true' 'IBM' '6005076802810B0FD00000000000049F042145' '21000024FF409CD0500507680245CAC0' '6000000000000') class TestCDATA(twrap.TestWrapper): file = 'cdata.xml' wrapper_class_to_test = jwrap.Job """Verify CDATA segments survive going into and out of the Adapter.""" def test_cdata_request(self): pval = self.dwrap.entry.element.find( 'JobRequestInstance/JobParameters/JobParameter/ParameterValue') out = pval.toxmlstring() self.assertEqual(out, CORRECT_CDATA_PARAMVAL, "CDATA was not preserved in JobRequest!\n%s" % out) def test_cdata_results(self): resdict = self.dwrap.get_job_results_as_dict() out = resdict['inputXML'] self.assertEqual(out, CORRECT_CDATA_CONTENT, "CDATA was not preserved in Results!\n%s" % out) if __name__ == '__main__': unittest.main() pypowervm-1.1.24/pypowervm/tests/wrappers/test_wrapper_properties.py0000664000175000017500000001023713571367171025675 0ustar neoneo00000000000000# Copyright 2016, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for properties of EntryWrapper/ElementWrapper subclasses.""" import testtools import pypowervm.const as c from pypowervm.utils import wrappers as wutil from pypowervm.wrappers import base_partition as bp from pypowervm.wrappers import enterprise_pool as epool from pypowervm.wrappers import entry_wrapper as ewrap from pypowervm.wrappers import iocard from pypowervm.wrappers import logical_partition as lpar from pypowervm.wrappers import network from pypowervm.wrappers import virtual_io_server as vios class TestXAGs(testtools.TestCase): def verify_xags(self, wcls, expected_xags): """Verify extended attribute groups for properties of a wrapper class. :param wcls: The pypowervm.wrappers.entry_wrapper.Wrapper subclass to test. :param expected_xags: A dict mapping wcls's property names to their respective extended attribute group names. Can (should) only include those properties for which an extended attribute group is registered. (If it contains any other properties, the value must be None.) 
Format is { prop_name: xag_name } """ for prop in dir(wcls): actual = wcls.get_xag_for_prop(prop) expected = expected_xags.get(prop, None) self.assertEqual(expected, actual, message="%s.%s" % (wcls.__name__, prop)) def test_xags(self): """Verify xags associated with properties of wrapper classes.""" # The following wrapper classes have no properties with xags for wcls in wutil.wrapper_class_iter(): if wcls is vios.VIOS: self.verify_xags(wcls, { 'media_repository': c.XAG.VIO_STOR, 'ip_addresses': c.XAG.VIO_NET, 'vfc_mappings': c.XAG.VIO_FMAP, 'scsi_mappings': c.XAG.VIO_SMAP, 'seas': c.XAG.VIO_NET, 'trunk_adapters': c.XAG.VIO_NET, 'phys_vols': c.XAG.VIO_STOR, 'io_adpts_for_link_agg': c.XAG.VIO_NET, 'nvram': c.XAG.NVRAM, }) elif wcls is epool.Pool: self.verify_xags(wcls, { 'compliance_hours_left': c.XAG.ADV }) elif wcls is epool.PoolMember: self.verify_xags(wcls, { 'proc_compliance_hours_left': c.XAG.ADV, 'mem_compliance_hours_left': c.XAG.ADV }) elif wcls in (bp.BasePartition, lpar.LPAR): self.verify_xags(wcls, { 'nvram': c.XAG.NVRAM }) elif wcls in (network.CNA, iocard.VNIC, iocard._VNICDetails): self.verify_xags(wcls, { 'ip_address': c.XAG.ADV, 'subnet_mask': c.XAG.ADV, 'gateway': c.XAG.ADV }) # Include an elif for each Wrapper subclass that has xags defined. else: self.verify_xags(wcls, {}) def test_wrapper_registration(self): """All wrapper subclasses must be registered via [base_]pvm_type.""" for wcls in wutil.wrapper_class_iter(): if wcls not in (ewrap.Wrapper, ewrap.ElementWrapper, ewrap.EntryWrapper): self.assertTrue(wcls._registered, "%s not registered" % wcls.__name__) pypowervm-1.1.24/pypowervm/tests/wrappers/test_shared_proc_pool.py0000664000175000017500000000414313571367171025262 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.shared_proc_pool as spp SHRPROC_HTTPRESP_FILE = "shrprocpool.txt" class TestShrPrcPoolTestCase(twrap.TestWrapper): file = 'shrprocpool.txt' wrapper_class_to_test = spp.SharedProcPool def test_validate_attribues(self): # First element from the feed is the default pool self.assertEqual('DefaultPool', self.dwrap.name) self.assertEqual(0, self.dwrap.id) self.assertEqual(0, self.dwrap.curr_rsrv_proc_units) self.assertTrue(self.dwrap.is_default) self.assertEqual(0, self.dwrap.max_proc_units) self.assertEqual(0, self.dwrap.pend_rsrv_proc_units) self.assertEqual(0, self.dwrap.avail_proc_units) # The second pool is non-default. 
n_spp = self.entries[1] self.assertEqual('SharedPool01', n_spp.name) self.assertEqual(1, n_spp.id) self.assertEqual(5.35, n_spp.curr_rsrv_proc_units) self.assertFalse(n_spp.is_default) self.assertEqual(10.25, n_spp.max_proc_units) self.assertEqual(6.15, n_spp.pend_rsrv_proc_units) self.assertEqual(5.05, n_spp.avail_proc_units) def test_setters(self): self.dwrap.name = 'new' self.assertEqual('new', self.dwrap.name) self.dwrap.max_proc_units = 5.5 self.assertEqual(5.5, self.dwrap.max_proc_units) self.dwrap.pend_rsrv_proc_units = 4.3 self.assertEqual(4.3, self.dwrap.pend_rsrv_proc_units) pypowervm-1.1.24/pypowervm/tests/wrappers/pcm/0000775000175000017500000000000013571367172021105 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/wrappers/pcm/test_phyp.py0000664000175000017500000001536113571367171023503 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the raw PHYP long term metrics.""" import testtools from pypowervm.tests.test_utils import pvmhttp from pypowervm.wrappers.pcm import phyp as pcm_phyp PHYP_DATA = 'phyp_pcm_data.txt' class TestPhypLTM(testtools.TestCase): def setUp(self): super(TestPhypLTM, self).setUp() self.raw_json = pvmhttp.PVMFile(PHYP_DATA).body def test_parse(self): info = pcm_phyp.PhypInfo(self.raw_json) self.assertIsNotNone(info) # Validate the info self.assertEqual('1.3.0', info.info.version) self.assertEqual('Raw', info.info.metric_type) self.assertEqual('LTM', info.info.monitoring_type) self.assertEqual('8247-22L*2125D4A', info.info.mtms) self.assertEqual('dev-4', info.info.name) # Validate some samples sample = info.sample self.assertEqual(806297258933150.0, sample.time_based_cycles) self.assertEqual(0, sample.status) self.assertEqual(u'2015-05-27T08:17:45+0000', sample.time_stamp) # Firmware self.assertEqual(58599310268, sample.system_firmware.utilized_proc_cycles) self.assertEqual(4096, sample.system_firmware.assigned_mem) # Shared Proc Pool spp_list = sample.shared_proc_pools self.assertEqual(1, len(spp_list)) self.assertEqual(0, spp_list[0].id) self.assertEqual('DefaultPool', spp_list[0].name) self.assertEqual(1.6125945162342e+16, spp_list[0].assigned_proc_cycles) self.assertEqual(683011326288, spp_list[0].utilized_pool_cycles) self.assertEqual(20, spp_list[0].max_proc_units) self.assertEqual(18, spp_list[0].borrowed_pool_proc_units) # Processor self.assertEqual(20, sample.processor.total_proc_units) self.assertEqual(20, sample.processor.configurable_proc_units) self.assertEqual(18.9, sample.processor.available_proc_units) self.assertEqual(512000000, sample.processor.proc_cycles_per_sec) # Memory self.assertEqual(65536, sample.memory.total_mem) self.assertEqual(32512, sample.memory.available_mem) self.assertEqual(65536, sample.memory.configurable_mem) # LPARs self.assertEqual(5, len(sample.lpars)) # First LPAR shouldn't have network or storage (inactive) bad_lpar = 
sample.lpars[0] self.assertEqual(6, bad_lpar.id) self.assertEqual('2545BCC5-BAE8-4414-AD49-EAFC2DEE2546', bad_lpar.uuid) self.assertEqual('aixlinux', bad_lpar.type) self.assertEqual('fkh4-99b8fdca-kyleh', bad_lpar.name) self.assertEqual('Not Activated', bad_lpar.state) self.assertEqual(100, bad_lpar.affinity_score) self.assertIsNotNone(bad_lpar.memory) self.assertIsNotNone(bad_lpar.processor) self.assertEqual(None, bad_lpar.network) self.assertEqual(None, bad_lpar.storage) # Last LPAR should have network and storage good_lpar = sample.lpars[4] # VM Memory self.assertEqual(20480, good_lpar.memory.logical_mem) self.assertEqual(20480, good_lpar.memory.backed_physical_mem) # VM Processor self.assertEqual(0, good_lpar.processor.pool_id) self.assertEqual('uncap', good_lpar.processor.mode) self.assertEqual(4, good_lpar.processor.virt_procs) self.assertEqual(.4, good_lpar.processor.proc_units) self.assertEqual(128, good_lpar.processor.weight) self.assertEqual(1765629232513, good_lpar.processor.entitled_proc_cycles) self.assertEqual(264619289721, good_lpar.processor.util_cap_proc_cycles) self.assertEqual(641419282, good_lpar.processor.util_uncap_proc_cycles) self.assertEqual(0, good_lpar.processor.idle_proc_cycles) self.assertEqual(0, good_lpar.processor.donated_proc_cycles) self.assertEqual(0, good_lpar.processor.time_wait_dispatch) self.assertEqual(160866895489, good_lpar.processor.total_instructions) self.assertEqual(193139925064, good_lpar.processor.total_inst_exec_time) # VM Vea vea = good_lpar.network.veas[0] self.assertEqual(2227, vea.vlan_id) self.assertEqual(0, vea.vswitch_id) self.assertEqual('U8247.22L.2125D4A-V2-C2', vea.physical_location) self.assertEqual(True, vea.is_pvid) self.assertEqual(10, vea.received_packets) self.assertEqual(100, vea.sent_packets) self.assertEqual(5, vea.dropped_packets) self.assertEqual(100, vea.sent_bytes) self.assertEqual(10000, vea.received_bytes) self.assertEqual(0, vea.received_physical_packets) self.assertEqual(0, 
vea.sent_physical_packets) self.assertEqual(0, vea.dropped_physical_packets) self.assertEqual(0, vea.sent_physical_bytes) self.assertEqual(0, vea.received_physical_bytes) # TODO(thorst) Test SR-IOV # VM storage stor = good_lpar.storage.v_stor_adpts[0] self.assertEqual('U8247.22L.2125D4A-V2-C3', stor.physical_location) self.assertEqual(1, stor.vios_id) self.assertEqual(1000, stor.vios_slot) # Test that VFC adapter has been parsed. self.assertIsNotNone(good_lpar.storage.v_fc_adpts) self.assertEqual(2, len(good_lpar.storage.v_fc_adpts)) # Test 1st VFC adapter vfc_adpt = good_lpar.storage.v_fc_adpts[0] self.assertEqual('U8247.22L.2125D4A-V2-C2', vfc_adpt.physical_location) self.assertEqual(2, vfc_adpt.vios_id) self.assertEqual(2, len(vfc_adpt.wwpn_pair)) self.assertIn(13857705835384867080, vfc_adpt.wwpn_pair) self.assertIn(13857705835384867081, vfc_adpt.wwpn_pair) # Test 2nd VFC adapter vfc_adpt = good_lpar.storage.v_fc_adpts[1] self.assertEqual('U8247.22L.2125D4A-V2-C3', vfc_adpt.physical_location) self.assertEqual(1, vfc_adpt.vios_id) self.assertEqual(2, len(vfc_adpt.wwpn_pair)) self.assertIn(13857705835384867082, vfc_adpt.wwpn_pair) self.assertIn(13857705835384867083, vfc_adpt.wwpn_pair) # TODO(thorst) Test vfc pypowervm-1.1.24/pypowervm/tests/wrappers/pcm/test_vios.py0000664000175000017500000001424413571367171023502 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the raw VIOS long term metrics.""" import testtools from pypowervm.tests.test_utils import pvmhttp from pypowervm.wrappers.pcm import vios as pcm_vios VIOS_DATA = 'vios_pcm_data.txt' VIOS_DATA_SPARSE = 'vios_pcm_data_sparse.txt' class TestViosLTM(testtools.TestCase): def setUp(self): super(TestViosLTM, self).setUp() self.raw_json = pvmhttp.PVMFile(VIOS_DATA).body def test_parse(self): info = pcm_vios.ViosInfo(self.raw_json) self.assertIsNotNone(info) # Validate the info self.assertEqual('1.0.0', info.info.version) self.assertEqual('Raw', info.info.metric_type) self.assertEqual('LTM', info.info.monitoring_type) self.assertEqual('8247-22L*2125D4A', info.info.mtms) # Validate some samples sample = info.sample self.assertEqual(u'2015-05-27T00:22:00+0000', sample.time_stamp) self.assertEqual(1, sample.id) self.assertEqual('IOServer - SN2125D4A', sample.name) # Validate Memory self.assertEqual(1715, sample.mem.utilized_mem) # Validate the Network self.assertEqual(6, len(sample.network.adpts)) self.assertEqual(1, len(sample.network.seas)) phys_dev = sample.network.adpts[1] self.assertEqual('ent0', phys_dev.name) self.assertEqual('physical', phys_dev.type) self.assertEqual('U78CB.001.WZS007Y-P1-C10-T1', phys_dev.physical_location) self.assertEqual(1703083, phys_dev.received_packets) self.assertEqual(65801, phys_dev.sent_packets) self.assertEqual(0, phys_dev.dropped_packets) self.assertEqual(187004823, phys_dev.received_bytes) self.assertEqual(71198950, phys_dev.sent_bytes) # SEA validation sea = sample.network.seas[0] self.assertEqual('ent6', sea.name) self.assertEqual('sea', sea.type) self.assertEqual('U8247.22L.2125D4A-V1-C12-T1', sea.physical_location) self.assertEqual(0, sea.received_packets) self.assertEqual(0, sea.sent_packets) self.assertEqual(0, sea.dropped_packets) self.assertEqual(0, sea.received_bytes) self.assertEqual(0, sea.sent_bytes) self.assertEqual(['ent3', 'ent5'], sea.bridged_adpts) # Storage - FC Validation fc = sample.storage.fc_adpts[0] 
self.assertEqual('fcs0', fc.name) self.assertEqual('21000024ff649104', fc.wwpn) self.assertEqual('U78CB.001.WZS007Y-P1-C3-T1', fc.physical_location) self.assertEqual(0, fc.num_reads) self.assertEqual(0, fc.num_writes) self.assertEqual(0, fc.read_bytes) self.assertEqual(0, fc.write_bytes) self.assertEqual(8, fc.running_speed) # VFC Validation vfc = sample.storage.fc_adpts[1].ports[0] self.assertEqual("vfc1", vfc.name) self.assertEqual("21000024ff649159", vfc.wwpn) self.assertEqual(1234, vfc.num_reads) self.assertEqual(1235, vfc.num_writes) self.assertEqual(184184, vfc.read_bytes) self.assertEqual(138523, vfc.write_bytes) self.assertEqual(8, vfc.running_speed) self.assertEqual("U78CB.001.WZS007Y-P1-C3-T2000", vfc.physical_location) # Physical Adpt Validation padpt = sample.storage.phys_adpts[0] self.assertEqual('sissas0', padpt.name) self.assertEqual('U78CB.001.WZS007Y-P1-C14-T1', padpt.physical_location) self.assertEqual(1089692, padpt.num_reads) self.assertEqual(1288936, padpt.num_writes) self.assertEqual(557922304, padpt.read_bytes) self.assertEqual(659935232, padpt.write_bytes) self.assertEqual('sas', padpt.type) # Storage Virtual Adapter Validation vadpt = sample.storage.virt_adpts[0] self.assertEqual('vhost5', vadpt.name) self.assertEqual('U8247.22L.2125D4A-V1-C7', vadpt.physical_location) self.assertEqual(0, vadpt.num_reads) self.assertEqual(1, vadpt.num_writes) self.assertEqual(0, vadpt.read_bytes) self.assertEqual(512, vadpt.write_bytes) self.assertEqual('virtual', vadpt.type) # SSP Validation ssp = sample.storage.ssps[0] self.assertEqual('ssp1', ssp.name) self.assertEqual(["sissas0"], ssp.pool_disks) self.assertEqual(12346, ssp.num_reads) self.assertEqual(17542, ssp.num_writes) self.assertEqual(18352435, ssp.total_space) self.assertEqual(123452, ssp.used_space) self.assertEqual(123825, ssp.read_bytes) self.assertEqual(375322, ssp.write_bytes) class TestViosLTMSparse(testtools.TestCase): def setUp(self): super(TestViosLTMSparse, self).setUp() self.raw_json = 
pvmhttp.PVMFile(VIOS_DATA_SPARSE).body def test_parse(self): info = pcm_vios.ViosInfo(self.raw_json) self.assertIsNotNone(info) # Validate the info self.assertEqual('1.0.0', info.info.version) self.assertEqual('Raw', info.info.metric_type) self.assertEqual('LTM', info.info.monitoring_type) self.assertEqual('8247-22L*2125D4A', info.info.mtms) # Validate some samples sample = info.sample self.assertEqual(u'2015-05-27T00:22:00+0000', sample.time_stamp) self.assertEqual(1, sample.id) self.assertEqual('IOServer - SN2125D4A', sample.name) # Validate Memory self.assertIsNone(sample.mem) # Validate the Network self.assertIsNone(sample.network) # Validate the Storage self.assertIsNone(sample.storage) pypowervm-1.1.24/pypowervm/tests/wrappers/pcm/__init__.py0000664000175000017500000000000013571367171023203 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/wrappers/pcm/test_lpar.py0000664000175000017500000000744713571367171023467 0ustar neoneo00000000000000# Copyright 2016, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the raw LPAR long term metrics.""" import json from pypowervm.tests.test_utils import pvmhttp from pypowervm.wrappers.pcm import lpar as pcm_lpar import testtools LPAR_DATA = 'lpar_pcm_data.txt' class TestLparLTM(testtools.TestCase): def setUp(self): super(TestLparLTM, self).setUp() self.raw_json = pvmhttp.PVMFile(LPAR_DATA).body def test_parse(self): info = pcm_lpar.LparInfo(self.raw_json) self.assertIsNotNone(info) # Validate the Lpar metrics. # There are metrics for four Lpars. self.assertEqual(6, len(info.lpars_util)) # Get the first Lpar and assert its metrics lpar = info.lpars_util[0] self.assertEqual("Ubuntu1410", lpar.name) self.assertIsNotNone(lpar.memory) self.assertEqual(80, lpar.memory.pct_real_mem_avbl) self.assertEqual(1024, lpar.memory.total_pg_count) self.assertEqual(512, lpar.memory.free_pg_count) self.assertEqual(64, lpar.memory.active_pg_count) self.assertEqual(1048576, lpar.memory.real_mem_size_bytes) self.assertEqual(61, lpar.memory.pct_real_mem_free) self.assertEqual(25, lpar.memory.vm_pg_out_rate) # Get 3rd(random) VM and assert its metrics lpar = info.lpars_util[2] self.assertEqual("test_vm3", lpar.name) self.assertIsNotNone(lpar.memory) self.assertEqual(82, lpar.memory.pct_real_mem_avbl) self.assertEqual(4096, lpar.memory.total_pg_count) self.assertEqual(2048, lpar.memory.free_pg_count) self.assertEqual(256, lpar.memory.active_pg_count) self.assertEqual(1048576, lpar.memory.real_mem_size_bytes) self.assertEqual(60, lpar.memory.pct_real_mem_free) self.assertEqual(0, lpar.memory.vm_pg_out_rate) # Assert that we have entries in JSON for VMs which were in error metric_json = json.loads(self.raw_json) self.assertEqual("3B0237F9-26F1-41C7-BE57-A08C9452AD9D", metric_json['lparUtil'][4]['name']) self.assertEqual("vm_inactive_rmc", metric_json['lparUtil'][5]['name']) # Assert that powered off VM has 100 percent free memory. 
lpar = info.lpars_util[4] self.assertEqual("3B0237F9-26F1-41C7-BE57-A08C9452AD9D", lpar.name) self.assertIsNotNone(lpar.memory) self.assertIsNone(lpar.memory.pct_real_mem_avbl) self.assertIsNone(lpar.memory.total_pg_count) self.assertIsNone(lpar.memory.free_pg_count) self.assertIsNone(lpar.memory.active_pg_count) self.assertIsNone(lpar.memory.real_mem_size_bytes) self.assertEqual(100, lpar.memory.pct_real_mem_free) # Assert that LPAR with inactive RMC has no free memory. lpar = info.lpars_util[5] self.assertEqual("vm_inactive_rmc", lpar.name) self.assertIsNotNone(lpar.memory) self.assertIsNone(lpar.memory.pct_real_mem_avbl) self.assertIsNone(lpar.memory.total_pg_count) self.assertIsNone(lpar.memory.free_pg_count) self.assertIsNone(lpar.memory.active_pg_count) self.assertIsNone(lpar.memory.real_mem_size_bytes) self.assertEqual(0, lpar.memory.pct_real_mem_free) pypowervm-1.1.24/pypowervm/tests/wrappers/test_http_error.py0000664000175000017500000000611613571367171024132 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest import mock from pypowervm.tests.test_utils import pvmhttp import pypowervm.wrappers.http_error as he HTTPRESP_FILE = "fake_httperror.txt" MSG = ('Unexpected error occurred while fetching Cluster/SSP ' 'information : 9999-99Z*2125D4A/1 : Unable to send com' 'mand to VIOS at this moment. VIOS 1*9999-99Z*2125D4A ' 'is busy processing some other request. 
Please retry t' 'he operation after sometime.') MSG2 = ('Error occurred while querying for Adapter from VIOS vios1 with ID 2 ' 'in System 9119-MHE*1085B07 - The system is currently too busy to ' 'complete the specified request. Please retry the operation at a ' 'later time. If the operation continues to fail, check the error log ' 'to see if the filesystem is full.') REASON_CODE = 'Unknown internal error.' class TestHttpError(unittest.TestCase): def setUp(self): super(TestHttpError, self).setUp() self.http_error = pvmhttp.load_pvm_resp(HTTPRESP_FILE) self.assertIsNotNone(self.http_error, "Could not load %s " % HTTPRESP_FILE) def test_wrap(self): wrap = he.HttpError.wrap(self.http_error.response.entry) self.assertEqual(wrap.message, MSG) self.assertEqual(wrap.status, 500) self.assertEqual(wrap.reason_code, REASON_CODE) self.assertTrue(wrap.is_vios_busy()) # Ensure it's checking for 500 only. with mock.patch.object(wrap, '_get_val_int', return_value=555): self.assertFalse(wrap.is_vios_busy()) # Ensure it's checking for 'VIOS' string. with mock.patch.object(wrap, '_get_val_str', return_value='other'): self.assertFalse(wrap.is_vios_busy()) # Ensure it finds 'VIOS' but not the other string. with mock.patch.object(wrap, '_get_val_str', return_value='VIOS xxx'): self.assertFalse(wrap.is_vios_busy()) # Ensure it finds 'HSCL' return code. msg_string = 'msg HSCL3205 msg' with mock.patch.object(wrap, '_get_val_str', return_value=msg_string): self.assertTrue(wrap.is_vios_busy()) msg_string = 'msg VIOS0014 msg' with mock.patch.object(wrap, '_get_val_str', return_value=msg_string): self.assertTrue(wrap.is_vios_busy()) # Ensure we find the new message with mock.patch.object(wrap, '_get_val_str', return_value=MSG2): self.assertTrue(wrap.is_vios_busy()) pypowervm-1.1.24/pypowervm/tests/wrappers/test_event.py0000664000175000017500000000507413571367171023065 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import pypowervm.tests.test_utils.test_wrapper_abc as twrap from pypowervm.wrappers import event class TestEvent(twrap.TestWrapper): file = 'event_feed.txt' wrapper_class_to_test = event.Event @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.get') def test_get(self, mock_ewrap_get): event.Event.get('adap', 'appid') mock_ewrap_get.assert_called_once_with('adap', xag=[], add_qp=[ ('QUEUE_CLIENTKEY_METHOD', 'USE_APPLICATIONID'), ('QUEUE_APPLICATIONID', 'appid')]) def test_getters(self): ev1, ev2 = self.entries[:2] self.assertEqual('510ae1e6-3e86-34c6-bf4c-c638e76a5f68', ev1.uuid) self.assertEqual(event.EventType.ADD_URI, ev1.etype) self.assertEqual('1473962006548', ev1.eid) self.assertEqual('http://localhost:12080/rest/api/uom/ManagedSystem/1c' 'ab7366-6b73-342c-9f43-ddfeb9f8edd3/LogicalPartition/' '1E6FC741-6253-4B69-B88B-8A44BED92145', ev1.data) self.assertIsNone(ev1.detail) self.assertEqual(event.EventType.MODIFY_URI, ev2.etype) self.assertEqual('Other', ev2.detail) def test_bld(self): evt = event.Event.bld('adap', None, None) self.assertIsInstance(evt, event.Event) self.assertEqual('adap', evt.adapter) self.assertIsNone(evt.data) self.assertIsNone(evt.detail) evt = event.Event.bld('adap2', 'data', 'detail') self.assertIsInstance(evt, event.Event) self.assertEqual('adap2', evt.adapter) self.assertEqual('data', evt.data) self.assertEqual('detail', evt.detail) def test_str(self): self.assertEqual( 
'Event(id=1473962006548, type=ADD_URI, data=http://localhost:12080' '/rest/api/uom/ManagedSystem/1cab7366-6b73-342c-9f43-ddfeb9f8edd3/' 'LogicalPartition/1E6FC741-6253-4B69-B88B-8A44BED92145, detail=Non' 'e)', str(self.dwrap)) pypowervm-1.1.24/pypowervm/tests/wrappers/test_job.py0000664000175000017500000003441313571367171022515 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools import pypowervm.exceptions as ex import pypowervm.tests.test_fixtures as fx from pypowervm.tests.test_utils import pvmhttp import pypowervm.wrappers.job as jwrap JOB_REQUEST_FILE = "job_request_power_off.txt" JOB_RESPONSE_OK = "job_response_completed_ok.txt" JOB_RESPONSE_FAILED = "job_response_completed_failed.txt" JOB_RESPONSE_EXCEPTION = "job_response_exception.txt" EXPECTED_ID = '1375391227297' EXPECTED_STATUS = jwrap.JobStatus.COMPLETED_WITH_ERROR EXPECTED_EXCEPTION_MESSAGE = 'This is an exception message' EXPECTED_RESULTS_VALUE = 'This is an error message' EXPECTED_GROUP_NAME = 'LogicalPartition' EXPECTED_OPERATION_NAME = 'PowerOff' class TestJobEntryWrapper(testtools.TestCase): def setUp(self): super(TestJobEntryWrapper, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt def load(fname): resp = pvmhttp.load_pvm_resp(fname, adapter=self.adpt).response self.assertIsNotNone(resp, "Could not load %s " % fname) return jwrap.Job.wrap(resp) self._request_wrapper = load(JOB_REQUEST_FILE) 
self._ok_wrapper = load(JOB_RESPONSE_OK) self._failed_wrapper = load(JOB_RESPONSE_FAILED) self._exception_wrapper = load(JOB_RESPONSE_EXCEPTION) self._exception_wrapper.op = 'CLIRunner' # Create a bad wrapper to use when retrieving properties which don't # exist self._bad_wrapper = load(JOB_REQUEST_FILE) self.set_test_property_values() def set_single_value(self, entry, property_name, value): prop = entry.element.find(property_name) self.assertNotEqual(prop, None, "Could not find property %s." % property_name) prop.text = str(value) def set_test_property_values(self): """Set expected values in entry so test code can work consistently.""" self.set_single_value(self._ok_wrapper.entry, jwrap._JOB_ID, EXPECTED_ID) self.set_single_value(self._request_wrapper.entry, jwrap._JOB_GROUP_NAME, EXPECTED_GROUP_NAME) self.set_single_value(self._request_wrapper.entry, jwrap._JOB_OPERATION_NAME, EXPECTED_OPERATION_NAME) self.set_single_value(self._failed_wrapper.entry, jwrap._JOB_STATUS, EXPECTED_STATUS) self.set_single_value(self._exception_wrapper.entry, jwrap._JOB_MESSAGE, EXPECTED_EXCEPTION_MESSAGE) # results value containing the message is the second one in a list props = self._failed_wrapper.entry.element.findall( jwrap._JOB_RESULTS_VALUE) props[1].text = str(EXPECTED_RESULTS_VALUE) def verify_equal(self, method_name, returned_value, expected_value): if returned_value is not None and expected_value is not None: returned_type = type(returned_value) expected_type = type(expected_value) self.assertEqual(returned_type, expected_type, "%s: type mismatch. " "Returned %s(%s). 
Expected %s(%s)" % (method_name, returned_value, returned_type, expected_value, expected_type)) self.assertEqual(returned_value, expected_value, "%s returned %s instead of %s" % (method_name, returned_value, expected_value)) def call_simple_getter(self, wrapper, method_name, expected_value, expected_bad_value): # Use __getattribute__ to dynamically call the method value = wrapper.__getattribute__( method_name) if callable(value): value = value() self.verify_equal(method_name, value, expected_value) bad_value = self._bad_wrapper.__getattribute__(method_name) if callable(bad_value): bad_value = bad_value() self.verify_equal(method_name, bad_value, expected_bad_value) def test_get_job_id(self): self.call_simple_getter(self._ok_wrapper, "job_id", EXPECTED_ID, None) def test_get_job_status(self): self.call_simple_getter(self._failed_wrapper, "job_status", EXPECTED_STATUS, None) def test_get_job_message(self): self.call_simple_getter(self._exception_wrapper, "get_job_message", EXPECTED_EXCEPTION_MESSAGE, '') def test_get_job_resp_exception_msg(self): self.call_simple_getter(self._exception_wrapper, "get_job_resp_exception_msg", EXPECTED_EXCEPTION_MESSAGE, '') def test_get_job_results_message(self): self.call_simple_getter(self._failed_wrapper, "get_job_results_message", EXPECTED_RESULTS_VALUE, '') def test_job_parameters(self): input_name1 = 'JobParmName1' input_name2 = 'JobParmName2' input_value1 = 'JobParmValue1' input_value2 = 'JobParmValue2' wrapper = self._request_wrapper job_parms = [ wrapper.create_job_parameter(input_name1, input_value1), wrapper.create_job_parameter(input_name2, input_value2)] wrapper.add_job_parameters_to_existing(*job_parms) elements = wrapper.entry.element.findall( 'JobParameters/JobParameter/ParameterName') names = [] for element in elements: names.append(element.text) self.assertEqual(input_name1, names[0], "Job names don't match") self.assertEqual(input_name2, names[1], "Job names don't match") elements = wrapper.entry.element.findall( 
'JobParameters/JobParameter/ParameterValue') values = [] for element in elements: values.append(element.text) self.assertEqual(input_value1, values[0], "Job values don't match") self.assertEqual(input_value2, values[1], "Job values don't match") @mock.patch('pypowervm.wrappers.job.Job._monitor_job') @mock.patch('pypowervm.wrappers.job.Job.cancel_job') @mock.patch('pypowervm.wrappers.job.Job.delete_job') @mock.patch('pypowervm.wrappers.job.Job.job_status') def test_run_job(self, mock_status, mock_del, mock_cancel, mock_monitor): mock_status.__get__ = mock.Mock( return_value=jwrap.JobStatus.COMPLETED_OK) wrapper = self._request_wrapper # Synchronous # No timeout mock_monitor.return_value = False wrapper.run_job('uuid') self.adpt.create_job.assert_called_with(mock.ANY, 'LogicalPartition', 'uuid', sensitive=False) self.assertEqual(1, mock_monitor.call_count) self.assertEqual(0, mock_cancel.call_count) self.assertEqual(1, mock_del.call_count) # Time out mock_monitor.reset_mock() mock_del.reset_mock() mock_monitor.return_value = True self.assertRaises(ex.JobRequestTimedOut, wrapper.run_job, 'uuid') self.assertEqual(1, mock_monitor.call_count) self.assertEqual(1, mock_cancel.call_count) self.assertEqual(0, mock_del.call_count) # Non-OK status mock_status.__get__.return_value = jwrap.JobStatus.COMPLETED_WITH_ERROR mock_monitor.reset_mock() mock_cancel.reset_mock() mock_monitor.return_value = False self.assertRaises(ex.JobRequestFailed, wrapper.run_job, 'uuid') self.assertEqual(0, mock_cancel.call_count) self.assertEqual(1, mock_del.call_count) # Asynchronous. With no timeout, return right after monitor. "Bad" # result isn't checked, delete is not called. 
mock_del.reset_mock() wrapper.run_job('uuid', synchronous=False) self.assertEqual(0, mock_del.call_count) @mock.patch('pypowervm.wrappers.job.Job.poll_while_status') @mock.patch('pypowervm.wrappers.job.PollAndDeleteThread') def test_montor_job(self, mock_thread, mock_poll): wrapper = self._ok_wrapper # Synchronous is a pass-through to poll_while_status mock_poll.return_value = 'abc123' self.assertEqual('abc123', wrapper._monitor_job()) mock_poll.assert_called_once_with(['RUNNING', 'NOT_STARTED'], mock.ANY, mock.ANY) self.assertEqual(0, mock_thread.call_count) # Asynchronous # Time out mock_poll.reset_mock() mock_poll.return_value = True self.assertTrue(wrapper._monitor_job(synchronous=False)) mock_poll.assert_called_once_with(['NOT_STARTED'], mock.ANY, mock.ANY) self.assertEqual(0, mock_thread.call_count) # No timeout mock_poll.reset_mock() mock_poll.return_value = False thread_inst = mock.Mock() mock_thread.return_value = thread_inst self.assertFalse(wrapper._monitor_job(synchronous=False)) mock_thread.assert_called_once_with(wrapper, False) thread_inst.start.assert_called_once_with() @mock.patch('time.time') @mock.patch('time.sleep') @mock.patch('pypowervm.wrappers.job.Job.job_status') def test_poll_while_status(self, mock_status, mock_sleep, mock_time): wrapper = self._ok_wrapper mock_status.__get__ = mock.Mock(return_value=jwrap.JobStatus.RUNNING) # Short-circuit if the status is already not in the list self.assertFalse(wrapper.poll_while_status( [jwrap.JobStatus.NOT_ACTIVE], 10, False)) self.assertEqual(0, mock_sleep.call_count) self.assertEqual(1, mock_time.call_count) # Engineer a timeout after the third run mock_time.reset_mock() mock_time.side_effect = [0, 1, 2, 3, 4, 5] self.assertTrue(wrapper.poll_while_status( [jwrap.JobStatus.RUNNING], 3, False)) self.assertEqual(3, mock_sleep.call_count) # Initial setup, bail on the fourth iteration self.assertEqual(5, mock_time.call_count) # "Infinite" timeout, status eventually becomes one not in the list 
mock_status.__get__.side_effect = ['a', 'b', 'c', 'd', 'e', 'f'] mock_time.reset_mock() mock_time.side_effect = [0, 1, 2, 3, 4, 5] mock_sleep.reset_mock() self.assertFalse(wrapper.poll_while_status(['a', 'b', 'c', 'd', 'e'], 0, False)) self.assertEqual(5, mock_sleep.call_count) # Only the initial timer setup self.assertEqual(6, mock_time.call_count) @mock.patch('pypowervm.wrappers.job.Job.poll_while_status') @mock.patch('pypowervm.wrappers.job.Job.delete_job') def test_poll_and_delete_thread(self, mock_del, mock_poll): # OK jwrap.PollAndDeleteThread(self._ok_wrapper, 'sens').run() mock_poll.assert_called_once_with(['RUNNING'], 0, 'sens') mock_del.assert_called_once_with() # Not OK mock_poll.reset_mock() mock_del.reset_mock() with self.assertLogs(jwrap.__name__, 'ERROR'): jwrap.PollAndDeleteThread(self._failed_wrapper, 'sens').run() mock_poll.assert_called_once_with(['RUNNING'], 0, 'sens') mock_del.assert_called_once_with() @mock.patch('pypowervm.wrappers.job.Job.poll_while_status') @mock.patch('pypowervm.wrappers.job.Job.delete_job') def test_cancel_job_thread(self, mock_del, mock_poll): jwrap.CancelJobThread(self._ok_wrapper, 'sens').run() mock_poll.assert_called_once_with(['RUNNING', 'NOT_STARTED'], 0, 'sens') mock_del.assert_called_once_with() @mock.patch('pypowervm.wrappers.job.CancelJobThread.start') @mock.patch('pypowervm.wrappers.job.Job.delete_job') @mock.patch('pypowervm.wrappers.job.Job._monitor_job') def test_cancel_job(self, mock_monitor, mock_delete, mock_start): wrapper = self._ok_wrapper self.adpt.update.side_effect = ex.Error('error') mock_monitor.return_value = False wrapper.cancel_job() self.adpt.update.assert_called_with( None, None, root_type='jobs', root_id=wrapper.job_id, suffix_type='cancel') self.assertEqual(0, mock_delete.call_count) self.adpt.update.reset_mock() self.adpt.update.side_effect = None mock_start.reset_mock() mock_monitor.return_value = False wrapper.cancel_job() self.adpt.update.assert_called_with( None, None, 
root_type='jobs', root_id=wrapper.job_id, suffix_type='cancel') self.assertEqual(1, mock_start.call_count) @mock.patch('pypowervm.wrappers.job.Job.job_status') def test_delete_job(self, mock_status): wrapper = self._ok_wrapper mock_status.__get__ = mock.Mock(return_value=jwrap.JobStatus.RUNNING) with self.assertLogs(jwrap.__name__, 'ERROR'): self.assertRaises(ex.Error, wrapper.delete_job) mock_status.__get__.return_value = jwrap.JobStatus.COMPLETED_OK wrapper.delete_job() self.adpt.delete.assert_called_with('jobs', wrapper.job_id) self.adpt.delete.reset_mock() self.adpt.delete.side_effect = ex.Error('foo') with self.assertLogs(jwrap.__name__, 'ERROR'): wrapper.delete_job() self.adpt.delete.assert_called_with('jobs', wrapper.job_id) pypowervm-1.1.24/pypowervm/tests/wrappers/test_virtual_io_server.py0000664000175000017500000011574313571367171025514 0ustar neoneo00000000000000# Copyright 2014, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Tests for the VIOS wrapper (pypowervm.wrappers.virtual_io_server).

import copy

import mock
import unittest

import pypowervm.adapter as adpt
import pypowervm.const as c
import pypowervm.tests.test_utils.test_wrapper_abc as twrap
import pypowervm.wrappers.base_partition as bp
import pypowervm.wrappers.storage as pvm_stor
import pypowervm.wrappers.virtual_io_server as vios


class TestVIOSWrapper(twrap.TestWrapper):
    """Property/accessor tests for vios.VIOS against canned response data."""

    # Canned REST response feed loaded by twrap.TestWrapper, and the wrapper
    # class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def test_update_timeout(self):
        # update() defaults to a 3600s timeout...
        self.adpt.update_by_path.return_value = self.dwrap.entry
        self.assertEqual(self.dwrap.entry, self.dwrap.update().entry)
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=3600)
        # ...unless an explicit timeout is passed through.
        self.assertEqual(self.dwrap.entry, self.dwrap.update(timeout=42).entry)
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=42)
        # If the session is configured for longer...
        self.adpt.session.timeout = 10000
        self.assertEqual(self.dwrap.entry, self.dwrap.update().entry)
        # ...default to the longer value.
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=10000)
        # But explicit timeout can still be set.
        self.assertEqual(self.dwrap.entry, self.dwrap.update(timeout=42).entry)
        self.adpt.update_by_path.assert_called_with(self.dwrap, None, mock.ANY,
                                                    timeout=42)

    def test_get_ip_addresses(self):
        expected_ips = ('9.1.2.4', '10.10.10.5')
        self.assertEqual(expected_ips, self.dwrap.ip_addresses)

    def test_mover_service_partition(self):
        # Getter reflects the feed value; the setter flips it in place.
        self.assertTrue(self.dwrap.is_mover_service_partition)
        self.dwrap.is_mover_service_partition = False
        self.assertFalse(self.dwrap.is_mover_service_partition)

    def test_rmc_ip(self):
        self.assertEqual('9.1.2.5', self.dwrap.rmc_ip)

    def test_license_accept(self):
        self.assertTrue(self.dwrap.is_license_accepted)

    def test_vnic_capabilities(self):
        self.assertTrue(self.dwrap.vnic_capable)
        self.assertTrue(self.dwrap.vnic_failover_capable)

    def test_hdisk_reserve_policy_found(self):
        # Most are NoReserve; look for the only one that's SinglePath to make
        # sure we're actually searching rather than picking first/last/random
        found_policy = self.dwrap.hdisk_reserve_policy(
            '6005076300838041300000000000002B')
        self.assertEqual('SinglePath', found_policy)

    def test_hdisk_reserve_policy_notfound(self):
        # An unknown device UDID yields None rather than raising.
        found_policy = self.dwrap.hdisk_reserve_policy('Bogus')
        self.assertIsNone(found_policy)

    def test_hdisk_from_uuid_found(self):
        found_name = self.dwrap.hdisk_from_uuid(
            '01M0lCTTIxNDUyNEM2MDA1MDc2MzAwODM4MDQxMzAwMDAwMDAwMDAwMDhCNQ==')
        self.assertEqual('hdisk7', found_name)

    def test_hdisk_from_uuid_notfound(self):
        # An unknown UUID yields None rather than raising.
        found_name = self.dwrap.hdisk_from_uuid('Bogus')
        self.assertIsNone(found_name)

    def test_seas(self):
        self.assertEqual(1, len(self.dwrap.seas))

        sea = self.dwrap.seas[0]
        self.assertEqual(1, sea.pvid)
        self.assertEqual(1, len(sea.addl_adpts))

    def test_trunks(self):
        self.assertEqual(3, len(self.dwrap.trunk_adapters))
        self.assertEqual(1, self.dwrap.trunk_adapters[0].pvid)
        self.assertEqual(4094, self.dwrap.trunk_adapters[1].pvid)
        self.assertEqual(4093, self.dwrap.trunk_adapters[2].pvid)

    def test_derive_orphan_trunk_adapters(self):
        # Per the feed data, only the 4093 trunk is expected to be an orphan.
        orphans = self.dwrap.derive_orphan_trunk_adapters()
        self.assertEqual(1, len(orphans))
        self.assertEqual(4093, orphans[0].pvid)

    def test_wwpns(self):
        """Tests the helper methods to get WWPNs more easily."""
        phys_paths = self.dwrap.get_pfc_wwpns()
        self.assertIsNotNone(phys_paths)
        self.assertEqual(2, len(phys_paths))

        virt_paths = self.dwrap.get_vfc_wwpns()
        self.assertIsNotNone(virt_paths)
        self.assertEqual(2, len(virt_paths))
        for virt_path in virt_paths:
            self.assertEqual(2, len(virt_path))

        self.assertEqual(1, len(self.dwrap.get_active_pfc_wwpns()))
        self.assertEqual('10000090FA1B6302',
                         self.dwrap.get_active_pfc_wwpns()[0])

    def test_pfc_ports(self):
        """Tests that the physical FC ports can be gathered."""
        ports = self.dwrap.pfc_ports
        self.assertIsNotNone(ports)
        self.assertEqual(2, len(ports))

        # Validate attributes on one.
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2-T2', ports[0].loc_code)
        self.assertEqual('fcs1', ports[0].name)
        self.assertEqual('1aU78AB.001.WZSJBM3-P1-C2-T2', ports[0].udid)
        self.assertEqual('10000090FA1B6303', ports[0].wwpn)
        self.assertEqual(0, ports[0].npiv_available_ports)
        self.assertEqual(0, ports[0].npiv_total_ports)

    def test_phys_vols(self):
        """Tests that the physical volumes can be gathered."""
        phys_vols = self.dwrap.phys_vols
        self.assertIsNotNone(phys_vols)
        self.assertEqual(11, len(phys_vols))

        # Validate attributes on one.
self.assertEqual(phys_vols[0].description, 'SAS Disk Drive') self.assertEqual(phys_vols[0].udid, '01M0lCTU1CRjI2MDBSQzUwMDAwMzk0NzgzQTUyQjg=') self.assertEqual(phys_vols[0].capacity, 572325) self.assertEqual(phys_vols[0].name, 'hdisk0') self.assertEqual(phys_vols[0].state, 'active') self.assertEqual(phys_vols[0]._encryption_state, 'Unlocked') self.assertIsNone(phys_vols[0]._encryption_key) agent = phys_vols[0]._encryption_agent self.assertIsInstance(agent, pvm_stor._LUKSEncryptor) self.assertEqual(agent.key_size, 512) self.assertEqual(agent.cipher, 'aes-cbc-essiv:sha256') self.assertEqual(agent.hash_spec, 'sha512') class TestViosMappings(twrap.TestWrapper): file = 'fake_vios_mappings.txt' wrapper_class_to_test = vios.VIOS mock_adapter_fx_args = {} def setUp(self): super(TestViosMappings, self).setUp() self.adpt.build_href.return_value = "a_link" def test_bld_scsi_mapping_vopt(self): """Validation that the element is correct.""" vopt = pvm_stor.VOptMedia.bld_ref(self.adpt, 'media_name') vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', vopt) self.assertIsNotNone(vmap) self.assertIsNotNone(vmap.element) self.assertEqual(vmap.client_adapter.side, 'Client') self.assertTrue(vmap.client_adapter._get_val_bool( 'UseNextAvailableSlotID')) self.assertEqual(vmap.server_adapter.side, 'Server') # Validate the exact XML of the server adapter: ensure proper ordering. self.assertEqual( '' 'Servertrue'.encode('utf-8'), vmap.server_adapter.toxmlstring()) # If the slot number is None then REST will assign the first available. 
self.assertIsNone(vmap.client_adapter.lpar_slot_num) self.assertIsNone(vmap.target_dev) self.assertEqual('media_name', vmap.backing_storage.media_name) self.assertEqual('a_link', vmap.client_lpar_href) self.assertIsInstance(vmap.backing_storage, pvm_stor.VOptMedia) # Test the cloning vopt2 = pvm_stor.VOptMedia.bld_ref(self.adpt, 'media_name2') vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, vopt2) self.assertIsNotNone(vmap2) self.assertIsNotNone(vmap2.element) self.assertEqual(vmap2.client_adapter.side, 'Client') self.assertEqual(vmap2.server_adapter.side, 'Server') self.assertIsNone(vmap2.client_adapter.lpar_slot_num) self.assertIsNone(vmap2.target_dev) self.assertEqual('media_name2', vmap2.backing_storage.media_name) self.assertEqual('a_link', vmap2.client_lpar_href) self.assertIsInstance(vmap2.backing_storage, pvm_stor.VOptMedia) # Clone to a different device type vdisk = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name') vmap3 = vios.VSCSIMapping.bld_from_existing( vmap, vdisk, lpar_slot_num=6, lua='vdisk_lua') self.assertIsNotNone(vmap3) self.assertIsNotNone(vmap3.element) # Validate the exact XML of the client adapter: ensure proper ordering. self.assertEqual( '' 'Clientfalse<' 'uom:VirtualSlotNumber>6'.encode('utf-8'), vmap3.client_adapter.toxmlstring()) self.assertEqual('Client', vmap3.client_adapter.side) # Specifying 'lua' builds the appropriate type of target dev... 
self.assertIsInstance(vmap3.target_dev, pvm_stor.VDiskTargetDev) # ...with the correct LUA self.assertEqual('vdisk_lua', vmap3.target_dev.lua) self.assertEqual(6, vmap3.client_adapter.lpar_slot_num) # Assert this is set to False when specifying the slot number # and building from an existing mapping self.assertFalse(vmap3.client_adapter._get_val_bool( 'UseNextAvailableSlotID')) self.assertEqual('Server', vmap3.server_adapter.side) self.assertEqual('disk_name', vmap3.backing_storage.name) self.assertEqual('LogicalVolume', vmap3.backing_storage.vdtype) self.assertEqual('a_link', vmap3.client_lpar_href) self.assertIsInstance(vmap3.backing_storage, pvm_stor.VDisk) def test_bld_scsi_mapping_vdisk(self): """Validation that the element is correct.""" vdisk = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name') vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', vdisk, lpar_slot_num=5, lua='vdisk_lua') self.assertIsNotNone(vmap) self.assertIsNotNone(vmap.element) self.assertEqual('Client', vmap.client_adapter.side) self.assertIsInstance(vmap.target_dev, pvm_stor.VDiskTargetDev) self.assertEqual('vdisk_lua', vmap.target_dev.lua) self.assertEqual(5, vmap.client_adapter.lpar_slot_num) # Assert that we set this to False when specifying the slot number self.assertFalse(vmap.client_adapter._get_val_bool( 'UseNextAvailableSlotID')) self.assertEqual('Server', vmap.server_adapter.side) self.assertEqual('disk_name', vmap.backing_storage.name) self.assertEqual('LogicalVolume', vmap.backing_storage.vdtype) self.assertEqual('a_link', vmap.client_lpar_href) self.assertIsInstance(vmap.backing_storage, pvm_stor.VDisk) # Test cloning vdisk2 = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name2') vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, vdisk2, lpar_slot_num=6) self.assertIsNotNone(vmap2) self.assertIsNotNone(vmap2.element) self.assertEqual('Client', vmap2.client_adapter.side) # Cloning without specifying 'lua' doesn't clone the target dev 
self.assertIsNone(vmap2.target_dev) self.assertEqual(6, vmap2.client_adapter.lpar_slot_num) self.assertFalse(vmap2.client_adapter._get_val_bool( 'UseNextAvailableSlotID')) self.assertEqual('Server', vmap2.server_adapter.side) self.assertEqual('disk_name2', vmap2.backing_storage.name) self.assertEqual('LogicalVolume', vmap2.backing_storage.vdtype) self.assertEqual('a_link', vmap2.client_lpar_href) self.assertIsInstance(vmap2.backing_storage, pvm_stor.VDisk) def test_bld_scsi_mapping_lu(self): """Validation that the element is correct.""" lu = pvm_stor.LU.bld_ref(self.adpt, 'disk_name', 'udid') vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', lu, lpar_slot_num=5) self.assertIsNotNone(vmap) self.assertIsNotNone(vmap.element) self.assertEqual('Client', vmap.client_adapter.side) self.assertIsNone(vmap.target_dev) self.assertEqual(5, vmap.client_adapter.lpar_slot_num) self.assertEqual('Server', vmap.server_adapter.side) self.assertEqual('disk_name', vmap.backing_storage.name) self.assertEqual('udid', vmap.backing_storage.udid) self.assertEqual('a_link', vmap.client_lpar_href) self.assertIsInstance(vmap.backing_storage, pvm_stor.LU) # Test cloning lu2 = pvm_stor.LU.bld_ref(self.adpt, 'disk_name2', 'udid2') vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, lu2, lua='lu_lua') self.assertIsNotNone(vmap2) self.assertIsNotNone(vmap2.element) self.assertEqual('Client', vmap2.client_adapter.side) self.assertEqual(5, vmap2.client_adapter.lpar_slot_num) self.assertIsInstance(vmap2.target_dev, pvm_stor.LUTargetDev) self.assertEqual('lu_lua', vmap2.target_dev.lua) self.assertEqual('Server', vmap2.server_adapter.side) self.assertEqual('disk_name2', vmap2.backing_storage.name) self.assertEqual('udid2', vmap2.backing_storage.udid) self.assertEqual('a_link', vmap2.client_lpar_href) self.assertIsInstance(vmap2.backing_storage, pvm_stor.LU) def test_bld_scsi_mapping_pv(self): """Validation that the element is correct.""" pv = pvm_stor.PV.bld(self.adpt, 'disk_name', 
'udid') vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', pv, lpar_slot_num=5, target_name='fake_name') self.assertIsNotNone(vmap) self.assertIsNotNone(vmap.element) self.assertEqual('Client', vmap.client_adapter.side) self.assertEqual(5, vmap.client_adapter.lpar_slot_num) self.assertEqual('Server', vmap.server_adapter.side) self.assertEqual('disk_name', vmap.backing_storage.name) self.assertEqual('a_link', vmap.client_lpar_href) self.assertEqual('fake_name', vmap.target_dev.name) self.assertIsInstance(vmap.backing_storage, pvm_stor.PV) # Test cloning pv2 = pvm_stor.PV.bld(self.adpt, 'disk_name2', 'udid2') vmap2 = vios.VSCSIMapping.bld_from_existing( vmap, pv2, lpar_slot_num=6, lua='pv_lua') self.assertIsNotNone(vmap2) self.assertIsNotNone(vmap2.element) self.assertEqual('Client', vmap2.client_adapter.side) self.assertEqual(6, vmap2.client_adapter.lpar_slot_num) self.assertIsInstance(vmap2.target_dev, pvm_stor.PVTargetDev) self.assertEqual('pv_lua', vmap2.target_dev.lua) self.assertEqual('Server', vmap2.server_adapter.side) self.assertEqual('disk_name2', vmap2.backing_storage.name) self.assertEqual('a_link', vmap2.client_lpar_href) self.assertIsNone(vmap2.target_dev.name) self.assertIsInstance(vmap2.backing_storage, pvm_stor.PV) # Test empty target_dev_type pv3 = pvm_stor.PV.bld(self.adpt, 'disk_name3', 'udid3') vmap3 = vios.VSCSIMapping.bld_from_existing( vmap, pv3, lpar_slot_num=6) self.assertIsNone(vmap3.target_dev) def test_clone_scsi_mapping_no_storage(self): """Clone a VSCSI mapping with no storage element.""" pv = pvm_stor.PV.bld(self.adpt, 'disk_name', 'udid') vmap = vios.VSCSIMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', pv, lpar_slot_num=5) vmap2 = vios.VSCSIMapping.bld_from_existing(vmap, None) self.assertIsNotNone(vmap2) self.assertIsNotNone(vmap2.element) self.assertEqual('Client', vmap2.client_adapter.side) self.assertEqual('Server', vmap2.server_adapter.side) self.assertEqual('a_link', vmap2.client_lpar_href) 
self.assertEqual(5, vmap2.client_adapter.lpar_slot_num) self.assertIsNone(vmap.target_dev) self.assertIsNone(vmap2.backing_storage) # Illegal to specify target dev properties without backing storage. self.assertRaises(ValueError, vios.VSCSIMapping.bld_from_existing, vmap, None, lua='bogus') def test_get_scsi_mappings(self): mappings = self.dwrap.scsi_mappings # Ensure that at least one adapter has a client LPAR & storage found_client_uri = False static_map = None for mapping in mappings: if mapping.client_lpar_href and mapping.backing_storage: found_client_uri = True static_map = mapping self.assertTrue(found_client_uri) # We'll use the previous mapping as a baseline for further validation self.assertIsNotNone(static_map.client_adapter) self.assertIsNotNone(static_map.backing_storage) self.assertIsNotNone(static_map.server_adapter) # Deeper check on each of these. ca = static_map.client_adapter self.assertEqual(5, ca.lpar_id) self.assertEqual(1, ca.vios_id) self.assertTrue(ca.is_varied_on) self.assertIsNotNone(ca.lpar_slot_num) self.assertIsNotNone(ca.vios_slot_num) self.assertIsNotNone(ca.loc_code) self.assertEqual(ca.side, 'Client') sa = static_map.server_adapter self.assertEqual(10, sa.lpar_id) self.assertEqual(1, sa.vios_id) self.assertIsNotNone(sa.name) self.assertIsNotNone(sa.backing_dev_name) self.assertIsNotNone(sa.udid) self.assertEqual(sa.side, 'Server') self.assertTrue(sa.is_varied_on) self.assertIsNotNone(sa.lpar_slot_num) self.assertIsNotNone(sa.vios_slot_num) self.assertIsNotNone(sa.loc_code) # Try copying the map and adding it in new_map = copy.deepcopy(static_map) orig_size = len(mappings) mappings.append(new_map) self.assertEqual(len(mappings), orig_size + 1) self.assertEqual(len(self.dwrap.scsi_mappings), orig_size + 1) mappings.remove(new_map) self.dwrap.scsi_mappings = mappings self.assertEqual(len(self.dwrap.scsi_mappings), orig_size) def test_vfc_mappings(self): mappings = self.dwrap.vfc_mappings # Ensure that at least one adapter has a client 
LPAR found_client_uri = False static_map = None for mapping in mappings: if mapping.client_lpar_href: found_client_uri = True static_map = mapping self.assertTrue(found_client_uri) # We'll use the previous mapping as a baseline for further validation self.assertIsNotNone(static_map.client_adapter) self.assertIsNotNone(static_map.backing_port) self.assertIsNotNone(static_map.server_adapter) # Deeper check on each of these. ca = static_map.client_adapter self.assertIsNotNone(ca.wwpns) self.assertIsNotNone(ca.lpar_id) self.assertIsNotNone(ca.vios_id) self.assertTrue(ca.is_varied_on) self.assertIsNotNone(ca.lpar_slot_num) self.assertIsNotNone(ca.vios_slot_num) self.assertIsNotNone(ca.loc_code) self.assertEqual(ca.side, 'Client') bport = static_map.backing_port self.assertIsNotNone(bport.loc_code) self.assertIsNotNone(bport.name) self.assertIsNotNone(bport.udid) self.assertIsNotNone(bport.wwpn) self.assertIsNotNone(bport.npiv_available_ports) self.assertIsNotNone(bport.npiv_total_ports) sa = static_map.server_adapter self.assertIsNotNone(sa.name) self.assertIsNotNone(sa.map_port) self.assertIsNotNone(sa.udid) self.assertEqual(sa.side, 'Server') self.assertTrue(sa.is_varied_on) self.assertIsNotNone(sa.lpar_slot_num) self.assertIsNotNone(sa.vios_slot_num) self.assertIsNotNone(sa.loc_code) # Try copying the map and adding it in new_map = copy.deepcopy(static_map) orig_size = len(mappings) mappings.append(new_map) self.assertEqual(len(mappings), orig_size + 1) self.assertEqual(len(self.dwrap.vfc_mappings), orig_size + 1) mappings.remove(new_map) self.dwrap.vfc_mappings = mappings self.assertEqual(len(self.dwrap.vfc_mappings), orig_size) def test_bld_vfc_mapping(self): mapping = vios.VFCMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', 'fcs0', ['aa', 'bb']) self.assertIsNotNone(mapping) # Validate the FC Backing port self.assertIsNotNone(mapping.backing_port) # Validate the Server Adapter self.assertIsNotNone(mapping.server_adapter) # Validate the Client Adapter 
self.assertIsNotNone(mapping.client_adapter) self.assertEqual(['AA', 'BB'], mapping.client_adapter.wwpns) def test_bld_vfc_mapping_with_slot(self): mapping = vios.VFCMapping.bld(self.adpt, 'host_uuid', 'client_lpar_uuid', 'fcs0', client_wwpns=['aa', 'bb'], lpar_slot_num=3) self.assertIsNotNone(mapping) # Validate the FC Backing port self.assertIsNotNone(mapping.backing_port) # Validate the Server Adapter self.assertIsNotNone(mapping.server_adapter) # Validate the Client Adapter self.assertIsNotNone(mapping.client_adapter) self.assertEqual(['AA', 'BB'], mapping.client_adapter.wwpns) # verify the slot number self.assertEqual(3, mapping.client_adapter.lpar_slot_num) # Assert that we set this to False when specifying the slot number self.assertFalse(mapping.client_adapter._get_val_bool( 'UseNextAvailableSlotID')) def test_bld_scsi_mapping_from_existing(self): def map_has_pieces(smap, lpar_href=True, client_adapter=True, server_adapter=True, storage=True, target_device=True): def has_piece(piece, has_it): if has_it: self.assertIsNotNone(piece) else: self.assertIsNone(piece) has_piece(smap.client_lpar_href, lpar_href) has_piece(smap.client_adapter, client_adapter) has_piece(smap.server_adapter, server_adapter) has_piece(smap.backing_storage, storage) has_piece(smap.element.find('TargetDevice'), target_device) stg = pvm_stor.VDisk.bld_ref(self.adpt, 'disk_name') smaps = self.dwrap.scsi_mappings # 0 has only ServerAdapter sm = smaps[0] map_has_pieces(sm, lpar_href=False, client_adapter=False, storage=False, target_device=False) smclone = vios.VSCSIMapping.bld_from_existing(sm, stg) map_has_pieces(smclone, lpar_href=False, client_adapter=False, target_device=False) self.assertEqual(stg, smclone.backing_storage) # 1 has ServerAdapter, Storage, and TargetDevice sm = smaps[1] map_has_pieces(sm, lpar_href=False, client_adapter=False) self.assertNotEqual(stg, sm.backing_storage) smclone = vios.VSCSIMapping.bld_from_existing(sm, stg) # Target device *disappears* 
        map_has_pieces(smclone, lpar_href=False, client_adapter=False,
                       target_device=False)
        self.assertEqual(stg, smclone.backing_storage)

        # 3 has AssociatedLogicalPartition, ClientAdapter, ServerAdapter.
        sm = smaps[3]
        map_has_pieces(sm, storage=False, target_device=False)
        smclone = vios.VSCSIMapping.bld_from_existing(sm, stg)
        map_has_pieces(smclone, target_device=False)
        self.assertEqual(stg, smclone.backing_storage)

        # 12 has everything
        sm = smaps[12]
        map_has_pieces(sm)
        self.assertNotEqual(stg, sm.backing_storage)
        smclone = vios.VSCSIMapping.bld_from_existing(sm, stg)
        # Target device *disappears*
        map_has_pieces(smclone, target_device=False)
        self.assertEqual(stg, smclone.backing_storage)
        # Everything else cloned okay
        self.assertEqual(sm.client_lpar_href, smclone.client_lpar_href)
        self.assertEqual(sm.client_adapter, smclone.client_adapter)
        self.assertEqual(sm.server_adapter, smclone.server_adapter)


class TestCrtRelatedHref(unittest.TestCase):
    """Tests for VStorageMapping.crt_related_href URI construction."""

    @mock.patch('pypowervm.adapter.Session')
    def test_crt_related_href(self, mock_sess):
        """Tests to make sure that related elements are well formed."""
        # Test with host_uuid defined
        mock_sess.dest = 'root'
        adapter = adpt.Adapter(mock_sess)
        href = vios.VStorageMapping.crt_related_href(adapter, 'host', 'lpar')
        self.assertEqual('root/rest/api/uom/ManagedSystem/host/'
                         'LogicalPartition/lpar', href)
        # Test with host_uuid = None
        href = vios.VStorageMapping.crt_related_href(adapter, None, 'lpar')
        self.assertEqual('root/rest/api/uom/LogicalPartition/lpar', href)


class TestVSCSIBus(twrap.TestWrapper):
    """Tests for the vios.VSCSIBus wrapper against vscsibus_feed.txt."""

    # Canned REST response feed and the wrapper class under test.
    file = 'vscsibus_feed.txt'
    wrapper_class_to_test = vios.VSCSIBus

    def test_props(self):
        # Property getters reflect the canned feed data.
        self.assertEqual(4, len(self.entries))
        bus = self.dwrap
        self.assertEqual('1f25efc1-a42b-3384-85e7-f37158f46615', bus.uuid)
        self.assertEqual(
            'http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-3'
            '42c-9f43-ddfeb9f8edd3/LogicalPartition/3DFF2EF5-6F99-4C29-B655-EE'
            '57DF1B64C6', bus.client_lpar_href)
        self.assertEqual(5, bus.client_adapter.lpar_id)
        self.assertEqual(2, bus.client_adapter.lpar_slot_num)
        self.assertEqual(5, bus.server_adapter.lpar_id)
        self.assertEqual(2, bus.server_adapter.lpar_slot_num)
        map1, map2 = bus.mappings
        self.assertIsInstance(map1.backing_storage, pvm_stor.PV)
        self.assertEqual('hdisk10', map1.backing_storage.name)
        self.assertIsInstance(map1.target_dev, pvm_stor.PVTargetDev)
        self.assertEqual('0x8100000000000000', map1.target_dev.lua)
        self.assertIsInstance(map2.backing_storage, pvm_stor.VOptMedia)
        self.assertEqual('cfg_My_OS_Image_V_3dff2ef5_000000.iso',
                         map2.backing_storage.name)
        self.assertIsInstance(map2.target_dev, pvm_stor.VOptTargetDev)
        self.assertEqual('0x8200000000000000', map2.target_dev.lua)

    def test_bld(self):
        self.adpt.build_href.return_value = 'href'
        # Default slot number (use next available)
        bus = vios.VSCSIBus.bld(self.adpt, 'client_lpar_uuid')
        self.adpt.build_href.assert_called_once_with(
            'LogicalPartition', 'client_lpar_uuid', xag=[])
        self.assertEqual('href', bus.client_lpar_href)
        self.assertTrue(bus.client_adapter._get_val_bool(
            pvm_stor._VADPT_NEXT_SLOT))
        self.assertIsNotNone(bus.server_adapter)
        self.assertEqual([], bus.mappings)
        # Specify slot number: UseNextAvailableSlotID must then be False.
        bus = vios.VSCSIBus.bld(self.adpt, 'client_lpar_uuid',
                                lpar_slot_num=42)
        self.assertFalse(bus.client_adapter._get_val_bool(
            pvm_stor._VADPT_NEXT_SLOT))
        self.assertEqual(42, bus.client_adapter.lpar_slot_num)

    def test_bld_from_existing(self):
        # Clone keeps href/adapter identifiers but starts with no mappings.
        bus = vios.VSCSIBus.bld_from_existing(self.dwrap)
        self.assertEqual(
            'http://localhost:12080/rest/api/uom/ManagedSystem/1cab7366-6b73-3'
            '42c-9f43-ddfeb9f8edd3/LogicalPartition/3DFF2EF5-6F99-4C29-B655-EE'
            '57DF1B64C6', bus.client_lpar_href)
        self.assertEqual(5, bus.client_adapter.lpar_id)
        self.assertEqual(2, bus.client_adapter.lpar_slot_num)
        self.assertEqual(5, bus.server_adapter.lpar_id)
        self.assertEqual(2, bus.server_adapter.lpar_slot_num)
        self.assertEqual([], bus.mappings)

    def test_mappings(self):
        # No LUA
        lu1 = pvm_stor.LU.bld_ref(self.adpt, 'lu1', 'lu_udid1')
        std1 = vios.STDev.bld(self.adpt, lu1)
        self.assertIsInstance(std1.backing_storage, pvm_stor.LU)
        self.assertEqual('lu1', std1.backing_storage.name)
        self.assertIsNone(std1.target_dev)
        # With LUA
        vdisk1 = pvm_stor.VDisk.bld_ref(self.adpt, 'vdisk1')
        std2 = vios.STDev.bld(self.adpt, vdisk1, lua='vdisk1_lua')
        self.assertIsInstance(std2.backing_storage, pvm_stor.VDisk)
        self.assertEqual('vdisk1', std2.backing_storage.name)
        self.assertEqual('LogicalVolume', std2.backing_storage.vdtype)
        self.assertIsInstance(std2.target_dev, pvm_stor.VDiskTargetDev)
        self.assertEqual('vdisk1_lua', std2.target_dev.lua)
        # Add 'em to a bus
        bus = self.dwrap
        self.assertEqual(2, len(bus.mappings))
        bus.mappings.extend((std1, std2))
        self.assertEqual(4, len(bus.mappings))
        self.assertEqual('lu1', bus.mappings[2].backing_storage.name)
        self.assertEqual('vdisk1', bus.mappings[3].backing_storage.name)
        # Replace bus mappings
        bus.mappings = [std2, std1]
        self.assertEqual(2, len(bus.mappings))
        self.assertEqual('vdisk1', bus.mappings[0].backing_storage.name)
        self.assertEqual('lu1', bus.mappings[1].backing_storage.name)


class TestPartitionIOConfiguration(twrap.TestWrapper):
    """Tests for the partition I/O configuration exposed by the VIOS."""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def setUp(self):
        super(TestPartitionIOConfiguration, self).setUp()
        self.io_config = self.dwrap.io_config

    def test_max_slots(self):
        self.assertEqual(80, self.io_config.max_virtual_slots)

    def test_io_slots(self):
        # IO Slots are typically associated with the VIOS.  Further testing
        # driven there.
        self.assertIsNotNone(self.io_config.io_slots)
        self.assertEqual(3, len(self.io_config.io_slots))


class TestIOSlots(twrap.TestWrapper):
    """Tests for bp.IOSlot as surfaced through the VIOS I/O configuration."""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def setUp(self):
        super(TestIOSlots, self).setUp()
        # First I/O slot from the canned feed.
        self.io_slot = self.dwrap.io_config.io_slots[0]

    def test_attrs(self):
        # Simple property getters reflect the canned feed data.
        self.assertEqual('PCI-E SAS Controller', self.io_slot.description)
        self.assertEqual('U78AB.001.WZSJBM3', self.io_slot.phys_loc)
        self.assertEqual(825, self.io_slot.pc_adpt_id)
        self.assertEqual(260, self.io_slot.pci_class)
        self.assertEqual(825, self.io_slot.pci_dev_id)
        self.assertEqual(826, self.io_slot.pci_subsys_dev_id)
        self.assertEqual(4116, self.io_slot.pci_mfg_id)
        self.assertEqual(1, self.io_slot.pci_rev_id)
        self.assertEqual(4116, self.io_slot.pci_vendor_id)
        self.assertEqual(4116, self.io_slot.pci_subsys_vendor_id)
        self.assertEqual(553713674, self.io_slot.drc_index)
        self.assertEqual('U78AB.001.WZSJBM3-P1-T9', self.io_slot.drc_name)
        self.assertEqual(False, self.io_slot.bus_grp_required)
        self.assertEqual(False, self.io_slot.required)

    def test_io_slots_setter(self):
        # Deleting a slot via the setter must persist in the wrapper.
        old_len = len(self.dwrap.io_config.io_slots)
        new_io_slots = self.dwrap.io_config.io_slots[:]
        deleted_slot = new_io_slots[1]
        del new_io_slots[1]
        self.dwrap.io_config.io_slots = new_io_slots
        self.assertEqual(old_len - 1, len(self.dwrap.io_config.io_slots))
        self.assertNotIn(deleted_slot, self.dwrap.io_config.io_slots)

    @mock.patch('warnings.warn')
    def test_io_adpt(self, mock_warn):
        self.assertEqual('553713674', self.io_slot.io_adapter.id)

        # Verify deprecation warning on IOSlot.adapter
        self.assertEqual('553713674', self.io_slot.adapter.id)
        mock_warn.assert_called_with(mock.ANY, DeprecationWarning)

    def test_bld(self):
        new_slot = bp.IOSlot.bld(self.adpt, True, 12345678)
        self.assertEqual(False, new_slot.required)
        self.assertEqual(True, new_slot.bus_grp_required)
        self.assertEqual(12345678, new_slot.drc_index)


class TestGenericIOAdapter(twrap.TestWrapper):
    """Tests for a non-FC I/O adapter hanging off an I/O slot."""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def setUp(self):
        super(TestGenericIOAdapter, self).setUp()
        self.io_adpt = self.dwrap.io_config.io_slots[0].io_adapter

    def test_attrs(self):
        self.assertEqual('553713674', self.io_adpt.id)
        self.assertEqual('PCI-E SAS Controller', self.io_adpt.description)
        self.assertEqual('U78AB.001.WZSJBM3-P1-T9', self.io_adpt.dev_name)
        self.assertEqual('U78AB.001.WZSJBM3-P1-T9', self.io_adpt.drc_name)
        self.assertEqual('T9', self.io_adpt.phys_loc_code)
        # Slot 0's adapter is generic, not the FC subclass.
        self.assertFalse(isinstance(self.io_adpt, bp.PhysFCAdapter))


class TestPhysFCAdapter(twrap.TestWrapper):
    """Tests for the physical Fibre Channel adapter subclass."""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def setUp(self):
        super(TestPhysFCAdapter, self).setUp()
        # Slot 2 holds the FC adapter in the canned feed.
        self.io_adpt = self.dwrap.io_config.io_slots[2].io_adapter

    def test_attrs(self):
        desc = '8 Gigabit PCI Express Dual Port Fibre Channel Adapter'

        self.assertEqual('553714177', self.io_adpt.id)
        self.assertEqual(desc, self.io_adpt.description)
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2', self.io_adpt.dev_name)
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2', self.io_adpt.drc_name)
        self.assertEqual('C2', self.io_adpt.phys_loc_code)
        self.assertIsInstance(self.io_adpt, bp.PhysFCAdapter)

    def test_fc_ports(self):
        self.assertEqual(2, len(self.io_adpt.fc_ports))


class TestPhysFCPort(twrap.TestWrapper):
    """Tests for the physical FC ports on the FC adapter."""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def setUp(self):
        super(TestPhysFCPort, self).setUp()
        self.io_port1 = self.dwrap.io_config.io_slots[2].io_adapter.fc_ports[0]
        self.io_port2 = self.dwrap.io_config.io_slots[2].io_adapter.fc_ports[1]

    def test_attrs(self):
        self.assertEqual('U78AB.001.WZSJBM3-P1-C2-T2', self.io_port1.loc_code)
        self.assertEqual('fcs1', self.io_port1.name)
        self.assertEqual('1aU78AB.001.WZSJBM3-P1-C2-T2', self.io_port1.udid)
        self.assertEqual('10000090FA1B6303', self.io_port1.wwpn)
        self.assertEqual(0, self.io_port1.npiv_available_ports)
        self.assertEqual(0, self.io_port1.npiv_total_ports)

        self.assertEqual('U78AB.001.WZSJBM3-P1-C2-T1', self.io_port2.loc_code)
        self.assertEqual('fcs0', self.io_port2.name)
        self.assertEqual('1aU78AB.001.WZSJBM3-P1-C2-T1', self.io_port2.udid)
        self.assertEqual('10000090FA1B6302', self.io_port2.wwpn)
        self.assertEqual(64, self.io_port2.npiv_available_ports)
        self.assertEqual(64, self.io_port2.npiv_total_ports)


class TestIOAdapterChoices(twrap.TestWrapper):
    """Tests for the free I/O adapters available for link aggregation."""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_ssp_npiv.txt'
    wrapper_class_to_test = vios.VIOS

    def setUp(self):
        super(TestIOAdapterChoices, self).setUp()
        self.io_adpts = self.dwrap.io_adpts_for_link_agg

    def test_adapter_choices(self):
        self.assertEqual(len(self.io_adpts), 3)
        self.assertEqual(self.io_adpts[0].id, '1')
        self.assertEqual(
            self.io_adpts[0].description,
            '4-Port Gigabit Ethernet PCI-Express Adapter (e414571614102004)')
        self.assertEqual(self.io_adpts[0].dev_name, 'ent3')
        self.assertEqual(self.io_adpts[0].dev_type, 'physicalEthernetAdpter')
        self.assertEqual(self.io_adpts[0].drc_name,
                         'U78AB.001.WZSJBM3-P1-C7-T4')
        self.assertEqual(self.io_adpts[0].phys_loc_code,
                         'U78AB.001.WZSJBM3-P1-C7-T4')
        self.assertEqual(self.io_adpts[0].udid, '13U78AB.001.WZSJBM3-P1-C7-T4')


class TestFeed3(twrap.TestWrapper):
    """Tests that specifically need fake_vios_feed3.txt"""

    # Canned REST response feed and the wrapper class under test.
    file = 'fake_vios_feed3.txt'
    wrapper_class_to_test = vios.VIOS

    def test_vivify_io_adpts_for_link_agg(self):
        """Vivifying FreeIOAdaptersForLinkAggregation adds the Network xag."""
        # The first VIOS doesn't have FreeIOAdapters...
        vwrp = self.dwrap
        self.assertIsNone(vwrp._find(vios._VIO_FREE_IO_ADPTS_FOR_LNAGG))
        # Vivify it - should be empty
        self.assertEqual([], vwrp.io_adpts_for_link_agg)
        # Now it's in there
        elem = vwrp._find(vios._VIO_FREE_IO_ADPTS_FOR_LNAGG)
        self.assertIsNotNone(elem)
        # Got the right xag
        self.assertEqual(c.XAG.VIO_NET, elem.attrib['group'])

    @mock.patch('warnings.warn')
    def test_xags(self, mock_warn):
        """Test deprecated extended attribute groups on the VIOS class.

        This can be removed once VIOS.xags is removed.
""" expected = dict(NETWORK=c.XAG.VIO_NET, STORAGE=c.XAG.VIO_STOR, SCSI_MAPPING=c.XAG.VIO_SMAP, FC_MAPPING=c.XAG.VIO_FMAP) for key, val in expected.items(): # Test class accessor, ensure '.name' works. self.assertEqual(val, getattr(vios.VIOS.xags, key).name) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) mock_warn.reset_mock() # Test instance accessor. self.assertEqual(val, getattr(self.dwrap.xags, key)) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) mock_warn.reset_mock() # And in case getattr(foo, 'bar') actually differs from foo.bar... self.assertEqual(c.XAG.VIO_NET, vios.VIOS.xags.NETWORK) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) mock_warn.reset_mock() # Make sure the equality comparison works the other way self.assertEqual(self.dwrap.xags.NETWORK, c.XAG.VIO_NET) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) # Test sorting self.assertTrue(c.XAG.VIO_NET < self.dwrap.xags.SCSI_MAPPING) self.assertTrue(self.dwrap.xags.NETWORK < c.XAG.VIO_SMAP) if __name__ == "__main__": unittest.main() pypowervm-1.1.24/pypowervm/tests/wrappers/test_search.py0000664000175000017500000000361313571367171023206 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest

import pypowervm.wrappers.cluster as clust
from pypowervm.wrappers import job
import pypowervm.wrappers.logical_partition as lpar
import pypowervm.wrappers.managed_system as ms
import pypowervm.wrappers.network as net
import pypowervm.wrappers.shared_proc_pool as spp
import pypowervm.wrappers.storage as stor
import pypowervm.wrappers.vios_file as vf
import pypowervm.wrappers.virtual_io_server as vios


class TestSearch(unittest.TestCase):
    """Verify which wrapper classes expose search_keys, and their values."""

    # Wrapper class => expected search_keys mapping.
    # None means the class must not define search_keys at all.
    expected_search_keys = {
        clust.Cluster: {'name': 'ClusterName'},
        job.Job: None,
        lpar.LPAR: {'name': 'PartitionName', 'id': 'PartitionID'},
        ms.System: None,
        net.NetBridge: None,
        net.VNet: None,
        net.CNA: None,
        net.VSwitch: None,
        spp.SharedProcPool: None,
        stor.SSP: {'name': 'StoragePoolName'},
        stor.VG: None,
        vf.File: None,
        vios.VIOS: {'name': 'PartitionName', 'id': 'PartitionID'},
    }

    def test_all_search_keys(self):
        for wcls, exp_keys in list(self.expected_search_keys.items()):
            if exp_keys is None:
                # This class must not advertise search_keys at all.
                self.assertFalse(hasattr(wcls, 'search_keys'))
                continue
            self.assertEqual(exp_keys, wcls.search_keys)


if __name__ == '__main__':
    unittest.main()
import unittest from pypowervm.tests.test_utils import pvmhttp import pypowervm.wrappers.base_partition as bp import pypowervm.wrappers.managed_system as ms import pypowervm.wrappers.mtms as mtmwrap _MS_HTTPRESP_FILE = "managedsystem.txt" _MC_HTTPRESP_FILE = "managementconsole.txt" _MS_NAME = 'HV4' class TestMSEntryWrapper(unittest.TestCase): def setUp(self): super(TestMSEntryWrapper, self).setUp() self.ms_http = pvmhttp.load_pvm_resp(_MS_HTTPRESP_FILE) self.assertNotEqual(self.ms_http, None, "Could not load %s " % _MS_HTTPRESP_FILE) entries = self.ms_http.response.feed.findentries( ms._SYSTEM_NAME, _MS_NAME) self.assertNotEqual(entries, None, "Could not find %s in %s" % (_MS_NAME, _MS_HTTPRESP_FILE)) self.myentry = entries[0] self.wrapper = ms.System.wrap(self.myentry) # Set a hardcoded value for MemoryUsedByHypervisor fw_mem = self.myentry.element.find( 'AssociatedSystemMemoryConfiguration/MemoryUsedByHypervisor') fw_mem.text = '1536' mc_http = pvmhttp.load_pvm_resp(_MC_HTTPRESP_FILE) self.assertNotEqual(mc_http, None, "Could not load %s" % _MC_HTTPRESP_FILE) self.test_ioslot_unassigned = self.wrapper.asio_config.io_slots[0] self.test_ioslot_assigned = self.wrapper.asio_config.io_slots[1] """ Create a bad wrapper to use when retrieving properties which don't exist """ self.bad_wrapper = ms.System.wrap(mc_http.response.feed.entries[0]) def verify_equal(self, method_name, returned_value, expected_value): self.assertEqual(returned_value, expected_value, "%s returned %s instead of %s" % (method_name, returned_value, expected_value)) def call_simple_getter(self, method_name, expected_value, expected_bad_value): # Use __getattribute__ to dynamically call the method value = self.wrapper.__getattribute__(method_name) if callable(value): value = value() self.verify_equal(method_name, value, expected_value) bad_value = self.bad_wrapper.__getattribute__(method_name) if callable(bad_value): bad_value = bad_value() self.verify_equal(method_name, bad_value, expected_bad_value) 
def test_get_val_str(self): expected_value = _MS_NAME value = self.wrapper._get_val_str(ms._SYSTEM_NAME) self.verify_equal("_get_val_str", value, expected_value) expected_value = None value = self.wrapper._get_val_str('BogusName') self.verify_equal( "_get_val_str for BogusName ", value, expected_value) def test_get_model(self): self.assertEqual(self.wrapper.mtms.model, "E4A") def test_get_type(self): self.assertEqual(self.wrapper.mtms.machine_type, "8203") def test_get_serial(self): self.assertEqual(self.wrapper.mtms.serial, "ACE0001") def test_get_mtms_str(self): self.assertEqual(self.wrapper.mtms.mtms_str, '8203-E4A*ACE0001') def test_highest_compat_mode(self): self.assertEqual(self.wrapper.highest_compat_mode(), 7) def test_proc_compat_modes(self): expected = ('default', 'POWER5', 'POWER6', 'POWER6_Enhanced', 'POWER6_Plus_Enhanced', 'POWER7') self.assertEqual(self.wrapper.proc_compat_modes, expected) def test_get_proc_units(self): self.call_simple_getter("proc_units", 500.0, 0) def test_get_min_proc_units(self): self.call_simple_getter("min_proc_units", 0.05, 0) def test_get_proc_units_configurable(self): self.call_simple_getter("proc_units_configurable", 500.0, 0) def test_get_proc_units_avail(self): self.call_simple_getter("proc_units_avail", 500.0, 0) def test_get_memory_total(self): self.call_simple_getter("memory_total", 5767168, 0) def test_get_memory_free(self): self.call_simple_getter("memory_free", 5242752, 0) def test_get_host_ip_address(self): self.call_simple_getter("host_ip_address", '127.0.0.1', None) def test_get_firmware_memory(self): self.call_simple_getter("firmware_memory", 1536, 0) def test_page_table_ratio(self): self.call_simple_getter("page_table_ratio", 7, 0) def test_default_ppt_ratio(self): self.call_simple_getter("default_ppt_ratio", 4, 6) def test_get_system_name(self): self.wrapper.set_parm_value(ms._SYSTEM_NAME, 'XYZ') name = self.wrapper.system_name self.verify_equal("system_name", name, 'XYZ') 
self.wrapper.set_parm_value(ms._SYSTEM_NAME, 'ABC') name = self.wrapper.system_name self.verify_equal("system_name", name, 'ABC') def test_max_procs_per_aix_linux_lpar(self): self.call_simple_getter("max_procs_per_aix_linux_lpar", 32, 0) # Test setter self.wrapper.max_procs_per_aix_linux_lpar = 64 self.call_simple_getter("max_procs_per_aix_linux_lpar", 64, 0) # Test fallback condition. Should retrieve max_sys_procs_limit self.wrapper.max_procs_per_aix_linux_lpar = 0 self.call_simple_getter("max_procs_per_aix_linux_lpar", self.wrapper.max_sys_procs_limit, 0) def test_max_vcpus_per_aix_linux_lpar(self): self.call_simple_getter("max_vcpus_per_aix_linux_lpar", 30, 0) # Test setter self.wrapper.max_vcpus_per_aix_linux_lpar = 60 self.call_simple_getter("max_vcpus_per_aix_linux_lpar", 60, 0) # Test fallback condition. Should retrieve max_sys_vcpus_limit self.wrapper.max_vcpus_per_aix_linux_lpar = 0 self.call_simple_getter("max_vcpus_per_aix_linux_lpar", self.wrapper.max_sys_vcpus_limit, 0) def test_vios_links(self): self.call_simple_getter( "vios_links", ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/caae9209-25e5-' '35cd-a71a-ed55c03f294d/VirtualIOServer/32F3530F-ECA0-4EAA-A37E-' '4B792C21AF70',), ()) def test_asio_wwpns(self): self.assertEqual(self.wrapper.asio_config.avail_wwpns, 65536) def test_asio_wwpn_prefix(self): self.assertEqual(self.wrapper.asio_config.wwpn_prefix, '12379814471884843981') def test_ioslot_bus_grp_required(self): self.assertFalse(self.test_ioslot_unassigned.bus_grp_required) self.assertTrue(self.test_ioslot_assigned.bus_grp_required) def test_ioslot_description(self): self.assertEqual('I/O Processor', self.test_ioslot_unassigned.description) self.assertEqual('I/O Processor', self.test_ioslot_assigned.description) def test_ioslot_feat_codes(self): self.assertEqual(0, self.test_ioslot_unassigned.feat_codes) self.assertIsNone(self.test_ioslot_assigned.feat_codes) def test_ioslot_assignment(self): 
self.assertIsNone(self.test_ioslot_unassigned.part_id) self.assertIsNone(self.test_ioslot_unassigned.part_uuid) self.assertIsNone(self.test_ioslot_unassigned.part_name) self.assertIsNone(self.test_ioslot_unassigned.part_type) self.assertEqual(2, self.test_ioslot_assigned.part_id) self.assertEqual("02899D96-9D20-490F-8B1B-4D3DEE1210ED", self.test_ioslot_assigned.part_uuid) self.assertEqual("vios1", self.test_ioslot_assigned.part_name) self.assertEqual(bp.LPARType.VIOS, self.test_ioslot_assigned.part_type) def test_ioslot_pci_class(self): self.assertEqual(0x200, self.test_ioslot_unassigned.pci_class) self.assertEqual(0x200, self.test_ioslot_assigned.pci_class) def test_ioslot_pci_dev_id(self): self.assertEqual(0x1234, self.test_ioslot_unassigned.pci_dev_id) self.assertEqual(0x1234, self.test_ioslot_assigned.pci_dev_id) def test_ioslot_pci_subsys_dev_id(self): self.assertEqual(0x04b2, self.test_ioslot_unassigned.pci_subsys_dev_id) self.assertIsNone(self.test_ioslot_assigned.pci_subsys_dev_id) def test_ioslot_pci_rev_id(self): self.assertEqual(0, self.test_ioslot_unassigned.pci_rev_id) self.assertEqual(0, self.test_ioslot_assigned.pci_rev_id) def test_ioslot_pci_vendor_id(self): self.assertEqual(0x1014, self.test_ioslot_unassigned.pci_vendor_id) self.assertEqual(0x1014, self.test_ioslot_assigned.pci_vendor_id) def test_ioslot_pci_subsys_vendor_id(self): self.assertEqual(0x1014, self.test_ioslot_unassigned.pci_subsys_vendor_id) self.assertIsNone(self.test_ioslot_assigned.pci_subsys_vendor_id) def test_ioslot_drc_index(self): self.assertEqual(0x21010011, self.test_ioslot_unassigned.drc_index) self.assertEqual(0x21020011, self.test_ioslot_assigned.drc_index) def test_ioslot_drc_name(self): self.assertEqual('U5294.001.CEC1234-P01-C011', self.test_ioslot_unassigned.drc_name) self.assertEqual('U5294.001.CEC1234-P01-C012', self.test_ioslot_assigned.drc_name) def test_get_capabilities(self): good_cap = {'active_lpar_mobility_capable': True, 'inactive_lpar_mobility_capable': 
True, 'ibmi_lpar_mobility_capable': True, 'custom_mac_addr_capable': True, 'ibmi_restrictedio_capable': True, 'ibmi_nativeio_capable': False, 'simplified_remote_restart_capable': False, 'aix_capable': False, 'ibmi_capable': True, 'linux_capable': False, 'shared_processor_pool_capable': True, 'active_memory_expansion_capable': True, 'dynamic_srr_capable': True, 'vnic_capable': True, 'vnic_failover_capable': True, 'disable_secure_boot_capable': False, 'physical_page_table_ratio_capable': True, 'ioslot_owner_assignment_capable': True, 'affinity_check_capable': True, 'partition_secure_boot_capable': True, 'dedicated_processor_partition_capable': True} bad_cap = {'active_lpar_mobility_capable': False, 'inactive_lpar_mobility_capable': False, 'ibmi_lpar_mobility_capable': False, 'custom_mac_addr_capable': True, 'ibmi_restrictedio_capable': False, 'ibmi_nativeio_capable': False, 'simplified_remote_restart_capable': False, 'aix_capable': True, 'ibmi_capable': False, 'linux_capable': True, 'shared_processor_pool_capable': False, 'active_memory_expansion_capable': False, 'dynamic_srr_capable': False, 'vnic_capable': False, 'vnic_failover_capable': False, 'disable_secure_boot_capable': False, 'physical_page_table_ratio_capable': False, 'ioslot_owner_assignment_capable': False, 'affinity_check_capable': False, 'partition_secure_boot_capable': False, 'dedicated_processor_partition_capable': True} self.call_simple_getter("get_capabilities", good_cap, bad_cap) def test_session_is_master(self): self.assertTrue(self.wrapper.session_is_master) def test_migration_data(self): expected_data = {'active_lpar_mobility_capable': True, 'inactive_lpar_mobility_capable': True, 'ibmi_lpar_mobility_capable': True, 'custom_mac_addr_capable': True, 'ibmi_restrictedio_capable': True, 'ibmi_nativeio_capable': False, 'simplified_remote_restart_capable': False, 'aix_capable': False, 'ibmi_capable': True, 'linux_capable': False, 'shared_processor_pool_capable': True, 'active_memory_expansion_capable': 
class TestMTMS(unittest.TestCase):
    """Tests for the MTMS (machine type / model / serial) wrapper."""

    def test_mtms(self):
        mtms = mtmwrap.MTMS.bld(None, '1234-567*ABCDEF0')
        # The builder parses the MTMS string into its three fields.
        self.assertEqual(mtms.machine_type, '1234')
        self.assertEqual(mtms.model, '567')
        self.assertEqual(mtms.serial, 'ABCDEF0')
        self.assertEqual(mtms.mtms_str, '1234-567*ABCDEF0')
        # Each setter round-trips through its getter.
        for attr, newval in (('machine_type', '4321'), ('model', '765'),
                             ('serial', '0FEDCBA')):
            setattr(mtms, attr, newval)
            self.assertEqual(getattr(mtms, attr), newval)
        # And the composite string reflects all three updates.
        self.assertEqual(mtms.mtms_str, '4321-765*0FEDCBA')
def _assert_clusters_equal(tc, cl1, cl2):
    """Assert that two Cluster wrappers describe the same cluster.

    :param tc: A TestCase (or anything with assertEqual).
    :param cl1: First Cluster wrapper.
    :param cl2: Second Cluster wrapper.
    """
    # Compare lazily, one field at a time, so a mismatch on an early field
    # fails before later attributes are even dereferenced.
    for getter in (lambda cl: cl.name,
                   lambda cl: cl.repos_pv.name,
                   lambda cl: cl.repos_pv.udid,
                   lambda cl: cl.nodes[0].hostname):
        tc.assertEqual(getter(cl1), getter(cl2))
self.data[prop_name] except KeyError: return None class TestElement(twrap.TestWrapper): file = NET_BRIDGE_FILE wrapper_class_to_test = net.NetBridge def test_child_poaching(self): """Creating an element with children of another existing element. Ensure the existing element remains intact. """ # How many load groups did we start with? num_lg = len(self.dwrap.load_grps) # Create a new element with a load group from the NetBridge as a child. newel = ent.Element('foo', None, children=[self.dwrap.load_grps[0].element]) # Element creation did not poach the load group from the NetBridge. self.assertEqual(num_lg, len(self.dwrap.load_grps)) # pypowervm.entities.Element.append (not to be confused with # etree.Element.append or list.append) should behave the same way. newel.append(self.dwrap.load_grps[1].element) # TODO(IBM): ...but it doesn't. See comment in that method. # self.assertEqual(num_lg, len(self.dwrap.load_grps)) @mock.patch('lxml.etree.tostring') def test_toxmlstring(self, mock_tostring): newel = ent.Element('foo', None) # No args self.assertEqual(mock_tostring.return_value, newel.toxmlstring()) mock_tostring.assert_called_once_with(newel.element) # With kwargs mock_tostring.reset_mock() self.assertEqual(mock_tostring.return_value, newel.toxmlstring( pretty=False)) mock_tostring.assert_called_once_with(newel.element) mock_tostring.reset_mock() self.assertEqual(mock_tostring.return_value, newel.toxmlstring(pretty=True)) mock_tostring.assert_called_once_with(newel.element, pretty_print=True) class TestElementList(twrap.TestWrapper): file = SYS_SRIOV_FILE wrapper_class_to_test = ms.System def setUp(self): super(TestElementList, self).setUp() self.pport = self.dwrap.asio_config.sriov_adapters[0].phys_ports[0] self.tag = 'ConfiguredOptions' def _validate_xml(self, val_list): outer_tag = self.pport.schema_type tag_before = 'ConfiguredMTU' tag_after = 'ConfiguredPortSwitchMode' # Opening tag_pat_fmt = r'<%s(\s[^>]*)?>' elem_pat_fmt = tag_pat_fmt + r'%s\s*' pattern = 
'.*' pattern += tag_pat_fmt % outer_tag pattern += '.*' pattern += elem_pat_fmt % (tag_before, '[^<]*', tag_before) for val in val_list: pattern += elem_pat_fmt % (self.tag, val, self.tag) pattern += elem_pat_fmt % (tag_after, '[^<]*', tag_after) pattern += '.*' pattern += tag_pat_fmt % ('/' + outer_tag) pattern += '.*' self.assertTrue(re.match(pattern.encode('utf-8'), self.pport.toxmlstring(), flags=re.DOTALL)) def test_everything(self): """Ensure ElementList behaves like a list where implemented.""" # Wrapper._get_elem_list, ElementList.__init__ coel = self.pport._get_elem_list(self.tag) # index self.assertEqual(0, coel.index('autoDuplex')) self.assertEqual(1, coel.index('Veb')) self.assertRaises(ValueError, coel.index, 'foo') # __len__ self.assertEqual(2, len(coel)) # __repr__ self.assertEqual("['autoDuplex', 'Veb']", repr(coel)) # __contains__ self.assertIn('autoDuplex', coel) self.assertIn('Veb', coel) self.assertNotIn('foo', coel) # __str__ self.assertEqual("['autoDuplex', 'Veb']", str(coel)) # __getitem__ self.assertEqual('autoDuplex', coel[0]) self.assertEqual('Veb', coel[1]) self.assertRaises(IndexError, coel.__getitem__, 2) # __setitem__ coel[0] = 'fullDuplex' self.assertEqual('fullDuplex', coel[0]) self.assertRaises(IndexError, coel.__setitem__, 2, 'foo') # append coel.append('foo') self._validate_xml(['fullDuplex', 'Veb', 'foo']) # extend coel.extend(['bar', 'baz']) self._validate_xml(['fullDuplex', 'Veb', 'foo', 'bar', 'baz']) # __delitem__ del coel[3] self._validate_xml(['fullDuplex', 'Veb', 'foo', 'baz']) # remove coel.remove('foo') self._validate_xml(['fullDuplex', 'Veb', 'baz']) # __iter__ self.assertEqual(['fullDuplex', 'Veb', 'baz'], [val for val in coel]) # clear coel.clear() self.assertEqual(0, len(coel)) self._validate_xml([]) # Inserting stuff back in puts it in the right place coel.extend(['one', 'two', 'three']) self._validate_xml(['one', 'two', 'three']) # Wrapper._set_elem_list self.pport._set_elem_list(self.tag, ['four', 'five', 'six']) 
self._validate_xml(['four', 'five', 'six']) class TestWrapper(unittest.TestCase): def test_get_val_str(self): w = SubWrapper(one='1', foo='foo', empty='') self.assertEqual(w._get_val_str('one'), '1') self.assertEqual(w._get_val_str('foo'), 'foo') self.assertEqual(w._get_val_str('empty'), '') self.assertIsNone(w._get_val_str('nonexistent')) self.assertEqual(w._get_val_str('nonexistent', default='10'), '10') def test_get_val_percent(self): w = SubWrapper(one='2.45%', two='2.45', three=None, four='123', five='1.2345', six='123.0', seven='123%', eight='%123', nine='12%3') self.assertEqual(w._get_val_percent('one'), 0.0245) self.assertEqual(w._get_val_percent('two'), 0.0245) self.assertEqual(w._get_val_percent('three'), None) self.assertEqual(w._get_val_percent('four'), 1.23) self.assertEqual(w._get_val_percent('five'), 0.012345) self.assertEqual(w._get_val_percent('six'), 1.23) self.assertEqual(w._get_val_percent('seven'), 1.23) self.assertEqual(w._get_val_percent('eight'), 1.23) # Interesting test: self.assertEqual(w._get_val_percent('nine'), 0.12) self.assertIsNone(w._get_val_percent('nonexistent')) def test_get_val_int(self): w = SubWrapper(one='1', nan='foo', empty='') self.assertEqual(w._get_val_int('one'), 1) self.assertIsNone(w._get_val_int('nan')) self.assertIsNone(w._get_val_int('empty')) self.assertIsNone(w._get_val_int('nonexistent')) self.assertEqual(w._get_val_int('nonexistent', default=10), 10) def test_get_val_float(self): w = SubWrapper(one='1', two_point_two='2.2', nan='foo', empty='') self.assertAlmostEqual(w._get_val_float('one'), 1) self.assertAlmostEqual(w._get_val_float('two_point_two'), 2.2) self.assertIsNone(w._get_val_float('nan')) self.assertIsNone(w._get_val_float('empty')) self.assertIsNone(w._get_val_float('nonexistent')) self.assertAlmostEqual(w._get_val_float('one', default=2), 1) self.assertAlmostEqual(w._get_val_float('two_point_two', default=3), 2.2) self.assertAlmostEqual(w._get_val_float('nan', default=1), 1) 
self.assertAlmostEqual(w._get_val_float('empty', default=1), 1) self.assertAlmostEqual(w._get_val_int('nonexistent', default=1.7), 1.7) def test_get_val_bool(self): w = SubWrapper(one='1', t='true', T='TRUE', f='false', F='FALSE', empty='') self.assertTrue(w._get_val_bool('t')) self.assertTrue(w._get_val_bool('T')) self.assertFalse(w._get_val_bool('one')) self.assertFalse(w._get_val_bool('empty')) self.assertFalse(w._get_val_bool('f')) self.assertFalse(w._get_val_bool('F')) self.assertFalse(w._get_val_bool('nonexistent')) self.assertTrue(w._get_val_bool('t', default=False)) self.assertTrue(w._get_val_bool('T', default=False)) self.assertFalse(w._get_val_bool('one', default=True)) self.assertFalse(w._get_val_bool('empty', default=True)) self.assertFalse(w._get_val_bool('f', default=True)) self.assertFalse(w._get_val_bool('F', default=True)) self.assertTrue(w._get_val_bool('nonexistent', default=True)) class TestEntryWrapper(testtools.TestCase): def setUp(self): super(TestEntryWrapper, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt def test_etag(self): fake_entry = ent.Entry({}, ent.Element('fake_entry', self.adpt), self.adpt) etag = '1234' ew = ewrap.EntryWrapper.wrap(fake_entry, etag=etag) self.assertEqual(etag, ew.etag) ew = ewrap.EntryWrapper.wrap(fake_entry) self.assertEqual(None, ew.etag) def test_set_uuid(self): # Test that an AttributeError is raised def set_wrap_uuid(wrap, value): wrap.uuid = value self.assertRaises(AttributeError, set_wrap_uuid, ewrap.EntryWrapper(None), 'fake-uuid-value') # Test that we call the mixin set_uuid method for valid cases. 
class ValidEntryWrap(ewrap.EntryWrapper, ewrap.WrapperSetUUIDMixin): pass with mock.patch('pypowervm.wrappers.entry_wrapper.WrapperSetUUIDMixin' '.set_uuid') as mock_setup: uuid1 = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4())) ValidEntryWrap(None).uuid = uuid1 mock_setup.assert_called_with(uuid1) def test_load(self): etag = '1234' resp = apt.Response('reqmethod', 'reqpath', 'status', 'reason', dict(etag=etag)) # Entry or Feed is not set, so expect an exception self.assertRaises(KeyError, ewrap.EntryWrapper.wrap, resp) # Set an entry... entry = ent.Entry({}, ent.Element('entry', self.adpt), self.adpt) resp.entry = entry # Run ew = ewrap.EntryWrapper.wrap(resp) # Validate self.assertEqual(entry, ew.entry) self.assertEqual(etag, ew.etag) # Create a response with no headers resp2 = apt.Response('reqmethod', 'reqpath', 'status', 'reason', {}) resp2.entry = entry # Run ew = ewrap.EntryWrapper.wrap(resp2) # Validate the etag is None since there were no headers self.assertEqual(None, ew.etag) # Wipe our entry, add feed. 
resp.entry = None e1 = ent.Entry({'etag': '1'}, ent.Element('e1', self.adpt), self.adpt) e2 = ent.Entry({'etag': '2'}, ent.Element('e2', self.adpt), self.adpt) resp.feed = ent.Feed({}, [e1, e2]) # Run ew = ewrap.EntryWrapper.wrap(resp) # Validate self.assertEqual(e1, ew[0].entry) self.assertEqual('1', ew[0].etag) self.assertEqual(e2, ew[1].entry) self.assertEqual('2', ew[1].etag) @mock.patch('lxml.etree.tostring') def test_toxmlstring(self, mock_tostring): wrp = ewrap.EntryWrapper.wrap(ent.Entry( {}, ent.Element('fake_entry', None), None)) # No args self.assertEqual(mock_tostring.return_value, wrp.toxmlstring()) mock_tostring.assert_called_once_with(wrp.entry.element) # With kwargs mock_tostring.reset_mock() self.assertEqual(mock_tostring.return_value, wrp.toxmlstring( pretty=False)) mock_tostring.assert_called_once_with(wrp.entry.element) mock_tostring.reset_mock() self.assertEqual(mock_tostring.return_value, wrp.toxmlstring( pretty=True)) mock_tostring.assert_called_once_with( wrp.entry.element, pretty_print=True) class TestElementWrapper(testtools.TestCase): """Tests for the ElementWrapper class.""" def setUp(self): super(TestElementWrapper, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt self.resp = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response() self.nb1 = ewrap.EntryWrapper.wrap(self.resp.feed.entries[0]) self.resp2 = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response() self.nb2 = ewrap.EntryWrapper.wrap(self.resp2.feed.entries[0]) def test_equality(self): """Validates that two elements loaded from the same data is equal.""" sea1 = self._find_seas(self.nb1.entry)[0] sea2 = self._find_seas(self.nb2.entry)[0] sea2copy = copy.deepcopy(sea2) self.assertTrue(sea1 == sea2) self.assertEqual(sea2, sea2copy) # Change the other SEA sea2.element.element.append(etree.Element('Bob')) self.assertFalse(sea1 == sea2) def test_inequality_by_subelem_change(self): sea1 = self._find_seas(self.nb1.entry)[0] sea2 = self._find_seas(self.nb2.entry)[0] sea_trunk 
= sea2.element.findall('TrunkAdapters/TrunkAdapter')[0] pvid = sea_trunk.find('PortVLANID') pvid.text = '1' self.assertFalse(sea1 == sea2) def test_unequal(self): sea1 = self._find_seas(self.nb1.entry)[0] sea2 = self._find_seas(self.nb2.entry)[0] self.assertEqual(sea1, sea2) # Different text makes 'em different sea1.element.text = 'Bogus' self.assertNotEqual(sea1, sea2) # reset sea1.element.text = sea2.element.element.text # Different tag makes 'em different sea1.element.tag = 'Bogus' self.assertNotEqual(sea1, sea2) def _find_seas(self, entry): """Wrapper for the SEAs.""" found = entry.element.find('SharedEthernetAdapters') return ewrap.WrapperElemList(found, net.SEA) def test_fresh_element(self): # Default: UOM namespace, no class MyElement(ewrap.ElementWrapper): schema_type = 'SomePowerObject' myel = MyElement._bld(self.adpt) self.assertEqual(myel.schema_type, 'SomePowerObject') self.assertEqual(myel.element.toxmlstring(), ''.encode("utf-8")) # Can override namespace and attrs and trigger inclusion of class MyElement3(ewrap.ElementWrapper): schema_type = 'SomePowerObject' default_attrib = {'foo': 'bar'} schema_ns = 'baz' has_metadata = True myel = MyElement3._bld(self.adpt) self.assertEqual( myel.element.toxmlstring(), '' ''.encode("utf-8")) # Same thing, but via the decorator @ewrap.ElementWrapper.pvm_type('SomePowerObject', has_metadata=True, ns='baz', attrib={'foo': 'bar'}) class MyElement4(ewrap.ElementWrapper): pass myel = MyElement4._bld(self.adpt) self.assertEqual( myel.element.toxmlstring(), '' ''.encode("utf-8")) # Now 'SomePowerObject' is registered. Prove that we can use wrap() to # instantiate MyElement4 straight from ElementWrapper. 
el = ent.Element('SomePowerObject', self.adpt, ns='baz', attrib={'foo': 'bar'}) w = ewrap.ElementWrapper.wrap(el) self.assertIsInstance(w, MyElement4) def test_href(self): path = 'LoadGroups/LoadGroup/VirtualNetworks/link' # Get all hrefs = self.nb1.get_href(path) self.assertEqual(len(hrefs), 13) self.assertEqual( hrefs[2], 'https://9.1.2.3:12443/rest/api/uom/ManagedSystem/726e9cb3-6576-3d' 'f5-ab60-40893d51d074/VirtualNetwork/2b4ab8ea-4b15-3430-b2cd-45954' 'cfaba0d') # Request one - should return None hrefs = self.nb1.get_href(path, one_result=True) self.assertIsNone(hrefs) # set_href should refuse to set multiple links self.assertRaises(ValueError, self.nb1.set_href, path, 'foo') # Drill down to the (only) SEA sea = ewrap.ElementWrapper.wrap( self.nb1._find('SharedEthernetAdapters/SharedEthernetAdapter')) path = 'AssignedVirtualIOServer' hrefs = sea.get_href(path) self.assertEqual(len(hrefs), 1) self.assertEqual( hrefs[0], 'https://9.1.2.3:12443/rest/api/uom/ManagedSystem/726e9cb3-6576-3d' 'f5-ab60-40893d51d074/VirtualIOServer/691019AF-506A-4896-AADE-607E' '21FA93EE') # Now make sure one_result returns the string (not a list) href = sea.get_href(path, one_result=True) self.assertEqual( href, 'https://9.1.2.3:12443/rest/api/uom/ManagedSystem/726e9cb3-6576-3d' 'f5-ab60-40893d51d074/VirtualIOServer/691019AF-506A-4896-AADE-607E' '21FA93EE') # Test setter sea.set_href(path, 'foo') self.assertEqual(sea.get_href(path, one_result=True), 'foo') # Now try setting one that doesn't exist. First on a top-level path. path = 'NewElement' sea.set_href(path, 'bar') self.assertEqual(sea.get_href(path, one_result=True), 'bar') # ...and now on a nested path. 
path = 'BackingDeviceChoice/EthernetBackingDevice/NewLink' sea.set_href(path, 'baz') self.assertEqual(sea.get_href(path, one_result=True), 'baz') def _verify_element_clone(self, el1, el2): # Equal according to _element_equality self.assertEqual(el1, el2) # Not the same reference self.assertIsNot(el1, el2) # Adapter references are the same self.assertIs(el1.adapter, el2.adapter) # etree.Elements are not the same reference self.assertIsNot(el1.element, el2.element) # But they marshal identically self.assertEqual(el1.toxmlstring().strip(), el2.toxmlstring().strip()) def test_element_clone(self): el1 = self.nb1.element el2 = copy.deepcopy(el1) self._verify_element_clone(el1, el2) def _verify_properties_clone(self, props1, props2): # Properties should be deeply equal self.assertEqual(props1, props2) # But not the same reference self.assertIsNot(props1, props2) # Strings are shared copy-on-write. Ensure changing one does not # change the other props1['id'] = 'abc' self.assertNotEqual(props1['id'], props2['id']) def _verify_entry_clone(self, ent1, ent2): # Elements should be cloned to the same spec as Element.__deepcopy__() self._verify_element_clone(ent1.element, ent2.element) self._verify_properties_clone(ent1.properties, ent2.properties) # Ensure deep copy - sub-properties also not the same reference. 
links1 = ent1.properties['links'] links2 = ent2.properties['links'] self.assertIsNot(links1, links2) # And one more layer down self.assertIsNot(links1['SELF'], links2['SELF']) def test_entry_clone(self): ent1 = self.nb1.entry ent2 = copy.deepcopy(ent1) self._verify_entry_clone(ent1, ent2) def _verify_feed_clone(self, feed1, feed2): self._verify_properties_clone(feed1.properties, feed2.properties) self.assertEqual(len(feed1.entries), len(feed2.entries)) for ent1, ent2 in zip(feed1.entries, feed2.entries): self._verify_entry_clone(ent1, ent2) def test_feed_clone(self): feed1 = self.resp.feed feed2 = copy.deepcopy(feed1) self._verify_feed_clone(feed1, feed2) def _verify_response_clone(self, resp1, resp2): for attr in ('reqmethod', 'reqpath', 'reqheaders', 'reqbody', 'status', 'reason', 'headers', 'body', 'adapter'): self.assertEqual(getattr(resp1, attr), getattr(resp2, attr)) self.assertIsNot(resp1.headers, resp2.headers) self.assertIs(resp1.adapter, resp2.adapter) if resp1.feed is None: self.assertIsNone(resp2.feed) else: self._verify_feed_clone(resp1.feed, resp2.feed) if resp1.entry is None: self.assertIsNone(resp2.entry) else: self._verify_entry_clone(resp1.entry, resp2.entry) def test_response_clone(self): # Network Bridge Response has a feed resp1 = self.resp resp2 = copy.deepcopy(resp1) self._verify_response_clone(resp1, resp2) # This one has entry resp3 = pvmhttp.load_pvm_resp( 'get_volume_group_no_rep.txt').get_response() resp4 = copy.deepcopy(resp3) self._verify_response_clone(resp3, resp4) def test_entrywrapper_clone(self): ew1 = self.nb1 ew2 = copy.deepcopy(ew1) # Entrys should be cloned to the same spec as Entry.__deepcopy__() self._verify_entry_clone(ew1.entry, ew2.entry) # Etags should match self.assertEqual(ew1.etag, ew2.etag) # But changing one should not change the other ew1._etag = 'abc' self.assertNotEqual(ew1.etag, ew2.etag) def test_elementwrapper_clone(self): ew1 = self.nb1.seas[0] ew2 = copy.deepcopy(ew1) # Elements should be cloned to the same 
class TestWrapperElemList(testtools.TestCase):
    """Tests for the WrapperElemList class."""

    def setUp(self):
        super(TestWrapperElemList, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        # No indirect
        self.seas_wel = net.NetBridge.wrap(pvmhttp.load_pvm_resp(
            NET_BRIDGE_FILE).get_response())[0].seas
        # With indirect
        self.backdev_wel = card.VNIC.wrap(pvmhttp.load_pvm_resp(
            SYS_VNIC_FILE).get_response())[0].back_devs

    def test_get(self):
        """Indexing returns wrappers; out-of-range raises IndexError."""
        self.assertIsInstance(self.seas_wel[0], net.SEA)
        self.assertRaises(IndexError, lambda a, i: a[i], self.seas_wel, 2)
        # Works with indirect
        self.assertIsInstance(self.backdev_wel[0], card.VNICBackDev)
        self.assertRaises(IndexError, lambda a, i: a[i], self.backdev_wel, 2)

    def test_length(self):
        """len() reflects the number of wrapped elements (fixtures have 2)."""
        self.assertEqual(2, len(self.seas_wel))
        self.assertEqual(2, len(self.backdev_wel))

    def test_append(self):
        """append grows the list; remove of the same wrapper shrinks it."""
        sea_add = ewrap.ElementWrapper.wrap(
            ent.Element('SharedEthernetAdapter', self.adpt))
        self.assertEqual(2, len(self.seas_wel))
        # Test Append
        self.seas_wel.append(sea_add)
        self.assertEqual(3, len(self.seas_wel))
        # Appending to indirect
        backdev = copy.deepcopy(self.backdev_wel[0])
        self.backdev_wel.append(backdev)
        self.assertEqual(3, len(self.backdev_wel))
        # Make sure we can also remove what was just added.
        self.seas_wel.remove(sea_add)
        self.assertEqual(2, len(self.seas_wel))
        # Removing from indirect
        self.backdev_wel.remove(backdev)
        self.assertEqual(2, len(self.backdev_wel))

    def test_extend(self):
        """extend adds multiple; remove matches by logical equivalence."""
        seas = [
            ewrap.ElementWrapper.wrap(ent.Element('SharedEthernetAdapter',
                                                  self.adpt)),
            ewrap.ElementWrapper.wrap(ent.Element('SharedEthernetAdapter',
                                                  self.adpt))
        ]
        self.assertEqual(2, len(self.seas_wel))
        self.seas_wel.extend(seas)
        self.assertEqual(4, len(self.seas_wel))
        self.adpt.build_href.return_value = 'href'
        # Extending indirect
        backdevs = [card.VNICBackDev.bld(self.adpt, 'vios_uuid', 1, 2),
                    card.VNICBackDev.bld(self.adpt, 'vios_uuid', 3, 4)]
        self.backdev_wel.extend(backdevs)
        self.assertEqual(4, len(self.backdev_wel))
        # Make sure that we can also remove what we added.  We remove a
        # logically identical element to test the equivalence function
        e = ewrap.ElementWrapper.wrap(ent.Element('SharedEthernetAdapter',
                                                  self.adpt))
        # Two removes, one per element added by the extend above.
        self.seas_wel.remove(e)
        self.seas_wel.remove(e)
        self.assertEqual(2, len(self.seas_wel))
        # With indirect
        self.backdev_wel.remove(card.VNICBackDev.bld(self.adpt, 'vios_uuid',
                                                     1, 2))
        self.assertEqual(3, len(self.backdev_wel))
        # Non-equivalent one doesn't work
        self.assertRaises(ValueError, self.backdev_wel.remove,
                          card.VNICBackDev.bld(self.adpt, 'vios_uuid', 1, 3))

    def test_in(self):
        """Membership test exercises __contains__."""
        # This really does fail without __contains__
        self.assertIn(self.seas_wel[0], self.seas_wel)
        # Works with indirect
        self.assertIn(self.backdev_wel[0], self.backdev_wel)

    def test_index(self):
        """index() finds the position of a contained wrapper."""
        self.assertEqual(self.seas_wel.index(self.seas_wel[0]), 0)
        # Works with indirect
        self.assertEqual(self.backdev_wel.index(self.backdev_wel[0]), 0)

    def test_str(self):
        """str() is bracketed and mentions each element's type."""
        strout = str(self.seas_wel)
        self.assertEqual('[', strout[0])
        self.assertEqual(']', strout[-1])
        for chunk in strout.split(','):
            self.assertIn('SEA', chunk)
        # And for indirect
        strout = str(self.backdev_wel)
        self.assertEqual('[', strout[0])
        self.assertEqual(']', strout[-1])
        for chunk in strout.split(','):
            self.assertIn('VNIC', chunk)
test_repr(self): strout = repr(self.seas_wel) self.assertEqual('[', strout[0]) self.assertEqual(']', strout[-1]) for chunk in strout.split(','): self.assertIn('SEA', chunk) # And for indirect strout = repr(self.backdev_wel) self.assertEqual('[', strout[0]) self.assertEqual(']', strout[-1]) for chunk in strout.split(','): self.assertIn('VNIC', chunk) class TestActionableList(unittest.TestCase): """Tests for the Actionable List class.""" def test_extend(self): def test(new_list): self.assertEqual([1, 2, 3, 4, 5], new_list) l = ewrap.ActionableList([1, 2, 3], test) # Extend here. l.extend([4, 5]) self.assertEqual(5, len(l)) self.assertEqual(5, l[4]) def test_append(self): def test(new_list): self.assertEqual([1, 2, 3, 4], new_list) l = ewrap.ActionableList([1, 2, 3], test) # Append here. l.append(4) self.assertEqual(4, len(l)) self.assertEqual(4, l[3]) def test_remove(self): def test(new_list): self.assertEqual([1, 3], new_list) l = ewrap.ActionableList([1, 2, 3], test) # Remove here. l.remove(2) self.assertEqual(2, len(l)) self.assertEqual(3, l[1]) def test_insert(self): def test(new_list): self.assertEqual([1, 2, 3, 4], new_list) l = ewrap.ActionableList([1, 2, 3], test) # Insert here. l.insert(3, 4) self.assertEqual(4, len(l)) self.assertEqual(4, l[3]) def test_pop(self): def test(new_list): self.assertEqual([1, 2], new_list) l = ewrap.ActionableList([1, 2, 3], test) # Pop here. 
l.pop(2) self.assertEqual(2, len(l)) self.assertEqual(2, l[1]) def test_complex_path(self): function = mock.MagicMock() l = ewrap.ActionableList([1, 2, 3], function) self.assertEqual(3, len(l)) self.assertEqual(3, l[2]) # Try extending l.extend([4, 5]) self.assertEqual(5, len(l)) self.assertEqual(5, l[4]) # Try appending l.append(6) self.assertEqual(6, len(l)) self.assertEqual(6, l[5]) # Try removing l.remove(6) self.assertEqual(5, len(l)) self.assertEqual(5, l[4]) # Try inserting l.insert(5, 6) self.assertEqual(6, len(l)) self.assertEqual(6, l[5]) # Try popping self.assertEqual(6, l.pop(5)) self.assertEqual(5, len(l)) self.assertEqual(5, l[4]) # Make sure our function was called each time self.assertEqual(5, function.call_count) class TestGet(testtools.TestCase): """Tests for EntryWrapper.get().""" def setUp(self): super(TestGet, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt @mock.patch('pypowervm.wrappers.logical_partition.LPAR.wrap') def test_get_root(self, mock_wrap): """Various permutations of EntryWrapper.get on a ROOT object.""" # Happy path - feed. Ensure misc args are passed through. lpar.LPAR.get(self.adpt, foo='bar', baz=123) self.adpt.read.assert_called_with(lpar.LPAR.schema_type, foo='bar', baz=123) mock_wrap.assert_called_with(self.adpt.read.return_value) mock_wrap.reset_mock() # Happy path - entry with 'uuid' lpar.LPAR.get(self.adpt, uuid='123') self.adpt.read.assert_called_with(lpar.LPAR.schema_type, root_id='123') mock_wrap.assert_called_with(self.adpt.read.return_value) mock_wrap.reset_mock() # Happy path - entry with 'root_id' lpar.LPAR.get(self.adpt, root_id='123') self.adpt.read.assert_called_with(lpar.LPAR.schema_type, root_id='123') mock_wrap.assert_called_with(self.adpt.read.return_value) mock_wrap.reset_mock() @mock.patch('pypowervm.wrappers.network.CNA.wrap') def test_get_child(self, mock_wrap): """Various permutations of EntryWrapper.get on a CHILD object.""" # Happy path - feed. 
Parent specified as class net.CNA.get(self.adpt, parent_type=lpar.LPAR, parent_uuid='123') self.adpt.read.assert_called_with(lpar.LPAR.schema_type, root_id='123', child_type=net.CNA.schema_type) mock_wrap.assert_called_with(self.adpt.read.return_value) mock_wrap.reset_mock() # Happy path - entry with 'uuid'. Parent specified as string. net.CNA.get( self.adpt, parent_type=lpar.LPAR.schema_type, parent_uuid='123', uuid='456') self.adpt.read.assert_called_with( lpar.LPAR.schema_type, root_id='123', child_type=net.CNA.schema_type, child_id='456') mock_wrap.assert_called_with(self.adpt.read.return_value) mock_wrap.reset_mock() # Happy path - entry with 'child_id'. Parent specified as instance. parent = mock.Mock(spec=lpar.LPAR, schema_type=lpar.LPAR.schema_type, uuid='123') net.CNA.get(self.adpt, parent=parent, child_id='456') self.adpt.read.assert_called_with( lpar.LPAR.schema_type, root_id='123', child_type=net.CNA.schema_type, child_id='456') mock_wrap.assert_called_with(self.adpt.read.return_value) mock_wrap.reset_mock() @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap') def test_get_errors(self, mock_wrap): """Error paths in EntryWrapper.get.""" # parent_type specified, parent_uuid not. self.assertRaises(ValueError, net.CNA.get, self.adpt, parent_type=lpar.LPAR) # CHILD mode forbids 'root_id' (must use 'parent_uuid'). self.assertRaises(ValueError, net.CNA.get, self.adpt, parent_type=lpar.LPAR, parent_uuid='1', root_id='2') # CHILD mode can't have both 'uuid' and 'child_id'. self.assertRaises(ValueError, net.CNA.get, self.adpt, parent_type=lpar.LPAR, parent_uuid='12', uuid='34', child_id='56') # ROOT mode forbids parent_uuid. self.assertRaises(ValueError, lpar.LPAR.get, self.adpt, parent_uuid='123') # ROOT mode forbids child_type. self.assertRaises(ValueError, lpar.LPAR.get, self.adpt, child_type=net.CNA) # ROOT mode forbids child_id. 
self.assertRaises(ValueError, lpar.LPAR.get, self.adpt, child_id='123') # ROOT mode can't have both 'uuid' and 'root_id'. self.assertRaises(ValueError, lpar.LPAR.get, self.adpt, uuid='12', root_id='34') # Nothing was ever wrapped mock_wrap.assert_not_called() @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap') def test_get_by_href(self, mock_wrap): self.assertEqual( mock_wrap.return_value, ewrap.EntryWrapper.get_by_href(self.adpt, 'href', one=2, three=4)) self.adpt.read_by_href.assert_called_once_with('href', one=2, three=4) class TestSearch(testtools.TestCase): """Tests for EntryWrapper.search().""" def setUp(self): super(TestSearch, self).setUp() self.adp = apt.Adapter(self.useFixture(fx.SessionFx()).sess) def _validate_request(self, path, *feedcontents): def validate_request(meth, _path, *args, **kwargs): self.assertTrue(_path.endswith(path)) resp = apt.Response('meth', 'path', 'status', 'reason', {}, reqheaders={'Accept': ''}) resp.feed = ent.Feed({}, feedcontents) return resp return validate_request @mock.patch('pypowervm.adapter.Adapter._request') def test_good(self, mock_rq): def validate_result(clwrap): self.assertIsInstance(clwrap, clust.Cluster) self.assertEqual(clwrap.name, 'cl1') self.assertEqual(clwrap.repos_pv.name, 'hdisk1') self.assertEqual(clwrap.nodes[0].hostname, 'vios1') mock_rq.side_effect = self._validate_request( "/rest/api/uom/Cluster/search/(ClusterName=='cl1')?group=None", clust.Cluster.bld(self.adp, 'cl1', stor.PV.bld(self.adp, 'hdisk1', 'udid1'), clust.Node.bld( self.adp, hostname='vios1')).entry) clwraps = clust.Cluster.search(self.adp, name='cl1') self.assertEqual(len(clwraps), 1) validate_result(clwraps[0]) # Test one_result on a registered key with a single hit validate_result(clust.Cluster.search(self.adp, one_result=True, name='cl1')) @mock.patch('pypowervm.adapter.Adapter._request') def test_negate(self, mock_rq): mock_rq.side_effect = self._validate_request( 
"/rest/api/uom/Cluster/search/(ClusterName!='cl1')?group=None") clwraps = clust.Cluster.search(self.adp, negate=True, name='cl1') self.assertEqual(clwraps, []) # Test one_result with no hits self.assertIsNone(clust.Cluster.search(self.adp, negate=True, one_result=True, name='cl1')) def test_no_such_search_key(self): """Ensure an invalid search key gives ValueError.""" self.assertRaises(ValueError, clust.Cluster.search, self.adp, foo='bar') @mock.patch('pypowervm.adapter.Adapter._request') def test_quote(self, mock_rq): """Ensure special chars in the search value are properly encoded.""" mock_rq.side_effect = self._validate_request( "/rest/api/uom/Cluster/search/(ClusterName==" "'%3B%2F%3F%3A%40%20%26%3D%2B%24%2C')?group=None") clust.Cluster.search(self.adp, name=';/?:@ &=+$,') @mock.patch('pypowervm.adapter.Adapter.read') def test_search_by_feed(self, mock_read): """Test a search key that's not in search_keys.""" def validate_read(root_type, xag): # This should be called by _search_by_feed, not by search. # Otherwise, we'll get an exception on the arg list. self.assertEqual(net.NetBridge.schema_type, root_type) self.assertIsNone(xag) return pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response() mock_read.side_effect = validate_read # vswitch_id is particularly cool because it's not just a top-level # child element - it dives into the SEAs, finds the primary trunk # adapter, and returns the vswitch ID from there. rets = net.NetBridge.search(self.adp, vswitch_id=0) self.assertEqual(1, len(rets)) nb = rets[0] self.assertIsInstance(nb, net.NetBridge) self.assertEqual('d648eb60-4d39-34ad-ae2b-928d8c9577ad', nb.uuid) # Test one_result down the no-search-key path nb = net.NetBridge.search(self.adp, one_result=True, vswitch_id=0) self.assertEqual('d648eb60-4d39-34ad-ae2b-928d8c9577ad', nb.uuid) # Now do a search that returns more than one item. # Use a string for an int field to prove it works anyway. 
rets = net.NetBridge.search(self.adp, pvid='1') self.assertEqual(2, len(rets)) self.assertIsInstance(rets[0], net.NetBridge) self.assertIsInstance(rets[1], net.NetBridge) self.assertEqual({'d648eb60-4d39-34ad-ae2b-928d8c9577ad', '764f3423-04c5-3b96-95a3-4764065400bd'}, {nb.uuid for nb in rets}) # Ensure one_result returns the first hit self.assertEqual(rets[0].uuid, net.NetBridge.search( self.adp, one_result=True, pvid=1).uuid) @mock.patch('pypowervm.adapter.Adapter.read') def test_search_with_xag(self, mock_read): """Test a search key that's in search_keys, but specifying xag.""" def validate_read(root_type, xag): # This should be called by _search_by_feed, not by search. # Otherwise, we'll get an exception on the arg list. self.assertEqual(lpar.LPAR.schema_type, root_type) self.assertEqual(['Foo', 'Bar'], xag) return pvmhttp.load_pvm_resp(LPAR_FILE).get_response() mock_read.side_effect = validate_read rets = lpar.LPAR.search(self.adp, name='linux1', xag=['Foo', 'Bar']) self.assertEqual(1, len(rets)) linux1 = rets[0] self.assertIsInstance(linux1, lpar.LPAR) self.assertEqual('9068B0FB-1CF0-4D23-8A23-31AC87D5F5D2', linux1.uuid) @mock.patch('pypowervm.adapter.Adapter.read') def test_child_no_search_key(self, mock_read): """CHILD search with or without a search key (uses GET-feed-loop).""" def validate_read(root_type, root_id, child_type, xag): # This should be called by _search_by_feed, not by search. # Otherwise, we'll get an exception on the arg list. 
            self.assertEqual('SomeParent', root_type)
            self.assertEqual('some_uuid', root_id)
            self.assertEqual('Cluster', child_type)
            self.assertIsNone(xag)
            return pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response()
        mock_read.side_effect = validate_read
        # Non-registered key ('id') and registered key ('name') both go
        # down the feed-scan path for CHILD searches.
        clust.Cluster.search(self.adp, id=0, parent_type='SomeParent',
                             parent_uuid='some_uuid')
        clust.Cluster.search(self.adp, name='mycluster',
                             parent_type='SomeParent',
                             parent_uuid='some_uuid')

    def test_child_bad_args(self):
        """Specifying parent_uuid without parent_type is an error."""
        self.assertRaises(ValueError, net.NetBridge.search, self.adp,
                          vswitch_id=0, parent_uuid='some_uuid')

    @mock.patch('pypowervm.adapter.Adapter.read')
    def test_search_all_parents(self, mock_read):
        """Anonymous ROOT for CHILD search."""
        # We're going to pretend that VSwitch is a CHILD of VirtualIOServer.
        parent_type = 'VirtualIOServer'
        child_schema_type = 'VirtualNetwork'

        # Anonymous CHILD search should call GET(ROOT feed), followed by one
        # GET(CHILD feed) for each ROOT parent.  Choosing a feed file with
        # two entries.  The following mock_reads are chained in order.
        def validate_feed_get(root):
            # Chain to the first entry GET
            mock_read.side_effect = validate_entry_get1
            self.assertEqual(parent_type, root)
            # VIOS_FILE has two <entry>s.
            return pvmhttp.load_pvm_resp(VIOS_FILE).get_response()

        def validate_entry_get1(root, root_id, child_type, **kwargs):
            # Chain to the second entry GET
            mock_read.side_effect = validate_entry_get2
            self.assertEqual(parent_type, root)
            self.assertEqual('1300C76F-9814-4A4D-B1F0-5B69352A7DEA', root_id)
            self.assertEqual(child_schema_type, child_type)
            entry_resp = pvmhttp.load_pvm_resp(VNETS_FILE).get_response()
            # Use the first half of the feed, which contains two tagged vlans
            entry_resp.feed.entries = entry_resp.feed.entries[:4]
            return entry_resp

        def validate_entry_get2(root, root_id, child_type, **kwargs):
            self.assertEqual(parent_type, root)
            self.assertEqual('7DBBE705-E4C4-4458-8223-3EBE07015CA9', root_id)
            self.assertEqual(child_schema_type, child_type)
            entry_resp = pvmhttp.load_pvm_resp(VNETS_FILE).get_response()
            # Use the second half of the feed, which contains two tagged
            # vlans
            entry_resp.feed.entries = entry_resp.feed.entries[4:]
            return entry_resp
        # Set up the first mock_read in the chain
        mock_read.side_effect = validate_feed_get
        # Do the search (with class as parent_type)
        wraps = net.VNet.search(self.adp, parent_type=vios.VIOS, tagged=True)
        # Make sure we got the right networks
        self.assertEqual(4, len(wraps))
        for wrap, expected_vlanid in zip(wraps, (1234, 2, 1001, 1000)):
            self.assertIsInstance(wrap, net.VNet)
            self.assertTrue(wrap.tagged)
            self.assertEqual(expected_vlanid, wrap.vlan)

    @mock.patch('pypowervm.adapter.Adapter.read')
    def test_child_with_parent_spec(self, mock_read):
        """Test CHILD search using a parent instance."""
        def validate_read(root_type, root_id, child_type, xag):
            self.assertEqual('st', root_type)
            self.assertEqual('uuid', root_id)
            self.assertEqual('Cluster', child_type)
            self.assertIsNone(xag)
            return pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response()
        mock_read.side_effect = validate_read
        parent = mock.Mock(spec=clust.Cluster, schema_type='st', uuid='uuid')
        clust.Cluster.search(self.adp, id=0, parent=parent)
"""Tests for Adapter.refresh().""" clust_uuid = 'cluster_uuid' clust_href = 'https://server:12443/rest/api/uom/Cluster' + clust_uuid def setUp(self): super(TestRefresh, self).setUp() self.adp = apt.Adapter(mock.patch('requests.Session')) props = {'id': self.clust_uuid, 'links': {'SELF': [self.clust_href]}} self.old_etag = '123' self.clust_old = clust.Cluster.bld( self.adp, 'mycluster', stor.PV.bld(self.adp, 'hdisk1', 'udid1'), clust.Node.bld(self.adp, 'hostname1')) self.clust_old._etag = None self.clust_old.entry.properties = props self.new_etag = '456' self.clust_new = clust.Cluster.bld( self.adp, 'mycluster', stor.PV.bld(self.adp, 'hdisk2', 'udid2'), clust.Node.bld(self.adp, 'hostname2')) self.clust_new._etag = self.new_etag self.clust_new.entry.properties = props self.resp304 = apt.Response( 'meth', 'path', 304, 'reason', {'etag': self.old_etag}) self.resp200old = apt.Response( 'meth', 'path', 200, 'reason', {'etag': self.old_etag}) self.resp200old.entry = self.clust_old.entry self.resp200new = apt.Response( 'meth', 'path', 200, 'reason', {'etag': self.new_etag}) self.resp200new.entry = self.clust_new.entry def _mock_read_by_href(self, in_etag, out_resp): def read_by_href(href, etag, *args, **kwargs): self.assertEqual(href, self.clust_href) self.assertEqual(etag, in_etag) return out_resp return read_by_href @mock.patch('pypowervm.adapter.Adapter.read_by_href') def test_no_etag(self, mock_read): mock_read.side_effect = self._mock_read_by_href( None, self.resp200old) clust_old_save = copy.deepcopy(self.clust_old) clust_refreshed = self.clust_old.refresh() _assert_clusters_equal(self, clust_old_save, clust_refreshed) @mock.patch('pypowervm.adapter.Adapter.read_by_href') def test_etag_match(self, mock_read): mock_read.side_effect = self._mock_read_by_href( self.old_etag, self.resp304) self.clust_old._etag = self.old_etag clust_refreshed = self.clust_old.refresh() # On an etag match, refresh should return the same instance self.assertEqual(self.clust_old, 
clust_refreshed) @mock.patch('pypowervm.adapter.Adapter.read_by_href') def test_etag_no_match(self, mock_read): mock_read.side_effect = self._mock_read_by_href( self.old_etag, self.resp200new) self.clust_old._etag = self.old_etag clust_new_save = copy.deepcopy(self.clust_new) clust_refreshed = self.clust_old.refresh() _assert_clusters_equal(self, clust_new_save, clust_refreshed) @mock.patch('pypowervm.adapter.Adapter.read_by_href') def test_use_etag_false(self, mock_read): mock_read.side_effect = self._mock_read_by_href( None, self.resp200new) self.clust_old._etag = self.old_etag clust_new_save = copy.deepcopy(self.clust_new) clust_refreshed = self.clust_old.refresh(use_etag=False) _assert_clusters_equal(self, clust_new_save, clust_refreshed) class TestUpdate(testtools.TestCase): clust_uuid = 'cluster_uuid' clust_path = '/rest/api/uom/Cluster/' + clust_uuid clust_href = 'https://server:12443' + clust_path clust_etag = '123' def setUp(self): super(TestUpdate, self).setUp() self.adp = apt.Adapter(self.useFixture(fx.SessionFx()).sess) props = {'id': self.clust_uuid, 'links': {'SELF': [self.clust_href]}} self.cl = clust.Cluster.bld( self.adp, 'mycluster', stor.PV.bld( self.adp, 'hdisk1', udid='udid1'), clust.Node.bld(self.adp, 'hostname1')) self.cl._etag = self.clust_etag self.cl.entry.properties = props @mock.patch('pypowervm.adapter.Adapter.update_by_path') def test_update(self, mock_ubp): new_etag = '456' resp = apt.Response('meth', 'path', 200, 'reason', {'etag': new_etag}) resp.entry = self.cl.entry mock_ubp.return_value = resp newcl = self.cl.update() mock_ubp.assert_called_with( self.cl, self.clust_etag, self.clust_path, timeout=3600) _assert_clusters_equal(self, self.cl, newcl) self.assertEqual(newcl.etag, new_etag) @mock.patch('pypowervm.adapter.Adapter.update_by_path') def test_force_update(self, mock_ubp): # Update the entry with the new properties get_href = self.clust_href + "?group=None" props = {'id': self.clust_uuid, 'links': {'SELF': [get_href]}} 
self.cl.entry.properties = props new_etag = '456' resp = apt.Response('meth', 'path', 200, 'reason', {'etag': new_etag}) resp.entry = self.cl.entry mock_ubp.return_value = resp newcl = self.cl.update(force=True) mock_ubp.assert_called_with( self.cl, self.clust_etag, self.clust_path + '?group=None&force=true', timeout=3600) _assert_clusters_equal(self, self.cl, newcl) self.assertEqual(newcl.etag, new_etag) @mock.patch('pypowervm.adapter.Adapter.update_by_path') @mock.patch('warnings.warn') def test_update_xag(self, mock_warn, mock_ubp): new_etag = '456' resp = apt.Response('meth', 'path', 200, 'reason', {'etag': new_etag}) resp.entry = self.cl.entry mock_ubp.return_value = resp newcl = self.cl.update(xag=['one', 'two', 'three'], timeout=123) mock_ubp.assert_called_with( self.cl, self.clust_etag, self.clust_path, timeout=123) _assert_clusters_equal(self, self.cl, newcl) self.assertEqual(newcl.etag, new_etag) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) @mock.patch('pypowervm.adapter.Adapter.update_by_path') @mock.patch('warnings.warn') def test_update_with_get_xag(self, mock_warn, mock_ubp): # Update the entry with the new properties get_href = self.clust_href + "?group=one,three,two" props = {'id': self.clust_uuid, 'links': {'SELF': [get_href]}} self.cl.entry.properties = props new_etag = '456' resp = apt.Response('meth', 'path', 200, 'reason', {'etag': new_etag}) resp.entry = self.cl.entry mock_ubp.return_value = resp newcl = self.cl.update(xag=['should', 'be', 'ignored'], timeout=-1) mock_ubp.assert_called_with( self.cl, self.clust_etag, self.clust_path + '?group=one,three,two', timeout=3600) _assert_clusters_equal(self, self.cl, newcl) self.assertEqual(newcl.etag, new_etag) mock_warn.assert_called_with(mock.ANY, DeprecationWarning) class TestDelete(testtools.TestCase): def setUp(self): super(TestDelete, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt def test_delete(self): vswitch = net.VSwitch.bld(self.adpt, 'a_switch') vswitch.entry 
= mock.MagicMock() vswitch.entry.href = 'test' vswitch.entry.etag = 5 def validate_delete(uri, etag): self.assertEqual('test', uri) self.assertEqual(5, etag) return self.adpt.delete_by_href.side_effect = validate_delete vswitch.delete() class TestCreate(testtools.TestCase): def setUp(self): super(TestCreate, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt def test_create_root(self): vswitch = net.VSwitch.bld(self.adpt, 'a_switch') def validate_create(element, root_type, service, timeout=-1): self.assertIsInstance(element, net.VSwitch) self.assertEqual(net.VSwitch.schema_type, root_type) self.assertEqual('uom', service) self.assertEqual(123, timeout) return vswitch.entry self.adpt.create.side_effect = validate_create vswitch.create(timeout=123) def test_create_child(self): # We can safely pretend VSwitch is a child for purposes of this test. vswitch = net.VSwitch.bld(self.adpt, 'a_switch') def validate_create(element, root_type, root_id, child_type, service, timeout=456): self.assertIsInstance(element, net.VSwitch) self.assertEqual(net.VSwitch.schema_type, child_type) self.assertEqual('NetworkBridge', root_type) self.assertEqual('SomeUUID', root_id) self.assertEqual('uom', service) self.assertEqual(-1, timeout) return vswitch.entry self.adpt.create.side_effect = validate_create # Make sure it works when parent_type is a class... 
        # Make sure it works when parent_type is a class...
        vswitch.create(parent_type=net.NetBridge, parent_uuid='SomeUUID')
        # ...or a string
        vswitch.create(parent_type='NetworkBridge', parent_uuid='SomeUUID')
        # Or an instance
        parent = mock.Mock(spec=net.NetBridge, schema_type='NetworkBridge',
                           uuid='SomeUUID')
        vswitch.create(parent=parent)

    def test_create_other_service(self):
        """Ensure non-UOM service goes through."""
        vfile = vf.File.bld(self.adpt, "filename", "filetype", "vios_uuid")

        def validate_create(element, root_type, service, timeout=345):
            self.assertIsInstance(element, vf.File)
            self.assertEqual(vf.File.schema_type, root_type)
            self.assertEqual('web', service)
            self.assertEqual(-1, timeout)
            return vfile.entry
        self.adpt.create.side_effect = validate_create
        vfile.create(timeout=-1)

    def test_create_raises(self):
        """Verify invalid inputs raise exceptions."""
        vswitch = net.VSwitch.bld(self.adpt, 'a_switch')
        self.assertRaises(ValueError, vswitch.create, parent_type='Foo')
        self.assertRaises(ValueError, vswitch.create, parent_uuid='Foo')


class TestSetUUIDMixin(testtools.TestCase):
    """Generic tests for WrapperSetUUIDMixin."""

    def test_set_uuid(self):
        """Test mixins of Element (with/without Metadata) and Entry."""
        old_uuid = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4()))
        new_uuid = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4()))

        def set_wrap_uuid(wrap, value):
            # Indirection so assertRaises can exercise the uuid setter.
            wrap.uuid = value

        def assert_uuid(wrap, uuid, has_entry=True):
            if has_entry:
                self.assertEqual(uuid, wrap.uuid)
                # Same as above
                if uuid is None:
                    self.assertTrue('id' not in wrap.entry.properties or
                                    wrap.entry.properties['id'] is None)
                else:
                    self.assertEqual(uuid, wrap.entry.properties['id'])
            else:
                self.assertFalse(hasattr(wrap, 'entry'))
                self.assertEqual(uuid, wrap.uuid)

        @ewrap.EntryWrapper.pvm_type('SomeEntry')
        class SomeEntry(ewrap.EntryWrapper, ewrap.WrapperSetUUIDMixin):
            """EntryWrapper with set-uuid mixin."""
            pass

        # Set bad uuid value
        bad_uuid = 'F' + new_uuid[1:]
        self.assertRaises(ValueError, set_wrap_uuid, SomeEntry(None),
                          bad_uuid)

        # Entry has both Metadata and properties['id']
        some_ent = SomeEntry._bld(None)
        # Starts off empty
        assert_uuid(some_ent, None)
        # Can set from empty
        some_ent.set_uuid(old_uuid)
        assert_uuid(some_ent, old_uuid)
        # Can change from already-set
        some_ent.set_uuid(new_uuid)
        assert_uuid(some_ent, new_uuid)

        @ewrap.ElementWrapper.pvm_type('SomeObject', has_metadata=True)
        class SomeElementWithMetadata(ewrap.ElementWrapper,
                                      ewrap.WrapperSetUUIDMixin):
            """ElementWrapper with set-uuid mixin - WITH Metadata."""
            pass

        # Element has it in one place.  Also testing vivification of AtomID.
        sewm = SomeElementWithMetadata._bld(None)
        # Starts with no AtomID
        # NOTE(review): the expected-XML string literals below appear to have
        # been truncated by text extraction (the XML markup between the
        # quotes was stripped); restore them from upstream source before
        # relying on these assertions.
        self.assertEqual(
            ''
            ''.encode('utf-8'),
            sewm.toxmlstring())
        assert_uuid(sewm, None, has_entry=False)
        # Can set
        sewm.set_uuid(old_uuid)
        assert_uuid(sewm, old_uuid, has_entry=False)
        # Can change
        sewm.set_uuid(new_uuid)
        assert_uuid(sewm, new_uuid, has_entry=False)

        @ewrap.ElementWrapper.pvm_type('SomeOtherObject')
        class SomeElementWithoutMetadata(ewrap.ElementWrapper,
                                         ewrap.WrapperSetUUIDMixin):
            """ElementWrapper with set-uuid mixin - WITHOUT Metadata."""
            pass

        sewom = SomeElementWithoutMetadata._bld(None)
        # No Metadata
        # NOTE(review): literal truncated by extraction here too -- see above.
        self.assertEqual(
            ''.encode('utf-8'),
            sewom.toxmlstring())
        assert_uuid(sewom, None, has_entry=False)
        # Exception attempting to set on an element with no metadata
        self.assertRaises(AttributeError, sewom.set_uuid, new_uuid)
class TestGetters(twrap.TestWrapper):
    """Tests for EntryWrapperGetter / FeedGetter lazy fetchers."""

    file = LPAR_FILE
    wrapper_class_to_test = lpar.LPAR

    @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.refresh')
    def test_entry_wrapper_getter(self, mock_refresh):
        """EntryWrapperGetter caches, refreshes, and validates its args."""
        self.adpt.read.return_value = self.dwrap.entry
        mock_refresh.return_value = self.dwrap
        # ROOT
        getter = ewrap.EntryWrapperGetter(self.adpt, lpar.LPAR, 'lpar_uuid')
        self.assertEqual('lpar_uuid', getter.uuid)
        lwrap = getter.get()
        self.assertIsInstance(lwrap, lpar.LPAR)
        self.assertEqual(self.dwrap.entry, lwrap.entry)
        self.adpt.read.assert_called_with(
            'LogicalPartition', 'lpar_uuid', child_id=None, child_type=None,
            xag=None)
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(0, mock_refresh.call_count)
        # Second get doesn't re-read
        lwrap = getter.get()
        self.assertIsInstance(lwrap, lpar.LPAR)
        self.assertEqual(self.dwrap.entry, lwrap.entry)
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(0, mock_refresh.call_count)
        # get with refresh doesn't read, but does refresh
        lwrap = getter.get(refresh=True)
        self.assertIsInstance(lwrap, lpar.LPAR)
        self.assertEqual(self.dwrap.entry, lwrap.entry)
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(1, mock_refresh.call_count)
        # CHILD, use the EntryWrapper.getter classmethod, use xags
        getter = lpar.LPAR.getter(
            self.adpt, 'lpar_uuid', parent_class=stor.VDisk,
            parent_uuid='parent_uuid', xag=['one', 'two'])
        self.assertIsInstance(getter, ewrap.EntryWrapperGetter)
        self.assertEqual('lpar_uuid', getter.uuid)
        lwrap = getter.get()
        self.assertIsInstance(lwrap, lpar.LPAR)
        self.adpt.read.assert_called_with(
            'VirtualDisk', 'parent_uuid', child_type='LogicalPartition',
            child_id='lpar_uuid', xag=['one', 'two'])
        # With string parent_class
        getter = lpar.LPAR.getter(
            self.adpt, 'lpar_uuid', parent_class='VirtualDisk',
            parent_uuid='parent_uuid', xag=['one', 'two'])
        self.assertIsInstance(getter, ewrap.EntryWrapperGetter)
        self.assertEqual('lpar_uuid', getter.uuid)
        lwrap = getter.get()
        self.assertIsInstance(lwrap, lpar.LPAR)
        self.adpt.read.assert_called_with(
            'VirtualDisk', 'parent_uuid', child_type='LogicalPartition',
            child_id='lpar_uuid', xag=['one', 'two'])
        # With parent instance
        parent = mock.Mock(spec=stor.VDisk, schema_type='st', uuid='uuid')
        getter = lpar.LPAR.getter(self.adpt, 'lpar_uuid', parent=parent)
        self.assertIsInstance(getter, ewrap.EntryWrapperGetter)
        self.assertEqual('lpar_uuid', getter.uuid)
        lwrap = getter.get()
        self.assertIsInstance(lwrap, lpar.LPAR)
        self.adpt.read.assert_called_with(
            'st', 'uuid', child_type='LogicalPartition',
            child_id='lpar_uuid', xag=None)
        # parent type & uuid must both be specified
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          lpar.LPAR, 'lpar_uuid', parent_class=stor.VDisk)
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          lpar.LPAR, 'lpar_uuid', parent_uuid='parent_uuid')
        # entry_class must be a Wrapper subtype
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          's', 'lpar_uuid')
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          None, 'lpar_uuid')
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          ewrap.EntryWrapperGetter, 'lpar_uuid')
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          lpar.LPAR, 'lpar_uuid', parent_class=None,
                          parent_uuid='parent_uuid')
        self.assertRaises(ValueError, ewrap.EntryWrapperGetter, self.adpt,
                          lpar.LPAR, 'lpar_uuid',
                          parent_class=ewrap.EntryWrapperGetter,
                          parent_uuid='parent_uuid')

    @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.refresh')
    def test_feed_getter(self, mock_refresh):
        """FeedGetter caches, refreshes, refetches, and validates args."""
        self.adpt.read.return_value = self.resp
        feediter = iter(self.entries)
        mock_refresh.side_effect = lambda: next(feediter)
        # ROOT
        getter = ewrap.FeedGetter(self.adpt, lpar.LPAR)
        lfeed = getter.get()
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.adpt.read.assert_called_with(
            'LogicalPartition', None, child_id=None, child_type=None,
            xag=None)
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(0, mock_refresh.call_count)
        # Second get doesn't re-read
        lfeed = getter.get()
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(0, mock_refresh.call_count)
        # get with refresh refreshes all 21 wrappers (but doesn't call read)
        lfeed = getter.get(refresh=True)
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(21, mock_refresh.call_count)
        # get with refetch calls read, not refresh
        lfeed = getter.get(refetch=True)
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.assertEqual(2, self.adpt.read.call_count)
        self.adpt.read.assert_called_with(
            'LogicalPartition', None, child_id=None, child_type=None,
            xag=None)
        self.assertEqual(21, mock_refresh.call_count)
        # CHILD, use the EntryWrapper.getter classmethod, use xags
        getter = lpar.LPAR.getter(self.adpt, parent_class=stor.VDisk,
                                  parent_uuid='p_uuid', xag=['one', 'two'])
        self.assertIsInstance(getter, ewrap.FeedGetter)
        lfeed = getter.get()
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.adpt.read.assert_called_with(
            'VirtualDisk', 'p_uuid', child_type='LogicalPartition',
            child_id=None, xag=['one', 'two'])
        # CHILD, parent_class as string schema type
        getter = lpar.LPAR.getter(self.adpt, parent_class='VirtualDisk',
                                  parent_uuid='p_uuid', xag=['one', 'two'])
        self.assertIsInstance(getter, ewrap.FeedGetter)
        lfeed = getter.get()
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.adpt.read.assert_called_with(
            'VirtualDisk', 'p_uuid', child_type='LogicalPartition',
            child_id=None, xag=['one', 'two'])
        # CHILD, parent instance
        parent = mock.Mock(spec=stor.VDisk, schema_type='st', uuid='uuid')
        getter = lpar.LPAR.getter(self.adpt, parent=parent)
        self.assertIsInstance(getter, ewrap.FeedGetter)
        lfeed = getter.get()
        self.assertEqual(21, len(lfeed))
        self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985',
                         lfeed[0].uuid)
        self.adpt.read.assert_called_with(
            'st', 'uuid', child_type='LogicalPartition', child_id=None,
            xag=None)
        # entry_class must be a Wrapper subtype
        self.assertRaises(ValueError, ewrap.FeedGetter, self.adpt, 's')
        self.assertRaises(ValueError, ewrap.FeedGetter, self.adpt, None)
        self.assertRaises(ValueError, ewrap.FeedGetter, self.adpt,
                          ewrap.EntryWrapperGetter)
        self.assertRaises(ValueError, ewrap.FeedGetter, self.adpt,
                          lpar.LPAR, parent_class=None,
                          parent_uuid='parent_uuid')
        self.assertRaises(ValueError, ewrap.FeedGetter, self.adpt,
                          lpar.LPAR, parent_class=ewrap.EntryWrapperGetter,
                          parent_uuid='parent_uuid')
read_iter = iter(wrp.entry for wrp in ( self.entries[:3] + self.entries[:3] + self.entries[:3] + self.entries[:3])) self.adpt.read.side_effect = lambda *a, **k: next(read_iter) # Separate iterator for refreshes refresh_iter = iter(self.entries[:3]) mock_refresh.side_effect = lambda: next(refresh_iter) # ROOT uuids = ['u1', 'u2', 'u3'] getter = ewrap.UUIDFeedGetter(self.adpt, lpar.LPAR, uuids) # In order to be useful for a FeedTask, this has to evaluate as an # instance of FeedGetter self.assertIsInstance(getter, ewrap.FeedGetter) lfeed = getter.get() self.assertEqual(3, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) # This does three separate reads self.adpt.read.assert_has_calls([mock.call( lpar.LPAR.schema_type, uuid, child_type=None, child_id=None, xag=None) for uuid in uuids]) self.assertEqual(0, mock_refresh.call_count) # Second get doesn't re-read lfeed = getter.get() self.assertEqual(3, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) self.assertEqual(3, self.adpt.read.call_count) self.assertEqual(0, mock_refresh.call_count) # get with refresh refreshes all thre wrappers (but doesn't call read) lfeed = getter.get(refresh=True) self.assertEqual(3, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) self.assertEqual(3, self.adpt.read.call_count) self.assertEqual(3, mock_refresh.call_count) # get with refetch calls read, not refresh lfeed = getter.get(refetch=True) self.assertEqual(3, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) self.assertEqual(6, self.adpt.read.call_count) self.assertEqual(3, mock_refresh.call_count) # CHILD getter = ewrap.UUIDFeedGetter( self.adpt, lpar.LPAR, uuids, parent_class=stor.VDisk, parent_uuid='p_uuid', xag=['one', 'two']) self.assertIsInstance(getter, ewrap.FeedGetter) lfeed = getter.get() self.assertEqual(3, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) 
self.adpt.read.assert_has_calls([mock.call( stor.VDisk.schema_type, 'p_uuid', child_type=lpar.LPAR.schema_type, child_id=uuid, xag=['one', 'two']) for uuid in uuids]) # With parent instance parent = mock.Mock(spec=stor.VDisk, schema_type='st', uuid='uuid') getter = ewrap.UUIDFeedGetter( self.adpt, lpar.LPAR, uuids, parent=parent) self.assertIsInstance(getter, ewrap.FeedGetter) lfeed = getter.get() self.assertEqual(3, len(lfeed)) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', lfeed[0].uuid) self.adpt.read.assert_has_calls( [mock.call('st', 'uuid', child_type=lpar.LPAR.schema_type, child_id=uuid, xag=None) for uuid in uuids]) if __name__ == '__main__': unittest.main() pypowervm-1.1.24/pypowervm/tests/wrappers/test_enterprise_pool.py0000664000175000017500000001232113571367171025146 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from pypowervm.tests.test_utils import test_wrapper_abc
from pypowervm.wrappers import enterprise_pool as ep
from pypowervm.wrappers import entry_wrapper


class TestPoolFile(test_wrapper_abc.TestWrapper):
    """Tests of the Pool wrapper against the enterprise_pool_feed fixture."""

    file = 'enterprise_pool_feed.txt'
    wrapper_class_to_test = ep.Pool

    def test_entries(self):
        self.assertEqual(1, len(self.entries))

    def test_type(self):
        self.assertIsInstance(self.dwrap, ep.Pool)

    def test_id(self):
        self.assertEqual(328, self.dwrap.id)

    def test_name(self):
        self.assertEqual('FVT_pool1', self.dwrap.name)

    def test_compliance_state(self):
        self.assertEqual(ep.ComplianceState.IN_COMPLIANCE,
                         self.dwrap.compliance_state)

    def test_compliance_hours_left(self):
        self.assertEqual(0, self.dwrap.compliance_hours_left)

    def test_total_mobile_procs(self):
        self.assertEqual(20, self.dwrap.total_mobile_procs)

    def test_total_mobile_mem(self):
        self.assertEqual(0, self.dwrap.total_mobile_mem)

    def test_avail_mobile_procs(self):
        self.assertEqual(16, self.dwrap.avail_mobile_procs)

    def test_avail_mobile_mem(self):
        self.assertEqual(0, self.dwrap.avail_mobile_mem)

    def test_unret_mobile_procs(self):
        self.assertEqual(0, self.dwrap.unret_mobile_procs)

    def test_unret_mobile_mem(self):
        self.assertEqual(0, self.dwrap.unret_mobile_mem)

    def test_mgmt_consoles(self):
        # The feed's single console should be wrapped as a PoolMgmtConsole.
        self.assertIsInstance(self.dwrap.mgmt_consoles,
                              entry_wrapper.WrapperElemList)
        self.assertEqual(1, len(self.dwrap.mgmt_consoles))
        console = self.dwrap.mgmt_consoles[0]
        self.assertIsInstance(console, ep.PoolMgmtConsole)
        self.assertEqual('ip9-1-2-3', console.name)
        self.assertEqual('7042-CR7*10B6EDC', console.mtms.mtms_str)
        self.assertTrue(console.is_master_console)
        self.assertEqual('9.1.2.3', console.ip_addr)

    def test_master_console_mtms(self):
        self.assertEqual('7042-CR7*10B6EDC',
                         self.dwrap.master_console_mtms.mtms_str)


class TestPoolMemberFile(test_wrapper_abc.TestWrapper):
    """Tests of the PoolMember wrapper against its feed fixture."""

    file = 'enterprise_pool_member_feed.txt'
    wrapper_class_to_test = ep.PoolMember

    def test_entries(self):
        self.assertEqual(5, len(self.entries))

    def test_type(self):
        for entry in self.entries:
            self.assertIsInstance(entry, ep.PoolMember)

    def test_mobile_procs(self):
        self.assertEqual(4, self.dwrap.mobile_procs)

    def test_mobile_mem(self):
        self.assertEqual(0, self.dwrap.mobile_mem)

    def test_set_mobile_procs(self):
        # Setter round-trip; restore the original value afterwards.
        orig_value = self.dwrap.mobile_procs
        self.dwrap.mobile_procs = 999
        self.assertEqual(999, self.dwrap.mobile_procs)
        self.dwrap.mobile_procs = orig_value

    def test_set_mobile_mem(self):
        # Setter round-trip; restore the original value afterwards.
        orig_value = self.dwrap.mobile_mem
        self.dwrap.mobile_mem = 888
        self.assertEqual(888, self.dwrap.mobile_mem)
        self.dwrap.mobile_mem = orig_value

    def test_inactive_procs(self):
        self.assertEqual(2, self.dwrap.inactive_procs)

    def test_inactive_mem(self):
        self.assertEqual(0, self.dwrap.inactive_mem)

    def test_unret_mobile_procs(self):
        self.assertEqual(0, self.dwrap.unret_mobile_procs)

    def test_unret_mobile_mem(self):
        self.assertEqual(0, self.dwrap.unret_mobile_mem)

    def test_proc_compliance_hours_left(self):
        self.assertEqual(0, self.dwrap.proc_compliance_hours_left)

    def test_mem_compliance_hours_left(self):
        self.assertEqual(0, self.dwrap.mem_compliance_hours_left)

    def test_sys_name(self):
        self.assertEqual('Server-8284-22A-SN21B63CV', self.dwrap.sys_name)

    def test_sys_installed_procs(self):
        self.assertEqual(20, self.dwrap.sys_installed_procs)

    def test_sys_installed_mem(self):
        self.assertEqual(524288, self.dwrap.sys_installed_mem)

    def test_sys_mtms(self):
        self.assertEqual('8284-22A*21B63CV', self.dwrap.sys_mtms.mtms_str)

    def test_sys_state(self):
        self.assertEqual('operating', self.dwrap.sys_state)

    def test_mgmt_consoles(self):
        # The feed's single console should be wrapped as a PoolMgmtConsole.
        self.assertIsInstance(self.dwrap.mgmt_consoles,
                              entry_wrapper.WrapperElemList)
        self.assertEqual(1, len(self.dwrap.mgmt_consoles))
        console = self.dwrap.mgmt_consoles[0]
        self.assertIsInstance(console, ep.PoolMgmtConsole)
        self.assertEqual('ip9-1-2-3', console.name)
        self.assertEqual('7042-CR7*10B6EDC', console.mtms.mtms_str)
        self.assertTrue(console.is_master_console)
        self.assertEqual('9.1.2.3', console.ip_addr)
pypowervm-1.1.24/pypowervm/tests/wrappers/test_mgmt_console.py0000664000175000017500000000700113571367171024422 0ustar neoneo00000000000000# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from pypowervm.tests.test_utils import test_wrapper_abc as twrap
import pypowervm.wrappers.management_console as mc

# Sample SSH public key expected from the managementconsole_ssh fixture.
_PUB_KEY = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuA/Av0jMYlG54YiaaaQXho8iO'
            'ZfY+WkBnuFfweESZOy824Ce9FvPqXsNL+nPAgKWG3TONwJldYgCgnBsFXUizkcne'
            '9Dt/T/zs2Bzl7b1YPrXyYS1hxKFrV/pYEERUiFa9ppR+M8mxdNYO0+ph356LO3mb'
            'xOM6nEZ1L6l6RUvbUwV9Zuw3Hpiz1lAV6d6EwMHJZ+WFlipJ2wxpM4QUKmb0V2UJ'
            'oHAb7tp3zipr3CCo0NtnpcD7wxsFhtz2ccRvNMbGhe1i9KikmBtQQDl1adMSbBL2'
            '+tGmyqHNq/H6d75bfXOUCl7NKtUq7VVGcXDOlTS1CDdLdmUn0l4z0AlyciQt wlp'
            '@9.0.0.0')

# Sample authorized keys (tuple of two) expected from the same fixture.
_AUTH_KEYS = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCyl3+yXyoYAzvScxTiWqxe0P'
              'DwYvTHwLsIkgAY7s7n+8tUR7zA0dYWggl4aCfOAE2RMF0zKoFyRK8a9M/I1kVC'
              'YLb9y1rWp76jnxZpRBD/1DjjQ0qW5e1fbdrS52mJcFLL1+MzeoLT7+6GeMUcgN'
              'rmZQMUqSbwF+Rdxv56YTdx9u0EH1qaT/H0syp1Y8EHCaBVwdZcmNQLBFaYnVxH'
              'NHTQMYMTqokkyrZ9whSaK98OiYQO//5gnJzESOxOURYTzLKLz8WPkiONM6QgF+'
              'E5Zobt/REr3Tq8l1e1V/e2+7owFkMMte14I2sfK8QnZUrpJziXv3gwOpUP34gD'
              'ud6ceBlv wlp@9.0.0.0',
              'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3shE8yLGII+BPaPMIOdNgA6'
              'ZyDYKobCtXE6td8X9dgI0Sz08YCUQY9pOeWr/D63LwJaYsgqVspQaUEM5WH6s2'
              'eNKAERYayog6iCEaqApDDQETuf4XQ0JXo08izRPpMeRZwp3/RhNJVrxNheUp9n'
              'kHI3Mbx7jHvgwih48BTeqfj8L1Nnp4srhYDuzuN6NhUvbWLKJAjaQojRLSYEty'
              's5ASq7v+D+OEXqVBSRheKf5eWOdEF68sBYpOaS4qLycZjd5YGPUg0b+DfME2jr'
              '8kjbig1js8omgljSvKIwHIKfrfWPwKbWxtHaqWzTT+fUPygD7IDxPqsSEQIAjN'
              'PWmWQM+D wlp@9.0.0.0')


class TestMCEntryWrapper(twrap.TestWrapper):
    """ManagementConsole wrapper tests against a console without SSH keys."""

    file = 'managementconsole.txt'
    wrapper_class_to_test = mc.ManagementConsole

    def test_mgmt_console(self):
        self.assertEqual(self.dwrap.name, "hmc7")

        self.assertEqual(self.dwrap.mtms.model, "f93")
        self.assertEqual(self.dwrap.mtms.machine_type, "Ve57")
        self.assertEqual(self.dwrap.mtms.serial, "2911559")

        con_inf = self.dwrap.network_interfaces.console_interface
        self.assertEqual('eth0', con_inf.name)
        self.assertEqual('9.1.2.3 fe80:0:0:0:5054:ff:fed8:a951',
                         con_inf.address)

        # No SSH key material in this fixture.
        self.assertEqual(None, self.dwrap.ssh_public_key)
        self.assertEqual((), self.dwrap.ssh_authorized_keys)


class TestMCEntryWrapperSSH(twrap.TestWrapper):
    """ManagementConsole wrapper tests against a console with SSH keys."""

    file = 'managementconsole_ssh.txt'
    wrapper_class_to_test = mc.ManagementConsole

    def test_mgmt_console(self):
        self.assertEqual(_PUB_KEY, self.dwrap.ssh_public_key)
        self.assertEqual(_AUTH_KEYS, self.dwrap.ssh_authorized_keys)

        # Now set the keys
        keys = ('key1', 'key2')
        self.dwrap.ssh_authorized_keys = keys
        self.assertEqual(keys, self.dwrap.ssh_authorized_keys)

        # Assigning an empty list should read back as an empty tuple.
        self.dwrap.ssh_authorized_keys = []
        self.assertEqual(tuple(), self.dwrap.ssh_authorized_keys)
pypowervm-1.1.24/pypowervm/tests/wrappers/test_vios_file.py0000664000175000017500000000400513571367171023714 0ustar neoneo00000000000000# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import pypowervm.const as pc import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.vios_file as vf class TestVIOSFile(twrap.TestWrapper): file = 'file_feed.txt' wrapper_class_to_test = vf.File def test_wrapper_class(self): self.assertEqual(vf.File.schema_type, 'File') self.assertEqual(vf.File.schema_ns, pc.WEB_NS) self.assertTrue(vf.File.has_metadata) self.assertEqual(vf.File.default_attrib, pc.DEFAULT_SCHEMA_ATTR) def test_file(self): self.assertTrue(len(self.entries) > 0) vio_file = self.entries[0] self.assertEqual(vio_file.schema_type, 'File') self.assertEqual('boot_9699a0f5', vio_file.file_name) self.assertEqual('1421736166276', vio_file.date_modified) self.assertEqual('application/octet-stream', vio_file.internet_media_type) self.assertEqual('5cd8e4b0-083e-4c71-bcff-2432807cfdcc', vio_file.file_uuid) self.assertEqual(25165824, vio_file.expected_file_size) self.assertEqual(25165824, vio_file.current_file_size) self.assertEqual(vf.FileType.DISK_IMAGE, vio_file.enum_type) self.assertEqual('14B854F7-42CE-4FF0-BD57-1D117054E701', vio_file.vios_uuid) self.assertEqual('0300f8d6de00004b000000014a54555cd9.28', vio_file.tdev_udid) pypowervm-1.1.24/pypowervm/tests/__init__.py0000664000175000017500000000000013571367171020561 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/test_fixtures.py0000664000175000017500000004314313571367171021751 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc

import fixtures
import importlib
import mock
import six

from pypowervm import traits as trt

# An anchor for traits we construct artificially so the session isn't
# garbage collected.
_mk_traits_sessions = []


def _mk_traits(local, hmc):
    """Mock a single APITraits configuration.

    :param local: Should the APITraits pretend to be local? True or False.
    :param hmc: Should the APITraits pretend to be running against HMC
                (True) or PVM (False)?
    :return: APITraits instance with the specified behavior.
    """
    _sess = mock.Mock()
    _sess.use_file_auth = local
    _sess.mc_type = 'HMC' if hmc else 'PVM'
    # Traits use a weak ref to the session to avoid a circular reference
    # so anchor the mock session globally otherwise it'll be gone
    # right after we return it.
    global _mk_traits_sessions
    _mk_traits_sessions.append(_sess)
    return trt.APITraits(_sess)

# Canned traits for the three interesting session flavors.
LocalPVMTraits = _mk_traits(local=True, hmc=False)
RemotePVMTraits = _mk_traits(local=False, hmc=False)
RemoteHMCTraits = _mk_traits(local=False, hmc=True)


class SessionFx(fixtures.Fixture):
    """Patch pypowervm.adapter.Session."""

    def __init__(self, traits=LocalPVMTraits):
        """Create Session patcher with traits.

        :param traits: APITraits instance to be assigned to the .traits
                       attribute of the mock Session. If not specified,
                       sess.traits will be LocalPVMTraits. LocalPVMTraits,
                       RemotePVMTraits, and RemoteHMCTraits are provided
                       below for convenience.
        :return:
        """
        self.traits = traits
        self._patcher = mock.patch('pypowervm.adapter.Session')

    def setUp(self):
        super(SessionFx, self).setUp()
        self.sess = self._patcher.start()
        self.addCleanup(self._patcher.stop)
        self.sess.traits = self.traits
        self.sess.timeout = 1200


class AdapterFx(fixtures.Fixture):
    """Patch pypowervm.adapter.Adapter."""

    def __init__(self, session=None, traits=None):
        """Create Adapter and/or Session patchers with traits.

        :param session: A pypowervm.adapter.Session instance or mock with
                        which to back this mocked Adapter. If not specified,
                        a new SessionFx fixture is created and used.
        :param traits: APITraits instance to be assigned to the .traits
                       attribute of the Session and/or Adapter mock. If not
                       specified, LocalPVMTraits will be used. If both
                       session and traits are specified, the session's
                       traits will be overwritten with the traits parameter.
                       LocalPVMTraits, RemotePVMTraits, and RemoteHMCTraits
                       are provided below for convenience.
        """
        super(AdapterFx, self).__init__()
        self.session = session
        if traits is None and (session is None or session.traits is None):
            self.traits = LocalPVMTraits
        elif traits:
            self.traits = traits
        else:
            self.traits = session.traits
        self._patcher = mock.patch('pypowervm.adapter.Adapter')

    def setUp(self):
        super(AdapterFx, self).setUp()
        if not self.session:
            self.session = self.useFixture(SessionFx(self.traits)).sess
        self.adpt = self._patcher.start()
        self.addCleanup(self._patcher.stop)
        self.adpt.session = self.session
        self.set_traits(self.traits)

    def set_traits(self, traits):
        # Mocked Adapter needs to see both routes to traits.
        self.adpt.session.traits = traits
        self.adpt.traits = traits


class SimplePatcher(object):
    """Provide a basic mocking patcher on a test fixture.

    The main purpose of this class is to be used with SimplePatchingFx.

    That said, the following are equivalent:

    @mock.patch('path.to.method')
    def test_foo(self, mock_meth):
        mock_meth.return_value = 'abc'
        # ...

    def test_foo(self):
        mock_meth = SimplePatcher(self, 'whatever', 'path.to.method',
                                  return_value='abc').start()
        # ...
    """

    def __init__(self, fx, name, path, patch_object=False, side_effect=None,
                 return_value=None):
        """Create a patcher on a given fixture.

        :param fx: The fixtures.Fixture (subclass) on which to register the
                   patcher.
        :param name: String name for the patcher.
        :param path: String python path of the object being mocked.
        :param patch_object: If True, the path parameter is parsed to create
                             a mock.patch.object with autospec=True instead
                             of a regular mock.patch. For example,
                             patch='foo.bar.Baz.meth' would result in
                             mock.patch.object(foo.bar.Baz, 'meth',
                             autospec=True)
                             Note that this means the mock call will include
                             the instance through which it was invoked.
        :param side_effect: Side effect for the mock created by this patcher.
                            If side_effect is supplied, return_value is
                            ignored.
        :param return_value: Return value for the mock created by this
                             patcher. If side_effect is supplied,
                             return_value is ignored.
        """
        self.fx = fx
        self.name = name
        if patch_object:
            # Split 'mod.path.Class.meth' into its module, class, and method
            # so the real class can be imported for patch.object.
            modname, klassname, methname = path.rsplit('.', 2)
            module = importlib.import_module(modname)
            klass = getattr(module, klassname)
            self.patcher = mock.patch.object(klass, methname, autospec=True)
        else:
            self.patcher = mock.patch(path)
        self.return_value = return_value
        self.side_effect = side_effect
        self.mock = None

    def start(self):
        """Start the patcher, creating and setting up the mock."""
        self.mock = self.patcher.start()
        if self.side_effect:
            self.mock.side_effect = self.side_effect
        else:
            self.mock.return_value = self.return_value
        self.fx.addCleanup(self.patcher.stop)
        return self.mock


class LoggingPatcher(SimplePatcher):
    """SimplePatcher whose mock logs its name and returns a value."""
    # Sentinel: make the mock return the first positional argument it got.
    FIRST_ARG = '__MOCK_RETURNS_FIRST_ARGUMENT__'

    def __init__(self, fx, name, path, patch_object=False, return_value=None):
        """Create the logging patcher.

        :param fx: The fixtures.Fixture (subclass) on which to register the
                   patcher. Must be a fixture providing a .log(msg) method.
        :param name: String name for the patcher.
        :param path: String python path of the object being mocked.
        :param patch_object: If True, the path parameter is parsed to create
                             a mock.patch.object with autospec=True instead
                             of a regular mock.patch. For example,
                             patch='foo.bar.Baz.meth' would result in
                             mock.patch.object(foo.bar.Baz, 'meth',
                             autospec=True)
                             Note that this means the mock call will include
                             the instance through which it was invoked.
        :param return_value: The return value for the mocked method.
        """
        def _log(*a, **k):
            self.fx.log(self.name)
            return a[0] if self.ret is self.FIRST_ARG and len(a) != 0 \
                else self.ret
        # This ignores/overrides the superclass's return_value semantic.
        self.ret = return_value
        super(LoggingPatcher, self).__init__(
            fx, name, path, patch_object=patch_object, side_effect=_log)


@six.add_metaclass(abc.ABCMeta)
class Logger(object):
    """Base class for mixins wanting simple 'log to a list' semantics."""
    def __init__(self):
        """Create a new Logger."""
        super(Logger, self).__init__()
        self._tx_log = []

    def get_log(self):
        """Retrieve the event log.

        :return: The log, a list of strings in the order they were added.
        """
        return self._tx_log

    def log(self, val):
        """Add a message to the log.

        :param val: String value to append to the log.
        """
        self._tx_log.append(val)

    def reset_log(self):
        """Clear the log."""
        self._tx_log = []


@six.add_metaclass(abc.ABCMeta)
class SimplePatchingFx(fixtures.Fixture):
    """Fixture base class supporting SimplePatcher.

    Subclasses should invoke add_patchers from __init__ after
    super().__init__, but before useFixture.
    """
    def __init__(self):
        """Create the simple-patching fixture."""
        super(SimplePatchingFx, self).__init__()
        self.patchers = {}

    def add_patchers(self, *patchers):
        """Add some number of SimplePatcher instances to the fixture.

        :param patchers: Zero or more SimplePatcher instances to add.
        """
        for patcher in patchers:
            self.patchers[patcher.name] = patcher

    def setUp(self):
        """Start the fixture and its member SimplePatchers.

        This is generally invoked via useFixture and should not be called
        directly.
        """
        super(SimplePatchingFx, self).setUp()
        for patcher in self.patchers.values():
            patcher.start()


class SleepPatcher(SimplePatcher):
    """SimplePatcher for time.sleep."""
    def __init__(self, fx, side_effect=None):
        super(SleepPatcher, self).__init__(fx, 'sleep', 'time.sleep',
                                           side_effect=side_effect)


class SleepFx(SimplePatchingFx):
    """Fixture for time.sleep."""
    def __init__(self, side_effect=None):
        """Create the fixture for time.sleep."""
        super(SleepFx, self).__init__()
        self.add_patchers(SleepPatcher(self, side_effect=side_effect))

# Thread locking primitives are located slightly differently in py2 vs py3
SEM_ENTER = 'threading.%sSemaphore.__enter__' % ('_' if six.PY2 else '')
SEM_EXIT = 'threading.%sSemaphore.__exit__' % ('_' if six.PY2 else '')


class WrapperTaskFx(SimplePatchingFx, Logger):
    """Customizable mocking and pseudo-logging for WrapperTask primitives.

    Provides LoggingPatchers for REST and locking primitives. By default,
    these patchers simply log their name and return a sensible value (see
    below).

    However, patchers can be added, changed, or removed by name from the
    fixture instance via its 'patchers' dict. In order to have effect on
    your test case, such modifications must be done between fixture
    initialization and useFixture. For example:

    # Init the fixture, but do not start it:
    wtfx = WrapperTaskFx(a_wrapper)

    # An existing patcher can be modified:
    upd = wtfx.patchers['update'].side_effect = SomeException()

    # Or deleted:
    del wtfx.patchers['refresh']

    # New patchers can be added. They must be instances of SimplePatcher (or
    # a subclass). Add directly to 'patchers':
    wtfx.patchers['foo'] = LoggingPatcher(wtfx, 'frob',
                                          'pypowervm.utils.frob')
    # ...or use add_patchers to add more than one:
    wtfx.add_patchers(p1, p2, p3)

    # Finally, don't forget to start the fixture
    self.useFixture(wtfx)

    # Mocks can be accessed via their patchers and queried during testing as
    # usual:
    wtfx.patchers['foo'].mock.assert_called_with('bar', 'baz')
    self.assertEqual(3, wtfx.patchers['update'].mock.call_count)

    See live examples in pypowervm.tests.utils.test_transaction.TestWrapperTask

    Default mocks:
    'get': Mocks EntryWrapperGetter.get.
           Logs 'get'.
           Returns the wrapper with which the fixture was initialized.
    'refresh': Mocks EntryWrapper.refresh.
               Logs 'refresh'.
               Returns the wrapper with which the fixture was initialized.
    'update': Mocks EntryWrapper.update.
              Logs 'update'.
              Returns the wrapper with which the fixture was initialized.
    'lock', 'unlock': Mocks semaphore locking
                      (oslo_concurrency.lockutils.lock and @synchronized,
                      ultimately threading.Semaphore) performed by the
                      @entry_transaction decorator.
                      Logs 'lock'/'unlock', respectively.
                      Returns None.
    """
    def __init__(self, wrapper):
        """Create the fixture around a specific EntryWrapper.

        :param wrapper: EntryWrapper instance to be returned by mocked
                        EntryWrapperGetter.get and EntryWrapper.refresh
                        methods
        """
        super(WrapperTaskFx, self).__init__()
        self._wrapper = wrapper
        self.add_patchers(
            LoggingPatcher(
                self, 'get',
                'pypowervm.wrappers.entry_wrapper.EntryWrapperGetter.get',
                return_value=self._wrapper),
            LoggingPatcher(
                self, 'refresh',
                'pypowervm.wrappers.entry_wrapper.EntryWrapper.refresh',
                return_value=self._wrapper),
            LoggingPatcher(
                self, 'update',
                'pypowervm.wrappers.entry_wrapper.EntryWrapper.update',
                return_value=self._wrapper),
            LoggingPatcher(self, 'lock', SEM_ENTER),
            LoggingPatcher(self, 'unlock', SEM_EXIT),
            SleepPatcher(self)
        )


class FeedTaskFx(SimplePatchingFx, Logger):
    """Customizable mocking and pseudo-logging for FeedTask primitives.

    Provides LoggingPatchers for REST and locking primitives. By default,
    these patchers simply log their name and return a sensible value (see
    below).

    However, patchers can be added, changed, or removed by name from the
    fixture instance via its 'patchers' dict. In order to have effect on
    your test case, such modifications must be done between fixture
    initialization and useFixture. For example:

    # Init the fixture, but do not start it:
    ftfx = FeedTaskFx(a_feed)

    # An existing patcher can be modified:
    upd = ftfx.patchers['update'].side_effect = SomeException()

    # Or deleted:
    del ftfx.patchers['refresh']

    # New patchers can be added. They must be instances of SimplePatcher (or
    # a subclass). Add directly to 'patchers':
    ftfx.patchers['foo'] = LoggingPatcher(ftfx, 'frob',
                                          'pypowervm.utils.frob')
    # ...or use add_patchers to add more than one:
    ftfx.add_patchers(p1, p2, p3)

    # Finally, don't forget to start the fixture
    self.useFixture(ftfx)

    # Mocks can be accessed via their patchers and queried during testing as
    # usual:
    ftfx.patchers['foo'].mock.assert_called_with('bar', 'baz')
    self.assertEqual(3, ftfx.patchers['update'].mock.call_count)

    See live examples in pypowervm.tests.utils.test_transaction.TestWrapperTask

    Default mocks:
    'get': Mocks FeedGetter.get.
           Logs 'get'.
           Returns the feed with which the fixture was initialized.
    'refresh': Mocks EntryWrapper.refresh.
               Logs 'refresh'.
               Returns the wrapper on which the refresh method was called.
    'update': Mocks EntryWrapper.update.
              Logs 'update'.
              Returns the wrapper on which the update method was called.
    'lock', 'unlock': Mocks semaphore locking
                      (oslo_concurrency.lockutils.lock and @synchronized,
                      ultimately threading.Semaphore) performed by the
                      @entry_transaction decorator.
                      Logs 'lock'/'unlock', respectively.
                      Returns None.
    """
    def __init__(self, feed):
        """Create the fixture around a given feed.

        :param feed: The feed (list of EntryWrappers) to be returned from
                     the mocked FeedGetter.get method.
        """
        super(FeedTaskFx, self).__init__()
        self._feed = feed
        self.add_patchers(
            LoggingPatcher(
                self, 'get',
                'pypowervm.wrappers.entry_wrapper.FeedGetter.get',
                return_value=self._feed),
            LoggingPatcher(
                self, 'refresh',
                'pypowervm.wrappers.entry_wrapper.EntryWrapper.refresh',
                patch_object=True, return_value=LoggingPatcher.FIRST_ARG),
            LoggingPatcher(
                self, 'update',
                'pypowervm.wrappers.entry_wrapper.EntryWrapper.update',
                patch_object=True, return_value=LoggingPatcher.FIRST_ARG),
            LoggingPatcher(self, 'lock', SEM_ENTER),
            LoggingPatcher(self, 'unlock', SEM_EXIT),
            SleepPatcher(self)
        )


class LoggingFx(SimplePatchingFx):
    """Fixture for LOG.*, not to be confused with Logger/LoggingPatcher.

    Provides patches and mocks for LOG.x for
    x in ('info', 'warning', 'debug', 'error', 'exception')
    """
    def __init__(self):
        """Create the fixture for the various logging methods."""
        super(LoggingFx, self).__init__()
        self.add_patchers(
            *(SimplePatcher(self, x, 'oslo_log.log.BaseLoggerAdapter.%s' % x)
              for x in ('info', 'warning', 'debug', 'error', 'exception')))
pypowervm-1.1.24/pypowervm/tests/test_exceptions.py0000664000175000017500000000600413571367171022254 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os import unittest import six import pypowervm.exceptions as pvmex msg_params = { "backing_dev": "backing_dev_param", "cpu_size": 678, "element": "element_param", "element_type": "element_type_param", "error": "error_param", "file_name": "file_name_param", "image_volume": "image_volume_param", "lpar_name": "lpar_name_param", "min_vios": "min_vios_param", "name": "name_param", "operation_name": "operation_name_param", "reason": "reason_param", "seconds": 147, "valid_values": "valid_values_param", "vios_state": "vios_state_param", "volume": "volume_param", "access_file": "testfile", } os.environ['LANG'] = 'en_US' class2msg = { pvmex.NotFound: "Element not found: element_type_param element_param", pvmex.LPARNotFound: "LPAR not found: lpar_name_param", pvmex.JobRequestFailed: "The 'operation_name_param' operation failed. error_param", pvmex.JobRequestTimedOut: "The 'operation_name_param' operation failed. " "Failed to complete the task in 147 seconds.", pvmex.AuthFileReadError: "OS denied access to file testfile.", pvmex.AuthFileAccessError: "OS encountered an I/O error attempting to read file testfile: " "error_param", pvmex.MigrationFailed: "The migration task failed. error_param" } class TestExceptions(unittest.TestCase): """Test coverage for the pypowervm.exceptions module.""" def raise_helper(self, e): raise e def fmt_helper(self, eclass, expected_message): e = eclass(**msg_params) self.assertRaises(eclass, self.raise_helper, e) try: raise e except eclass as e1: self.assertEqual(e1.args[0], expected_message) def test_Error(self): e = pvmex.Error("test") self.assertRaises(pvmex.Error, self.raise_helper, e) try: raise e except pvmex.Error as e1: self.assertEqual(e1.args[0], "test") def test_fmterrors(self): for e, s in six.iteritems(class2msg): try: self.fmt_helper(e, s) except ValueError: self.fail(s) def test_bogusformatparams(self): class Bogus(pvmex.AbstractMsgFmtError): msg_fmt = "This has a %(bogus)s format parameter." 
self.assertRaises(KeyError, Bogus, **msg_params) if __name__ == "__main__": unittest.main() pypowervm-1.1.24/pypowervm/tests/test_util.py0000664000175000017500000004422713571367171021061 0ustar neoneo00000000000000# Copyright 2014, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six if six.PY2: import __builtin__ as builtins elif six.PY3: import builtins import unittest from pypowervm import const from pypowervm import util dummyuuid1 = "abcdef01-2345-2345-2345-67890abcdef0" dummyuuid2 = "67890abc-5432-5432-5432-def0abcdef01" class TestUtil(unittest.TestCase): """Unit tests for pypowervm.util.""" def test_convert_bytes_to_gb(self): # A round 1 GB test = util.convert_bytes_to_gb(1024 * 1024 * 1024) self.assertEqual(1.0, test) # A single MB test = util.convert_bytes_to_gb(1024 * 1024.0) self.assertEqual(0.0009765625, test) # A single byte - should be the low Value self.assertEqual(.0001, util.convert_bytes_to_gb(1)) # Try changing the low value self.assertEqual(.0005, util.convert_bytes_to_gb(1, .0005)) # Round up self.assertEqual(1.15, util.convert_bytes_to_gb(1224067890, dp=2)) # Low value still honors dp self.assertEqual(0.01, util.convert_bytes_to_gb(1, dp=2)) def test_round_gb_size_up(self): self.assertEqual(12.35, util.round_gb_size_up(12.34000000001)) self.assertEqual(12.34000000001, util.round_gb_size_up(12.34000000001, dp=11)) self.assertEqual(1048576, util.round_gb_size_up(1048576.0, dp=0)) 
self.assertEqual(1048576, util.round_gb_size_up(1048575.1, dp=0)) self.assertEqual(1048576, util.round_gb_size_up(1048576, dp=0)) self.assertEqual(1048600, util.round_gb_size_up(1048576.1234, dp=-2)) def test_sanitize_bool_for_api(self): self.assertEqual('true', util.sanitize_bool_for_api(True)) self.assertEqual('false', util.sanitize_bool_for_api(False)) self.assertEqual('true', util.sanitize_bool_for_api('True')) self.assertEqual('false', util.sanitize_bool_for_api('False')) def test_find_wrapper(self): wrap1 = mock.MagicMock() wrap1.uuid = 'a' wrap2 = mock.MagicMock() wrap2.uuid = 'b' wraps = [wrap1, wrap2] self.assertEqual(wrap1, util.find_wrapper(wraps, 'a')) self.assertEqual(wrap2, util.find_wrapper(wraps, 'b')) self.assertIsNone(util.find_wrapper(wraps, 'c')) def test_dice_href(self): href = 'https://server:1234/rest/api/uom/Obj/UUID//?group=One,Two#frag' self.assertEqual(util.dice_href(href), '/rest/api/uom/Obj/UUID?group=One,Two#frag') self.assertEqual(util.dice_href(href, include_query=True), '/rest/api/uom/Obj/UUID?group=One,Two#frag') self.assertEqual(util.dice_href(href, include_fragment=False), '/rest/api/uom/Obj/UUID?group=One,Two') self.assertEqual(util.dice_href(href, include_query=False), '/rest/api/uom/Obj/UUID#frag') self.assertEqual(util.dice_href(href, include_fragment=True), '/rest/api/uom/Obj/UUID?group=One,Two#frag') self.assertEqual(util.dice_href(href, include_query=False, include_fragment=True), '/rest/api/uom/Obj/UUID#frag') self.assertEqual(util.dice_href(href, include_scheme_netloc=True, include_query=False, include_fragment=False), 'https://server:1234/rest/api/uom/Obj/UUID') def test_get_req_path_uuid_and_is_instance_path(self): # Fail: no '/' path = dummyuuid1 self.assertIsNone(util.get_req_path_uuid(path)) self.assertRaises(IndexError, util.is_instance_path, path) path = '/' + dummyuuid1 self.assertEqual(dummyuuid1, util.get_req_path_uuid(path)) self.assertTrue(util.is_instance_path(path)) path = 
'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 self.assertEqual(dummyuuid1, util.get_req_path_uuid(path)) self.assertTrue(util.is_instance_path(path)) # Fail: last path element is not a UUID path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child' self.assertIsNone(util.get_req_path_uuid(path)) self.assertFalse(util.is_instance_path(path)) # Fail: last path element is not quiiiite a UUID path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1[1:] self.assertIsNone(util.get_req_path_uuid(path)) self.assertFalse(util.is_instance_path(path)) # Ignore query/fragment path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '?group=One,Two#frag') self.assertEqual(dummyuuid1, util.get_req_path_uuid(path)) self.assertTrue(util.is_instance_path(path)) # Fail: last path element (having removed query/fragment) is not a UUID path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child?group=One,Two#frag') self.assertIsNone(util.get_req_path_uuid(path)) self.assertFalse(util.is_instance_path(path)) # Default case conversion path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1.upper() self.assertEqual(dummyuuid1, util.get_req_path_uuid(path)) self.assertEqual(dummyuuid1, util.get_req_path_uuid( path, preserve_case=False)) self.assertTrue(util.is_instance_path(path)) # Force no case conversion self.assertEqual(dummyuuid1.upper(), util.get_req_path_uuid( path, preserve_case=True)) # Child URI gets child UUID by default path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child/' + dummyuuid2) self.assertEqual(dummyuuid2, util.get_req_path_uuid(path)) self.assertTrue(util.is_instance_path(path)) # Get root UUID from child URI path = ('https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child/' + dummyuuid2) self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True)) self.assertTrue(util.is_instance_path(path)) # root=True redundant on a root path path = '/' + dummyuuid1 self.assertEqual(dummyuuid1, 
util.get_req_path_uuid(path, root=True)) path = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 self.assertEqual(dummyuuid1, util.get_req_path_uuid(path, root=True)) def test_extend_basepath(self): ext = '/foo' # Various forms without query params or fragments for path in (dummyuuid1, '/' + dummyuuid1, 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1, 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 + '/Child'): self.assertEqual(path + ext, util.extend_basepath(path, ext)) basepath = 'https://server:1234/rest/api/uom/Obj/' + dummyuuid1 qp = '?foo=bar,baz&blah=123' frag = '#frag' # Query params self.assertEqual(basepath + ext + qp, util.extend_basepath(basepath + qp, ext)) # Fragment self.assertEqual(basepath + ext + frag, util.extend_basepath(basepath + frag, ext)) # Query params & fragment self.assertEqual(basepath + ext + qp + frag, util.extend_basepath(basepath + qp + frag, ext)) def test_sanitize_file_name_for_api(self): allc = ''.join(map(chr, range(256))) self.assertEqual('foo', util.sanitize_file_name_for_api('foo')) self.assertEqual( 'config_foo.iso', util.sanitize_file_name_for_api( 'foo', prefix='config_', suffix='.iso')) self.assertEqual( '______________________________________________._0123456789_______' 'ABCDEFGHIJKLMN', util.sanitize_file_name_for_api(allc)) self.assertEqual( 'OPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz_____________________' '______________', util.sanitize_file_name_for_api(allc[79:]) ) self.assertEqual( '_________________________________________________________________' '______________', util.sanitize_file_name_for_api(allc[158:]) ) self.assertEqual('___________________', util.sanitize_file_name_for_api(allc[237:])) self.assertEqual( (dummyuuid1 + dummyuuid2[:7] + dummyuuid1).replace('-', '_'), util.sanitize_file_name_for_api( dummyuuid2, prefix=dummyuuid1, suffix=dummyuuid1)) self.assertEqual('I____________', util.sanitize_file_name_for_api( u'I \u611B \u01A4\u0177\u03C1\uFF4F\u05E9\u5DF3' u'\u5C3A\uFF56\uFF4D')) 
self.assertRaises(ValueError, util.sanitize_file_name_for_api, allc, prefix=allc, suffix=allc) self.assertRaises(ValueError, util.sanitize_file_name_for_api, '') # Non-default max_len values self.assertEqual('abcdefghijklmno', util.sanitize_file_name_for_api( 'abcdefghijklmnopqrstuvwxyz', max_len=const.MaxLen.VDISK_NAME)) self.assertEqual( 'abcdefghijklmnopqrstuvwxyz0123456789A', util.sanitize_file_name_for_api( 'abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNO', max_len=const.MaxLen.VOPT_NAME)) def test_sanitize_partition_name_for_api(self): allc = ''.join(map(chr, range(256))) self.assertEqual('foo', util.sanitize_partition_name_for_api('foo')) self.assertEqual('_______________________________', util.sanitize_partition_name_for_api(allc)) self.assertEqual('_ !_#_%_____+,-./0123456789:;_=', util.sanitize_partition_name_for_api(allc[31:])) self.assertEqual('__@ABCDEFGHIJKLMNOPQRSTUVWXYZ__', util.sanitize_partition_name_for_api(allc[62:])) self.assertEqual('_^__abcdefghijklmnopqrstuvwxyz{', util.sanitize_partition_name_for_api(allc[93:])) self.assertEqual('_}_____________________________', util.sanitize_partition_name_for_api(allc[124:])) for start in (155, 186, 217): self.assertEqual( '_______________________________', util.sanitize_partition_name_for_api(allc[start:])) self.assertEqual('________', util.sanitize_partition_name_for_api(allc[248:])) self.assertEqual('I _ _________', util.sanitize_partition_name_for_api( u'I \u611B \u01A4\u0177\u03C1\uFF4F\u05E9\u5DF3' u'\u5C3A\uFF56\uFF4D')) self.assertRaises(ValueError, util.sanitize_partition_name_for_api, allc, trunc_ok=False) self.assertRaises(ValueError, util.sanitize_partition_name_for_api, '') self.assertRaises(ValueError, util.sanitize_partition_name_for_api, None) # Tests for check_and_apply_xag covered by # test_adapter.TestAdapter.test_extended_path def test_part_id_by_loc_code(self): test_loc = 'U8247.22L.2125D6A-V2-C3' fail_loc = 'abc1234' self.assertEqual(util.part_id_by_loc_code(test_loc), 2) 
self.assertIsNone(util.part_id_by_loc_code(fail_loc)) def test_xag_attrs(self): base = const.DEFAULT_SCHEMA_ATTR self.assertEqual(dict(base), util.xag_attrs('')) self.assertEqual(dict(base), util.xag_attrs(None)) self.assertEqual(dict(base, group='foo'), util.xag_attrs('foo')) # Test other bases self.assertEqual(dict(one=2), util.xag_attrs(None, base=dict(one=2))) self.assertEqual(dict(one=2, group='foo'), util.xag_attrs('foo', base=dict(one=2))) @mock.patch.object(builtins, 'open') def test_my_partition_id(self, m_open): """Test my_partition_id.""" def rit(): for line in ('foo=bar\n', 'partition_id=1234\n', '\n', 'a=b\n'): yield line m_open.return_value.__enter__.return_value.__iter__.side_effect = rit self.assertEqual(1234, util.my_partition_id()) def test_parent_spec(self): """Test parent_spec.""" # All params are None (ROOT request) self.assertEqual((None, None), util.parent_spec(None, None, None)) # Get values from parent parent = mock.Mock(schema_type='schema_type', uuid='uuid') self.assertEqual(('schema_type', 'uuid'), util.parent_spec( parent, None, None)) # Parent overrides parent_type/parent_uuid self.assertEqual(('schema_type', 'uuid'), util.parent_spec( parent, 'something', 'else')) # ValueError if type xor uuid specified self.assertRaises(ValueError, util.parent_spec, None, 'one', None) self.assertRaises(ValueError, util.parent_spec, None, None, 'two') # Non-wrapper, non-string parent type raises ValueError self.assertRaises(ValueError, util.parent_spec, None, 42, 'foo') # parent_type can be wrapper or string self.assertEqual(('schema_type', 'uuid2'), util.parent_spec( None, parent, 'uuid2')) self.assertEqual(('schema_type2', 'uuid2'), util.parent_spec( None, 'schema_type2', 'uuid2')) def test_retry_io_command(self): class MyOSError(OSError): def __init__(self, errno): super(MyOSError, self).__init__() self.errno = errno class MyIOError(IOError): def __init__(self, errno): super(MyIOError, self).__init__() self.errno = errno class 
MyValError(ValueError): def __init__(self, errno): super(MyValError, self).__init__() self.errno = errno func = mock.Mock() mock_os_intr = MyOSError(4) mock_io_intr = MyIOError(4) mock_val_intr = MyValError(4) mock_os_hup = MyOSError(1) mock_io_hup = MyIOError(1) func.side_effect = [mock_os_intr, mock_io_intr, mock_val_intr] self.assertRaises(MyValError, util.retry_io_command, func) self.assertEqual(3, func.call_count) func.reset_mock() func.side_effect = mock_os_hup self.assertRaises(MyOSError, util.retry_io_command, func, 1, 'a') func.assert_called_once_with(1, 'a') func.reset_mock() func.side_effect = mock_io_hup self.assertRaises(MyIOError, util.retry_io_command, func) func.assert_called_once_with() class TestAllowedList(unittest.TestCase): def test_all_none(self): for cls in (util.VLANList, util.MACList): for val in ('ALL', 'NONE'): self.assertEqual(val, cls.unmarshal(val)) for val in ('ALL', 'NONE', 'all', 'none', 'aLl', 'nOnE'): self.assertEqual(val.upper(), cls.marshal(val)) self.assertEqual(val.upper(), cls.const_or_list(val)) self.assertEqual(val.upper(), cls.marshal([val])) self.assertEqual(val.upper(), cls.const_or_list([val])) def test_unmarshal(self): # Test VLAN lists self.assertEqual([1, 2], util.VLANList.unmarshal('1 2')) self.assertEqual([0], util.VLANList.unmarshal('0')) self.assertEqual([5, 6, 2230, 3340], util.VLANList.unmarshal('5 6 2230 3340')) # Test MAC lists self.assertEqual(['AB12CD34EF56', '12AB34CD56EF'], util.MACList.unmarshal('AB12CD34EF56 12AB34CD56EF')) self.assertEqual(['AB12CD34EF56'], util.MACList.unmarshal('AB12CD34EF56')) def test_marshal(self): # Test VLAN lists self.assertEqual('1 2', util.VLANList.marshal([1, 2])) self.assertEqual('0', util.VLANList.marshal([0])) self.assertEqual('5 6 2230 3340', util.VLANList.marshal([5, 6, '2230', 3340])) # Test MAC lists self.assertEqual('AB12CD34EF56 12AB34CD56EF', util.MACList.marshal( ['aB:12:Cd:34:eF:56', '12Ab34cD56Ef'])) self.assertEqual('AB12CD34EF56', util.MACList.marshal( 
['Ab:12:cD:34:Ef:56'])) # Test error cases for cls in (util.VLANList, util.MACList): self.assertRaises(ValueError, cls.marshal, None) self.assertRaises(ValueError, cls.marshal, '') self.assertRaises(ValueError, cls.marshal, ' ') self.assertRaises(ValueError, cls.marshal, 'bogus') def test_const_or_list(self): # Test VLAN lists for l2t in ([1, 2], [0], [5, 6, 2230, 3340]): self.assertEqual(l2t, util.VLANList.const_or_list(l2t)) # Test MAC lists self.assertEqual(['AB12CD34EF56', '12AB34CD56EF'], util.MACList.const_or_list( ['aB:12:Cd:34:eF:56', '12Ab34cD56Ef'])) self.assertEqual(['AB12CD34EF56'], util.MACList.const_or_list( ['Ab:12:cD:34:Ef:56'])) # Test error cases for cls in (util.VLANList, util.MACList): for meth in (cls.marshal, cls.const_or_list): self.assertRaises(ValueError, meth, None) self.assertRaises(ValueError, meth, '') self.assertRaises(ValueError, meth, ' ') self.assertRaises(ValueError, meth, 'bogus') self.assertRaises(ValueError, util.VLANList.marshal, ['1', 'NaN', 2]) self.assertRaises(ValueError, util.VLANList.const_or_list, ['1', 'NaN', 2]) pypowervm-1.1.24/pypowervm/tests/lib.py0000664000175000017500000000223113571367171017600 0ustar neoneo00000000000000# Copyright 2014, 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import six datapath = os.path.join(os.path.dirname(__file__), "data") def file2b(basename): """Reads a file into a byte string. :param basename: The base name (no path) of the file to consume. 
The file is expected to reside in the data/ subdirectory of the path containing this library. :return: Python 2- and 3-compatible byte string of the input file's contents, unaltered and unprocessed. """ with open(os.path.join(datapath, basename), "r") as fh: return six.b(fh.read()) pypowervm-1.1.24/pypowervm/tests/test_session.py0000664000175000017500000002352513571367171021565 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import gc import mock import os import requests.models as req_mod import requests.structures as req_struct import subunit import sys import testtools import pypowervm.adapter as adp import pypowervm.exceptions as pvmex import pypowervm.tests.lib as testlib import pypowervm.tests.test_fixtures as fx _logon_response_password = testlib.file2b("logon.xml") _logon_response_file = testlib.file2b("logon_file.xml") class TestSession(subunit.IsolatedTestCase, testtools.TestCase): """Test cases to test the Session classes and methods.""" @mock.patch('time.sleep') @mock.patch('lxml.etree.fromstring', new=mock.Mock()) @mock.patch('pypowervm.adapter.Session._get_auth_tok_from_file', new=mock.Mock()) def test_Session(self, mock_sleep): """Ensure Session can be instantiated, and test logon retries.""" # Passing in 0.0.0.0 will raise a ConnectionError or SSLError, but only # if it gets past all the __init__ setup since _logon is the last # statement. 
self.assertRaises((pvmex.ConnectionError, pvmex.SSLError), adp.Session, '0.0.0.0', 'uid', 'pwd') mock_sleep.assert_not_called() # Now set up a retry self.assertRaises((pvmex.ConnectionError, pvmex.SSLError), adp.Session, '0.0.0.0', 'uid', 'pwd', conn_tries=5) # 5 tries = 4 sleeps mock_sleep.assert_has_calls([mock.call(2)] * 4) # Ensure 404 on the logon URI also retries mock_sleep.reset_mock() with mock.patch('requests.Session.request') as mock_rq: mock_rq.side_effect = [mock.Mock(status_code=404), mock.Mock(status_code=204)] adp.Session(conn_tries=5) # Only retried once, after the 404 mock_sleep.assert_called_once_with(2) @mock.patch('pypowervm.adapter.Session._logon', new=mock.Mock()) @mock.patch('pypowervm.adapter._EventListener._get_events') def test_session_init(self, mock_get_evts): """Ensure proper parameter handling in the Session initializer.""" mock_get_evts.return_value = {'general': 'init'}, [], [] logfx = self.useFixture(fx.LoggingFx()) # No params - local, file-based, http. 
sess = adp.Session() self.assertTrue(sess.use_file_auth) self.assertIsNone(sess.password) self.assertTrue(sess.username.startswith('pypowervm_')) self.assertEqual('localhost', sess.host) self.assertEqual('http', sess.protocol) self.assertEqual(12080, sess.port) self.assertEqual('http://localhost:12080', sess.dest) self.assertEqual(1200, sess.timeout) self.assertEqual('/etc/ssl/certs/', sess.certpath) self.assertEqual('.crt', sess.certext) # localhost + http is okay self.assertEqual(0, logfx.patchers['warning'].mock.call_count) # Verify unique session names sess2 = adp.Session() self.assertNotEqual(sess.username, sess2.username) # Ensure proper protocol, port, and certpath defaulting when remote sess = adp.Session(host='host', username='user', password='pass') self.assertFalse(sess.use_file_auth) self.assertIsNotNone(sess.password) self.assertEqual('user', sess.username) self.assertEqual('host', sess.host) self.assertEqual('https', sess.protocol) self.assertEqual(12443, sess.port) self.assertEqual('https://host:12443', sess.dest) self.assertEqual(1200, sess.timeout) self.assertEqual('/etc/ssl/certs/', sess.certpath) self.assertEqual('.crt', sess.certext) # non-localhost + (implied) https is okay self.assertEqual(0, logfx.patchers['warning'].mock.call_count) @mock.patch('pypowervm.adapter.Session._logon', new=mock.Mock()) @mock.patch('pypowervm.adapter._EventListener._get_events') @mock.patch('imp.load_source') def test_session_ext_cfg(self, mock_load, mock_get_evts): """Test Session init with external config from env var.""" mock_get_evts.return_value = {'general': 'init'}, [], [] with mock.patch.dict(os.environ, {'PYPOWERVM_SESSION_CONFIG': 'path'}): sess = adp.Session() mock_load.assert_called_once_with('sesscfg', 'path') mock_load.return_value.session_config.assert_called_once_with(sess) @mock.patch('pypowervm.adapter.Session._logon') def test_session_init_remote_http(self, mock_logon): # Proper port defaulting and warning emitted when remote + http with 
self.assertLogs(adp.__name__, 'WARNING'): sess = adp.Session(host='host', protocol='http') self.assertEqual(12080, sess.port) @mock.patch.object(adp.Session, '_logon') @mock.patch.object(adp.Session, '_logoff') def test_session_clone(self, mock_logoff, mock_logon): sess = adp.Session() # Ensure the id that created the object is recorded. self.assertTrue(hasattr(sess, '_init_by')) # Create a shallow clone and ensure the _init_by does not match the id sess_clone = copy.copy(sess) self.assertTrue(hasattr(sess_clone, '_init_by')) self.assertNotEqual(sess._init_by, id(sess_clone)) # Now test what happens when the clone is garbage collected. self.assertFalse(mock_logoff.called) sess_clone = None gc.collect() # The clone was not logged off self.assertFalse(mock_logoff.called) # Deep copy (properly) raises TypeError in py2 and py3 >= 3.6. pyver = sys.version_info if pyver.major == 2 or (pyver.major == 3 and pyver.minor >= 6): # Ensure deep copies raise an exception. self.assertRaises(TypeError, copy.deepcopy, sess) else: # Or if works, it is not logged off sess_deepclone = copy.deepcopy(sess) # Make pep8 happy, use the clone self.assertIsNotNone(sess_deepclone) sess_deepclone = None gc.collect() # The clone was not logged off self.assertFalse(mock_logoff.called) sess = None gc.collect() # The original session was logged off self.assertTrue(mock_logoff.called) @mock.patch('pypowervm.util.validate_certificate') @mock.patch('requests.Session') def test_logon(self, mock_session, mock_validate_cert): """Ensure a Session can be created and log on to PowerVM.""" # Init test data host = '0.0.0.0' user = 'user' pwd = 'pwd' auditmemento = 'audit' # Create a Response object, that will serve as a mock return value my_response = req_mod.Response() my_response.status_code = 200 my_response.reason = 'OK' dict_headers = {'content-length': '576', 'x-powered-by': 'Servlet/3.0', 'set-cookie': 'JSESSIONID=0000a41BnJsGTNQvBGERA3wR1nj:' '759878cb-4f9a-4b05-a09a-3357abfea3b4; P' 'ath=/; Secure; 
HttpOnly, CCFWSESSION=E4' 'C0FFBE9130431DBF1864171ECC6A6E; Path=/;' ' Secure; HttpOnly', 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'x-transaction-id': 'XT10000073', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Wed, 23 Jul 2014 21:51:10 GMT', 'content-type': 'application/vnd.ibm.powervm.web+xml; ' 'type=LogonResponse'} my_response.headers = req_struct.CaseInsensitiveDict(dict_headers) my_response._content = _logon_response_password # Mock out the method and class we are not currently testing session = mock_session.return_value session.request.return_value = my_response # Run the actual test result = adp.Session(host, user, pwd, auditmemento=auditmemento) # Verify the result self.assertTrue(result._logged_in) self.assertEqual('PUIoR6x0kP6fQqA7qZ8sLZQJ8MLx9JHfLCYzT4oGFSE2WaGIhaFX' 'IyQYvbqdKNS8QagjBpPi9NP7YR_h61SOJ3krS_RvKAp-oCf2p8x8' 'uvQrrDv-dUzc17IT5DkR7_jv2qc8iUD7DJ6Rw53a17rY0p63KqPg' '9oUGd6Bn3fNDLiEwaBR4WICftVxUFj-tfWMOyZZY2hWEtN2K8ScX' 'vyFMe-w3SleyRbGnlR34jb0A99s=', result._sessToken) self.assertEqual(1, mock_validate_cert.call_count) # No X-MC-Type header => 'HMC' is assumed. self.assertEqual('HMC', result.mc_type) # Now test file-based authentication and X-MC-Type my_response._content = _logon_response_file # Local/HMC is bad self.assertRaises(pvmex.Error, adp.Session) my_response.headers['X-MC-Type'] = 'PVM' result = adp.Session() # Verify the result. self.assertTrue(result._logged_in) # Token read from token_file, as indicated by logon_file.xml response. self.assertEqual('file-based-auth-token', result._sessToken) # validate_certificate should not have been called again self.assertEqual(1, mock_validate_cert.call_count) self.assertEqual('PVM', result.mc_type) pypowervm-1.1.24/pypowervm/tests/test_adapter.py0000664000175000017500000014103213571367171021514 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import errno import fixtures from lxml import etree import six import subunit if six.PY2: import __builtin__ as builtins elif six.PY3: import builtins try: import urlparse except ImportError: import urllib.parse as urlparse import mock import requests.models as req_mod import requests.structures as req_struct import testtools import pypowervm.adapter as adp import pypowervm.const as c import pypowervm.entities as ent import pypowervm.exceptions as pvmex import pypowervm.tests.lib as testlib import pypowervm.tests.test_fixtures as fx from pypowervm.tests.test_utils import pvmhttp from pypowervm.wrappers import storage as pvm_stor logon_text = testlib.file2b("logon.xml") response_text = testlib.file2b("event.xml") NET_BRIDGE_FILE = 'fake_network_bridge.txt' class TestAdapter(testtools.TestCase): """Test cases to test the adapter classes and methods.""" def _mk_response(self, status, content=None): reasons = {200: 'OK', 204: 'No Content', 401: 'Unauthorized'} # Create a Response object, that will serve as a mock return value my_response = req_mod.Response() my_response.status_code = status my_response.reason = reasons[status] clen = '0' if status == 200 and content: clen = str(len(content)) dict_headers = { 'content-length': clen, 'x-powered-by': 'Servlet/3.0', 'set-cookie': ('JSESSIONID=0000a41BnJsGTNQvBGERA3wR1nj:759878cb-4f' '9a-4b05-a09a-3357abfea3b4; Path=/; Secure; HttpOnl' 'y, 
CCFWSESSION=E4C0FFBE9130431DBF1864171ECC6A6E; P' 'ath=/; Secure; HttpOnly'), 'expires': 'Thu, 01 Dec 1994 16:00:00 GMT', 'x-transaction-id': 'XT10000073', 'cache-control': 'no-cache="set-cookie, set-cookie2"', 'date': 'Wed, 23 Jul 2014 21:51:10 GMT', 'content-type': 'application/vnd.ibm.powervm'} my_response.headers = req_struct.CaseInsensitiveDict(dict_headers) my_response._content = content return my_response def setUp(self): super(TestAdapter, self).setUp() """Set up a mocked Session instance.""" # Init test data host = '0.0.0.0' user = 'user' pwd = 'pwd' auditmemento = 'audit' # Create a Response object, that will serve as a mock return value my_response = self._mk_response(200, logon_text) # Mock out the method and class we are not currently testing with mock.patch('requests.Session') as mock_session: session = mock_session.return_value session.request.return_value = my_response # Create session for the test to use self.sess = adp.Session(host, user, pwd, auditmemento=auditmemento, certpath=None) # Mock out the logoff, which gets called when the session # goes out of scope during tearDown() self.sess._logoff = mock.Mock() def tearDown(self): """Tear down the Session instance.""" self.sess = None super(TestAdapter, self).tearDown() @mock.patch('pypowervm.wrappers.event.Event.wrap') @mock.patch('time.sleep') def test_event_listener(self, mock_sleep, mock_evt_wrap): with mock.patch.object(adp._EventListener, '_get_events') as m_events,\ mock.patch.object(adp, '_EventPollThread') as mock_poll: # With some fake events, event listener can be initialized self.sess._sessToken = 'token'.encode('utf-8') m_events.return_value = {'general': 'init'}, 'raw_evt', 'wrap_evt' event_listen = self.sess.get_event_listener() self.assertIsNotNone(event_listen) # Register the fake handlers and ensure they are called evh = mock.Mock(spec=adp.EventHandler, autospec=True) raw_evh = mock.Mock(spec=adp.RawEventHandler, autospec=True) wrap_evh = mock.Mock(spec=adp.WrapperEventHandler, 
autospec=True) event_listen.subscribe(evh) event_listen.subscribe(raw_evh) event_listen.subscribe(wrap_evh) events, raw_events, evtwraps = event_listen._get_events() event_listen._dispatch_events(events, raw_events, evtwraps) evh.process.assert_called_once_with({'general': 'init'}) raw_evh.process.assert_called_once_with('raw_evt') wrap_evh.process.assert_called_once_with('wrap_evt') self.assertTrue(mock_poll.return_value.start.called) # Ensure getevents() gets legacy events self.assertEqual({'general': 'init'}, event_listen.getevents()) # Outside our patching of _get_events, get the formatted events with mock.patch.object(event_listen, '_format_events') as mock_format,\ mock.patch.object(event_listen.adp, 'read') as mock_read: # Ensure exception path doesn't kill the thread mock_read.side_effect = Exception() self.assertEqual(({}, [], []), event_listen._get_events()) self.assertEqual(1, mock_read.call_count) mock_format.assert_not_called() mock_evt_wrap.assert_not_called() mock_sleep.assert_called_once_with(5) mock_read.reset_mock() # side_effect takes precedence over return_value; so kill it. mock_read.side_effect = None # Fabricate some mock entries, so format gets called. 
mock_read.return_value.feed.entries = (['entry1', 'entry2']) self.assertEqual(({}, [], mock_evt_wrap.return_value), event_listen._get_events()) self.assertEqual(1, mock_read.call_count) mock_format.assert_has_calls([mock.call('entry1', {}, []), mock.call('entry2', {}, [])]) mock_evt_wrap.assert_called_once_with(mock_read.return_value) # Test _format_events event_data = [ { 'EventType': 'NEW_CLIENT', 'EventData': 'href1', 'EventID': '1', 'EventDetail': 'detail', }, { 'EventType': 'CACHE_CLEARED', 'EventData': 'href2', 'EventID': '2', 'EventDetail': 'detail2', }, { 'EventType': 'ADD_URI', 'EventData': 'LPAR1', 'EventID': '3', 'EventDetail': 'detail3', }, { 'EventType': 'DELETE_URI', 'EventData': 'LPAR1', 'EventID': '4', 'EventDetail': 'detail4', }, { 'EventType': 'INVALID_URI', 'EventData': 'LPAR1', 'EventID': '4', 'EventDetail': 'detail4', }, ] # Setup a side effect that returns events from the test data. def get_event_data(item): data = event_data[0][item] if item == 'EventDetail': event_data.pop(0) return data # Raw events returns a sequence the same as the test data raw_result = copy.deepcopy(event_data) # Legacy events overwrites some events. 
dict_result = {'general': 'invalidate', 'LPAR1': 'delete'} # Build a mock entry entry = mock.Mock() entry.element.findtext.side_effect = get_event_data events = {} raw_events = [] x = len(raw_result) while x: x -= 1 event_listen._format_events(entry, events, raw_events) self.assertEqual(raw_result, raw_events) self.assertEqual(dict_result, events) @mock.patch('pypowervm.adapter.Session') def test_empty_init(self, mock_sess): adp.Adapter() mock_sess.assert_called_with() def test_no_cache(self): self.assertRaises(pvmex.CacheNotSupportedException, adp.Adapter, use_cache=True) @mock.patch('requests.Session') def test_read(self, mock_session): """Test read() method found in the Adapter class.""" # Init test data root_type = 'ManagedSystem' root_id = 'caae9209-25e5-35cd-a71a-ed55c03f294d' child_type = 'child' child_id = 'child' suffix_type = 'quick' adapter = adp.Adapter(self.sess) # Create a Response object, that will serve as a mock return value read_response = self._mk_response(200, response_text) # Mock out the method and class we are not currently testing session = mock_session.return_value session.request.return_value = read_response # Run the actual test ret_read_value = adapter.read(root_type, root_id, child_type, child_id, suffix_type) # Verify Correct path was built in build_path() reqpath = adp.Adapter.build_path('uom', root_type, root_id, child_type, child_id, suffix_type) # Verify the return value # self.assertIsInstance(ret_read_value, adp.Response) self.assertEqual('GET', ret_read_value.reqmethod) self.assertEqual(200, ret_read_value.status) self.assertEqual(reqpath, ret_read_value.reqpath) @mock.patch('pypowervm.adapter.Adapter._validate') @mock.patch('pypowervm.adapter.Adapter.build_path') @mock.patch('pypowervm.adapter.Adapter.read_by_path') def test_read2(self, mock_rbp, mock_bld, mock_val): """Validate shallow flow & arg passing.""" adap = adp.Adapter(session=self.sess) # Defaults self.assertEqual(mock_rbp.return_value, adap.read('root_type')) 
mock_val.assert_called_once_with( 'read', 'root_type', None, None, None, None, None, None) mock_bld.assert_called_once_with( 'uom', 'root_type', None, None, None, None, None, None, xag=None, add_qp=None) mock_rbp.assert_called_once_with( mock_bld.return_value, None, timeout=-1, auditmemento=None, age=-1, sensitive=False, helpers=None) # Specified kwargs mock_val.reset_mock() mock_bld.reset_mock() mock_rbp.reset_mock() self.assertEqual(mock_rbp.return_value, adap.read( 'root_type', root_id='root_id', child_type='child_type', child_id='child_id', suffix_type='suffix_type', suffix_parm='suffix_parm', detail='detail', service='service', etag='etag', timeout='timeout', auditmemento='auditmemento', age='age', xag='xag', sensitive='sensitive', helpers='helpers', add_qp='add_qp')) mock_val.assert_called_once_with( 'read', 'root_type', 'root_id', 'child_type', 'child_id', 'suffix_type', 'suffix_parm', 'detail') mock_bld.assert_called_once_with( 'service', 'root_type', 'root_id', 'child_type', 'child_id', 'suffix_type', 'suffix_parm', 'detail', xag='xag', add_qp='add_qp') mock_rbp.assert_called_once_with( mock_bld.return_value, 'etag', timeout='timeout', auditmemento='auditmemento', age='age', sensitive='sensitive', helpers='helpers') @mock.patch('pypowervm.adapter.Adapter.extend_path') def test_build_path(self, mock_exp): """Validate build_path.""" adap = adp.Adapter(session=self.sess) # Defaults self.assertEqual(mock_exp.return_value, adap.build_path( 'service', 'root_type')) mock_exp.assert_called_once_with( '/rest/api/service/root_type', suffix_type=None, suffix_parm=None, detail=None, xag=None, add_qp=None) # child specs ignored if no root ID mock_exp.reset_mock() self.assertEqual(mock_exp.return_value, adap.build_path( 'service', 'root_type', child_type='child_type', child_id='child_id')) mock_exp.assert_called_once_with( '/rest/api/service/root_type', suffix_type=None, suffix_parm=None, detail=None, xag=None, add_qp=None) # child ID ignored if no child type 
mock_exp.reset_mock() self.assertEqual(mock_exp.return_value, adap.build_path( 'service', 'root_type', root_id='root_id', child_id='child_id')) mock_exp.assert_called_once_with( '/rest/api/service/root_type/root_id', suffix_type=None, suffix_parm=None, detail=None, xag=None, add_qp=None) # Specified kwargs (including full child spec mock_exp.reset_mock() self.assertEqual(mock_exp.return_value, adap.build_path( 'service', 'root_type', root_id='root_id', child_type='child_type', child_id='child_id', suffix_type='suffix_type', suffix_parm='suffix_parm', detail='detail', xag='xag', add_qp='add_qp')) mock_exp.assert_called_once_with( '/rest/api/service/root_type/root_id/child_type/child_id', suffix_type='suffix_type', suffix_parm='suffix_parm', detail='detail', xag='xag', add_qp='add_qp') @mock.patch('pypowervm.adapter.Adapter._request') def test_headers(self, mock_request): def validate_hdrs_func(acc=None, inm=None): expected_headers = {} if acc is not None: expected_headers['Accept'] = acc if inm is not None: expected_headers['If-None-Match'] = inm def validate_request(meth, path, **kwargs): self.assertEqual(expected_headers, kwargs['headers']) return validate_request adpt = adp.Adapter(mock.Mock()) basepath = c.API_BASE_PATH + 'uom/SomeRootObject' uuid = "abcdef01-2345-2345-2345-67890abcdef0" hdr_xml = 'application/atom+xml' hdr_json = '*/*' etag = 'abc123' # Root feed mock_request.side_effect = validate_hdrs_func(acc=hdr_xml) adpt._read_by_path(basepath, None, None, None, None) # Root instance with etag mock_request.side_effect = validate_hdrs_func(acc=hdr_xml, inm=etag) adpt._read_by_path(basepath + '/' + uuid, etag, None, None, None) # Quick root anchor (produces XML report of available quick properties mock_request.side_effect = validate_hdrs_func(acc=hdr_xml) adpt._read_by_path(basepath + '/quick', None, None, None, None) # Quick root instance (JSON of all quick properties) mock_request.side_effect = validate_hdrs_func(acc=hdr_json) 
adpt._read_by_path('/'.join([basepath, uuid, 'quick']), None, None, None, None) # Specific quick property mock_request.side_effect = validate_hdrs_func(acc=hdr_json) adpt._read_by_path('/'.join([basepath, uuid, 'quick', 'property']), None, None, None, None) # Explicit JSON file mock_request.side_effect = validate_hdrs_func(acc=hdr_json) adpt._read_by_path('/'.join([basepath, 'somefile.json']), None, None, None, None) # Object that happens to end in 'json' mock_request.side_effect = validate_hdrs_func(acc=hdr_xml) adpt._read_by_path('/'.join([basepath, 'xml_about_json']), None, None, None, None) # Quick with query params and fragments mock_request.side_effect = validate_hdrs_func(acc=hdr_json) adpt._read_by_path('/'.join([basepath, uuid, 'quick']) + '?group=None#frag', None, None, None, None) @mock.patch('requests.Session') def test_create(self, mock_session): """Test create() method found in the Adapter class.""" # Init test data adapter = adp.Adapter(self.sess) new_scsi = pvm_stor.VSCSIClientAdapterElement.bld(adapter) element = new_scsi root_type = 'ManagedSystem' root_id = 'id' child_type = 'LogicalPartition' create_response = self._mk_response(200, response_text) # Mock out the method and class we are not currently testing session = mock_session.return_value session.request.return_value = create_response # Run the actual test ret_create_value = adapter.create(element, root_type, root_id, child_type) # Verify Correct path was built in build_path() reqpath = adp.Adapter.build_path('uom', root_type, root_id, child_type, xag=[]) # Verify the return value # self.assertIsInstance(ret_create_value, adp.Response) self.assertEqual('PUT', ret_create_value.reqmethod) self.assertEqual(200, ret_create_value.status) self.assertEqual(reqpath, ret_create_value.reqpath) @mock.patch('requests.Session') def test_update(self, mock_session): """Test update() method found in the Adapter class.""" # Init test data data = 'data' etag = 'etag' root_type = 'root type' root_id = 'root 
id' adapter = adp.Adapter(self.sess) update_response = self._mk_response(200, response_text) # Mock out the method and class we are not currently testing session = mock_session.return_value session.request.return_value = update_response # Run the actual test ret_update_value = adapter.update(data, etag, root_type, root_id) # Verify Correct path was built in build_path() reqpath = adp.Adapter.build_path('uom', root_type, root_id) # Verify the return value # self.assertIsInstance(ret_update_value, adp.Response) self.assertEqual('POST', ret_update_value.reqmethod) self.assertEqual(200, ret_update_value.status) self.assertEqual(reqpath, ret_update_value.reqpath) @mock.patch('requests.Session') def test_upload(self, mock_session): # Build the adapter adapter = adp.Adapter(self.sess) # Mock data filedesc_mock = mock.MagicMock() filedesc_mock.findtext.side_effect = ['uuid', 'mime'] with mock.patch.object(adapter, '_request') as mock_request: adapter.upload_file(filedesc_mock, None) # Validate expected_headers = {'Accept': 'application/vnd.ibm.powervm.web+xml', 'Content-Type': 'mime'} expected_path = '/rest/api/web/File/contents/uuid' mock_request.assert_called_once_with( 'PUT', expected_path, helpers=None, headers=expected_headers, timeout=-1, auditmemento=None, filehandle=None, chunksize=65536) def _test_upload_request(self, mock_rq, mock_fh, fhdata): """Test an upload requests with different kinds of "filehandle".""" adapter = adp.Adapter(self.sess) mock_fd = mock.Mock(findtext=mock.Mock(side_effect=['uuid', 'mime'])) def check_request(method, url, data=None, headers=None, timeout=None): """Validate the session.request call.""" self.assertEqual('PUT', method) self.assertEqual( self.sess.dest + '/rest/api/web/File/contents/uuid', url) # Verify that data is iterable self.assertEqual(fhdata, [chunk for chunk in data]) return mock.Mock(status_code=c.HTTPStatus.OK_NO_CONTENT) mock_rq.side_effect = check_request adapter.upload_file(mock_fd, mock_fh) 
    @mock.patch('requests.sessions.Session.request')
    def test_upload_request_iter(self, mock_rq):
        """Test an upload request with an iterable."""
        fhdata = ['one', 'two']
        # The "filehandle" is already iterable, so it is also the expected
        # chunk sequence.
        self._test_upload_request(mock_rq, fhdata, fhdata)

    @mock.patch('requests.sessions.Session.request')
    def test_upload_request_fh(self, mock_rq):
        """Test an upload request with a filehandle."""
        # filehandle is a read()able; a None from read() signals EOF.
        fhdata = ['one', 'two']
        mock_fh = mock.Mock(read=mock.Mock(side_effect=(fhdata + [None])))
        self._test_upload_request(mock_rq, mock_fh, fhdata)
        # Make sure the file handle's read method was invoked once per chunk,
        # with the default 64K chunk size.
        mock_fh.read.assert_has_calls([mock.call(65536)] * len(fhdata))

    def _assert_paths_equivalent(self, exp, act):
        """Ensures two paths or hrefs are "the same".

        Query parameter keys may be specified in any order, though their
        values must match exactly.  The rest of the path must be identical.

        :param exp: Expected path
        :param act: Actual path (produced by test)
        """
        p_exp = urlparse.urlparse(exp)
        p_act = urlparse.urlparse(act)
        self.assertEqual(p_exp.scheme, p_act.scheme)
        self.assertEqual(p_exp.netloc, p_act.netloc)
        self.assertEqual(p_exp.path, p_act.path)
        self.assertEqual(p_exp.fragment, p_act.fragment)
        # Compare query strings order-insensitively: parse_qs yields
        # key => [values]; sort each value list so multi-instance params
        # compare equal regardless of their order in the querystring.
        qs_exp = urlparse.parse_qs(p_exp.query)
        qs_act = urlparse.parse_qs(p_act.query)
        for vals in qs_exp.values():
            vals.sort()
        for vals in qs_act.values():
            vals.sort()
        self.assertEqual(qs_exp, qs_act)

    @mock.patch('requests.Session')
    def test_extend_path(self, mock_session):
        """Validate Adapter.extend_path suffix/detail/xag/add_qp handling."""
        # Init test data
        adapter = adp.Adapter(self.sess)
        # Single XAG in a list
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm', detail='detail',
                                   xag=[c.XAG.VIO_FMAP])
        expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
                         'group=ViosFCMapping')
        self._assert_paths_equivalent(expected_path, path)

        # Multiple XAGs in a set
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm', detail='detail',
                                   xag={c.XAG.VIO_FMAP, c.XAG.VIO_NET})
        expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
                         'group=ViosFCMapping,ViosNetwork')
        self._assert_paths_equivalent(expected_path, path)

        # Verify sorting (XAGs emitted in sorted order regardless of input
        # order)
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm', detail='detail',
                                   xag=[c.XAG.VIO_NET, c.XAG.VIO_FMAP])
        expected_path = ('basepath/suffix/suffix_parm?detail=detail&'
                         'group=ViosFCMapping,ViosNetwork')
        self._assert_paths_equivalent(expected_path, path)

        # Explicitly no XAG
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm', detail='detail',
                                   xag=[])
        expected_path = 'basepath/suffix/suffix_parm?detail=detail'
        self._assert_paths_equivalent(expected_path, path)

        # Ensure unspecified XAG defaults to group=None
        path = adapter.extend_path('basepath', suffix_type='suffix',
                                   suffix_parm='suffix_parm')
        expected_path = 'basepath/suffix/suffix_parm?group=None'
        self._assert_paths_equivalent(expected_path, path)

        # ...except for specific suffix types 'quick' and 'do', which never
        # get a group= qualifier
        path = adapter.extend_path('basepath', suffix_type='quick',
                                   suffix_parm='suffix_parm')
        expected_path = 'basepath/quick/suffix_parm'
        self._assert_paths_equivalent(expected_path, path)

        path = adapter.extend_path('basepath', suffix_type='do',
                                   suffix_parm='suffix_parm')
        expected_path = 'basepath/do/suffix_parm'
        self._assert_paths_equivalent(expected_path, path)

        # Ensure arg xags and path xags interact correctly
        # path_xag=None, arg_xag=None => group=None
        self._assert_paths_equivalent(
            'basepath?group=None', adapter.extend_path('basepath'))

        # path_xag='None', arg_xag=None => group=None
        self._assert_paths_equivalent(
            'basepath?group=None',
            adapter.extend_path('basepath?group=None'))

        # path_xag='a,b,c', arg_xag=None => group=a,b,c
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath?group=a,b,c'))

        # path_xag=None, arg_xag=() => no group=
        self._assert_paths_equivalent(
            'basepath', adapter.extend_path('basepath', xag=()))

        # path_xag='None', arg_xag={} => no group=
        self._assert_paths_equivalent(
            'basepath',
            adapter.extend_path('basepath?group=None', xag={}))

        # path_xag='a,b,c', arg_xag=[] => ValueError (conflicting directives)
        self.assertRaises(
            ValueError, adapter.extend_path, 'basepath?group=a,b,c', xag=[])

        # path_xag=None, arg_xag='a,b,c' => group='a,b,c'
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath', xag={'a', 'b', 'c'}))

        # path_xag='None', arg_xag='a,b,c' => group='a,b,c'
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath?group=None', xag=('a', 'b', 'c')))

        # path_xag='a,b,c', arg_xag='a,b,c' => group='a,b,c'
        self._assert_paths_equivalent(
            'basepath?group=a,b,c',
            adapter.extend_path('basepath?group=a,b,c', xag=['a', 'b', 'c']))

        # path_xag='a,b,c', arg_xag='d,e,f' => ValueError
        self.assertRaises(ValueError, adapter.extend_path,
                          'basepath?group=a,b,c', xag=['d', 'e', 'f'])

        # Multi-instance query params properly reassembled.
        self._assert_paths_equivalent(
            'basepath?foo=1,2,3&group=a,b,c&foo=4,5,6',
            adapter.extend_path('basepath?foo=4,5,6&group=None&foo=1,2,3',
                                xag=['a', 'b', 'c']))

        # Additional queryparams (add_qp)
        # Explicit None
        self._assert_paths_equivalent(
            'basepath', adapter.extend_path('basepath', xag=[], add_qp=None))

        # Proper escaping
        self._assert_paths_equivalent(
            'basepath?one=%23%24%25%5E%26',
            adapter.extend_path('basepath', xag=[], add_qp=[('one', '#$%^&')]))

        # Duplicated keys (order preserved) and proper handling of non-strings
        self._assert_paths_equivalent(
            'basepath?1=3&1=2',
            adapter.extend_path('basepath', xag=[], add_qp=[(1, 3), (1, 2)]))

        # Proper behavior combined with implicit xag
        self._assert_paths_equivalent(
            'basepath?group=None&key=value&something=else',
            adapter.extend_path(
                'basepath', add_qp=[('key', 'value'), ('something', 'else')]))

        # Combined with xags and an existing querystring
        self._assert_paths_equivalent(
            'basepath?already=here&group=a,b,c&key=value&something=else',
            adapter.extend_path(
                'basepath?already=here', xag=['a', 'b', 'c'],
                add_qp=[('key', 'value'), ('something', 'else')]))
@mock.patch('pypowervm.adapter.LOG') @mock.patch('pypowervm.adapter.Adapter.read_by_path') def test_read_by_href(self, mock_read_by_path, mock_log): """Ensure read_by_href correctly extends, preserves query strings.""" def validate_read_by_path(expected): def _read_by_path(path, etag, timeout, auditmemento, age, sensitive, helpers): self._assert_paths_equivalent(expected, path) for param in (etag, auditmemento, helpers): self.assertIsNone(param) for param2 in (age, timeout): self.assertEqual(-1, param2) self.assertFalse(sensitive) return _read_by_path self.sess.host = 'foo' self.sess.port = 123 adapter = adp.Adapter(self.sess) mock_read_by_path.side_effect = validate_read_by_path( '/rest/api/uom/Bar?k=v&group=None#frag') adapter.read_by_href('http://foo:123/rest/api/uom/Bar?k=v#frag') self.assertFalse(mock_log.debug.called) self.sess.host = 'bar' mock_read_by_path.side_effect = validate_read_by_path( '/rest/api/uom/Bar?k=v&group=None#frag') adapter.read_by_href('http://foo:123/rest/api/uom/Bar?k=v#frag') self.assertTrue(mock_log.debug.called) mock_read_by_path.side_effect = validate_read_by_path( '/rest/api/uom/Bar?k=v&group=RealGroup#frag') adapter.read_by_href( 'http://foo:123/rest/api/uom/Bar?k=v&group=RealGroup#frag') @mock.patch('requests.Session') def test_delete(self, mock_session): """Test delete() method found in the Adapter class.""" # Init test data root_type = 'ManagedSystem' root_id = 'id' adapter = adp.Adapter(self.sess) delete_response = self._mk_response(204) # Mock out the method and class we are not currently testing session = mock_session.return_value session.request.return_value = delete_response # Run the actual test ret_delete_value = adapter.delete(root_type, root_id) # Verify Correct path was built in build_path() reqpath = adp.Adapter.build_path('uom', root_type, root_id, xag=[]) # Verify the return value # self.assertIsInstance(ret_delete_value, adp.Response) self.assertEqual('DELETE', ret_delete_value.reqmethod) self.assertEqual(204, 
ret_delete_value.status) self.assertEqual(reqpath, ret_delete_value.reqpath) @mock.patch.object(builtins, 'open') def test_auth_file_error(self, mock_open_patch): mock_open_patch.side_effect = IOError(errno.EACCES, 'Error') self.assertRaises(pvmex.AuthFileReadError, self.sess._get_auth_tok_from_file, mock.Mock(), mock.Mock()) mock_open_patch.side_effect = IOError(errno.EIO, 'Error') self.assertRaises(pvmex.AuthFileAccessError, self.sess._get_auth_tok_from_file, mock.Mock(), mock.Mock()) @mock.patch('pypowervm.adapter.LOG') @mock.patch('requests.Session') def test_unauthorized_error(self, mock_session, mock_log): """401 (unauthorized) calling Adapter.create().""" # Init test data adapter = adp.Adapter(self.sess) new_scsi = pvm_stor.VSCSIClientAdapterElement.bld(adapter) element = new_scsi root_type = 'ManagedSystem' root_id = 'id' child_type = 'LogicalPartition' create_response = self._mk_response(401) # Mock out the method and class we are not currently testing session = mock_session.return_value session.request.return_value = create_response # Run the actual test self.assertRaises(pvmex.HttpError, adapter.create, element, root_type, root_id, child_type) self.assertEqual(1, mock_log.warning.call_count) def test_element_iter(self): """Test the ETElement iter() method found in the Adapter class.""" # Init test data children = [ent.Element('Type1', None, text='T1_0'), ent.Element('Type12', None, text='T12_0'), ent.Element('Type1', None, text='T1_1'), ent.Element('Type12', None, text='T12_1'), ent.Element('Type1', None, text='T1_2')] top_element = ent.Element('Top', None, attrib={'schemaVersion': 'V1_0'}, children=children) def _count_elem(top, tag, it=None, assert_tag=True): elem_count = 0 it = it if it else top.iter(tag=tag) for elem in it: if assert_tag: self.assertEqual(elem.tag, tag) elem_count += 1 return elem_count # Run the actual tests # Ensure all elements are traversed if we don't specify a tag self.assertEqual(_count_elem(top_element, 'Type1', 
it=top_element.iter(), assert_tag=False), 6) # Ensure all elements are traversed for tag=* self.assertEqual(_count_elem(top_element, 'Type1', it=top_element.iter(tag='*'), assert_tag=False), 6) # Ensure all elements are traversed for tag=None self.assertEqual(_count_elem(top_element, 'Type1', it=top_element.iter(tag=None), assert_tag=False), 6) # Get only the Type1 elements self.assertEqual(_count_elem(top_element, 'Type1'), 3) # Get only the top self.assertEqual(_count_elem(top_element, 'Top'), 1) @mock.patch('pypowervm.entities.Feed.unmarshal_atom_feed') @mock.patch('pypowervm.entities.Entry.unmarshal_atom_entry') @mock.patch('lxml.etree.fromstring') def test_extract_atom(self, mock_fromstring, mock_unm_ent, mock_unm_feed): resp = adp.Response('meth', '/rest/api/uom/Debug/SetLoggingLevel', 'status', 'reason', 'headers', body='body') feed_ret = mock.Mock(tag=etree.QName(c.ATOM_NS, 'feed')) entry_ret = mock.Mock(tag=etree.QName(c.ATOM_NS, 'entry')) # Empty content; "Response is not an Atom feed/entry" mock_fromstring.return_value = None self.assertIsNotNone(resp._extract_atom()) mock_fromstring.assert_called_with('body') mock_unm_feed.assert_not_called() mock_unm_ent.assert_not_called() # Unmarshal feed (returns None) mock_fromstring.return_value = feed_ret self.assertIsNone(resp._extract_atom()) mock_unm_feed.assert_called_once_with(feed_ret, resp) mock_unm_ent.assert_not_called() mock_unm_feed.reset_mock() # Unmarshal entry (returns None) mock_fromstring.return_value = entry_ret self.assertIsNone(resp._extract_atom()) mock_unm_ent.assert_called_once_with(entry_ret, resp) mock_unm_feed.assert_not_called() mock_unm_ent.reset_mock() # Unmarshal a 'Debug' response (returns None) mock_fromstring.return_value = mock.Mock(tag='debug output') self.assertIsNone(resp._extract_atom()) mock_unm_feed.assert_not_called() mock_unm_ent.assert_not_called() # 'fromstring' raises. 
Make sure the return message came from the # right place (will include the exception text) mock_fromstring.side_effect = Exception("test_extract_atom") self.assertIn("test_extract_atom", resp._extract_atom()) mock_unm_feed.assert_not_called() mock_unm_ent.assert_not_called() @mock.patch('pypowervm.adapter.Adapter.read') def test_sys_uuid(self, mock_read): # Set and return the sys_uuid if not yet defined adapter = adp.Adapter(self.sess) mock_resp = mock.MagicMock() mock_resp.feed.entries[0].uuid = 'uuid' mock_read.return_value = mock_resp sys_uuid = adapter.sys_uuid mock_read.assert_called_once_with('ManagedSystem') self.assertEqual('uuid', sys_uuid) self.assertEqual('uuid', adapter._sys_uuid) # Return sys_uuid if defined already mock_read.reset_mock() sys_uuid = adapter.sys_uuid mock_read.assert_not_called() class TestElement(testtools.TestCase): def setUp(self): super(TestElement, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt def test_cdata(self): no_cdata = ent.Element('tag', self.adpt, text='text', cdata=False) with_cdata = ent.Element('tag', self.adpt, text='text', cdata=True) self.assertEqual( no_cdata.toxmlstring(), 'text'.encode('utf-8')) self.assertEqual( with_cdata.toxmlstring(), ''.encode('utf-8')) def test_tag_namespace(self): el = ent.Element('tag', self.adpt) self.assertEqual(el.element.tag, '{http://www.ibm.com/xmlns/systems/po' 'wer/firmware/uom/mc/2012_10/}tag') # entities.Element.tag strips the namespace self.assertEqual(el.tag, 'tag') self.assertEqual(el.namespace, 'http://www.ibm.com/xmlns/systems/powe' 'r/firmware/uom/mc/2012_10/') # Test setter el.tag = 'gat' self.assertEqual(el.element.tag, '{http://www.ibm.com/xmlns/systems/po' 'wer/firmware/uom/mc/2012_10/}gat') self.assertEqual(el.tag, 'gat') el.namespace = 'foo' self.assertEqual(el.namespace, 'foo') # Now with no namespace el = ent.Element('tag', self.adpt, ns='') self.assertEqual(el.element.tag, 'tag') self.assertEqual(el.tag, 'tag') self.assertEqual(el.namespace, '') 
el.tag = 'gat' self.assertEqual(el.element.tag, 'gat') self.assertEqual(el.tag, 'gat') el.namespace = 'foo' self.assertEqual(el.namespace, 'foo') class TestAdapterClasses(subunit.IsolatedTestCase, testtools.TestCase): def setUp(self): super(TestAdapterClasses, self).setUp() self.mock_logoff = self.useFixture( fixtures.MockPatchObject(adp.Session, '_logoff')).mock self.mock_logon = self.useFixture( fixtures.MockPatchObject(adp.Session, '_logon')).mock self.mock_events = self.useFixture( fixtures.MockPatchObject(adp._EventListener, '_get_events')).mock # Mock the initial events coming in on start self.mock_events.return_value = {'general': 'init'}, [], [] def test_instantiation(self): """Direct instantiation of EventListener is not allowed.""" # Get a session sess = adp.Session() # Now get the EventListener self.assertRaises(TypeError, adp.EventListener, sess) # Mock the session token like we logged on sess._sessToken = 'token'.encode('utf-8') # Ensure we get an EventListener self.assertIsInstance(sess.get_event_listener(), adp.EventListener) def test_shutdown_session(self): # Get a session sess = adp.Session() # Fake the session token like we logged on sess._sessToken = 'token'.encode('utf-8') # It should have logged on self.assertTrue(self.mock_logon.called) # Construct and get the event listener event_listen = sess.get_event_listener() # Test the circular reference (but one link is weak) sess.hello = 'hello' self.assertEqual(sess.hello, event_listen.adp.session.hello) # Test that we haven't already logged off self.assertFalse(self.mock_logoff.called) with mock.patch.object(event_listen, 'shutdown', wraps=event_listen.shutdown) as mock_elshutdown: # Stop referencing the session sess = None # Test that the event listener's shutdown has been triggered mock_elshutdown.assert_called() # Test that logoff has occurred self.assertTrue(self.mock_logoff.called) def test_shutdown_adapter(self): # Get Adapter adapter = adp.Adapter() # Fake the implicit session token like we 
logged on adapter.session._sessToken = 'token'.encode('utf-8') # Construct and get the event listener adapter.session.get_event_listener() # Turn off the event listener adapter.session.get_event_listener().shutdown() # Session is still active self.assertFalse(self.mock_logoff.called) # Stop referencing the adapter adapter = None # Test that logoff has occurred self.assertTrue(self.mock_logoff.called) class TestElementInject(testtools.TestCase): def setUp(self): super(TestElementInject, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt self.ordering_list = ('AdapterType', 'UseNextAvailableSlotID', 'RemoteLogicalPartitionID', 'RemoteSlotNumber') self.child_at = ent.Element('AdapterType', self.adpt, text='Client') self.child_unasi = ent.Element('UseNextAvailableSlotID', self.adpt, text='true') self.child_rlpi1 = ent.Element('RemoteLogicalPartitionID', self.adpt, text='1') self.child_rlpi2 = ent.Element('RemoteLogicalPartitionID', self.adpt, text='2') self.child_rlpi3 = ent.Element('RemoteLogicalPartitionID', self.adpt, text='3') self.child_rsn = ent.Element('RemoteSlotNumber', self.adpt, text='12') self.all_children = [ self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn] def _mk_el(self, children): return ent.Element('VirtualSCSIClientAdapter', self.adpt, attrib={'schemaVersion': 'V1_0'}, children=children) def assert_expected_children(self, parent, *expected_children): """Assert that *children are the children of parent, in that order. :param parent: Parent adapter.Element :param children: Child adapter.Elements """ # etree.Element doesn't implement __eq__, so different instances of the # same Element aren't "equal". Compare XML strings instead. 
actual = [etree.tostring(elem) for elem in list(parent.element)] expected = [etree.tostring(chld.element) for chld in expected_children] self.assertEqual(actual, expected) def test_no_children(self): """Inject when the element has no children - should "append".""" el = self._mk_el([]) el.inject(self.child_rlpi1) self.assert_expected_children(el, self.child_rlpi1) # Result should be same regardless of other params el = self._mk_el([]) el.inject(self.child_rlpi1, self.ordering_list, replace=False) self.assert_expected_children(el, self.child_rlpi1) def test_subelement_found_one_replace_true(self): """Replace existing child with same tag.""" el = self._mk_el(self.all_children) el.inject(self.child_rlpi2, self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi2, self.child_rsn) # Proving default replace=True - same result if specified el = self._mk_el(self.all_children) el.inject(self.child_rlpi2, self.ordering_list, replace=True) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi2, self.child_rsn) def test_subelement_found_mult_replace_true(self): """Replace existing child with same tag when >1 such children. Should replace the last such child. 
""" el = self._mk_el([self.child_at, self.child_unasi, self.child_rlpi1, self.child_rlpi3, self.child_rsn]) el.inject(self.child_rlpi2, self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rlpi2, self.child_rsn) def test_subelement_found_replace_false(self): """Inject after existing child(ren) with same tag.""" el = self._mk_el(self.all_children) el.inject(self.child_rlpi2, self.ordering_list, False) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rlpi2, self.child_rsn) el.inject(self.child_rlpi3, self.ordering_list, False) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rlpi2, self.child_rlpi3, self.child_rsn) def test_subelement_not_in_ordering_list(self): """Subelement not in ordering list - should append.""" el = self._mk_el(self.all_children) ch = ent.Element('SomeNewElement', self.adpt, text='foo') el.inject(ch, ordering_list=self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn, ch) def test_first_populated(self): """Inject the first child when children are otherwise populated.""" el = self._mk_el(self.all_children[1:]) el.inject(self.child_at, self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn) def test_first_sparse(self): """Inject the first child when children are sparsely populated.""" # This is most interesting when the existing child is not the one right # next to the injectee. 
el = self._mk_el([self.child_rlpi1]) el.inject(self.child_at, self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_rlpi1) def test_last_populated(self): """Inject the last child when children are otherwise populated.""" el = self._mk_el(self.all_children[:-1]) el.inject(self.child_rsn, self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn) def test_last_sparse(self): """Inject the last child when children are sparsely populated.""" # This is most interesting when the existing child is not the one right # next to the injectee. el = self._mk_el([self.child_unasi]) el.inject(self.child_rsn, self.ordering_list) self.assert_expected_children(el, self.child_unasi, self.child_rsn) def test_middle_populated(self): """Inject a middle child when children are otherwise populated.""" el = self._mk_el([self.child_at, self.child_unasi, self.child_rsn]) el.inject(self.child_rlpi1, self.ordering_list) self.assert_expected_children(el, self.child_at, self.child_unasi, self.child_rlpi1, self.child_rsn) def test_middle_sparse(self): """Inject a middle child when children are sparsely populated.""" el = self._mk_el([self.child_at, self.child_rsn]) el.inject(self.child_rlpi1, self.ordering_list) self.assert_expected_children( el, self.child_at, self.child_rlpi1, self.child_rsn) class TestElementWrapper(testtools.TestCase): """Tests for the ElementWrapper class.""" def setUp(self): super(TestElementWrapper, self).setUp() self.resp = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response() self.nb1 = self.resp.feed.entries[0] self.resp2 = pvmhttp.load_pvm_resp(NET_BRIDGE_FILE).get_response() self.nb2 = self.resp2.feed.entries[0] def test_equality(self): """Validates that two elements loaded from the same data is equal.""" sea1 = self._find_seas(self.nb1)[0] sea2 = self._find_seas(self.nb2)[0] self.assertTrue(sea1 == sea2) # Change the other SEA sea2.element.append(etree.Element('Bob')) 
self.assertFalse(sea1 == sea2) def test_inequality_by_subelem_change(self): sea1 = self._find_seas(self.nb1)[0] sea2 = self._find_seas(self.nb2)[0] sea_trunk = sea2.findall('TrunkAdapters/TrunkAdapter')[0] pvid = sea_trunk.find('PortVLANID') pvid.text = '1' self.assertFalse(sea1 == sea2) def _find_seas(self, entry): """Wrapper for the SEAs.""" return entry.element.findall('SharedEthernetAdapters/' 'SharedEthernetAdapter') pypowervm-1.1.24/pypowervm/tests/test_helpers.py0000664000175000017500000000664613571367171021551 0ustar neoneo00000000000000# Copyright 2014, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import unittest import mock import pypowervm.adapter as adp def cat_string_helper(func, string): def wrapper(*args, **kwds): return func(*args, **kwds) + string return wrapper class TestHelpers(unittest.TestCase): def test_none(self): adpt = adp.Adapter('mock_session', helpers=None) self.assertEqual([], adpt.helpers) def test_single(self): hlp = functools.partial(cat_string_helper, string="purple!") adpt = adp.Adapter('mock_session', helpers=hlp) self.assertEqual([hlp], adpt.helpers) def test_single_list(self): hlp = functools.partial(cat_string_helper, string="purple!") hlp_list = [hlp] adpt = adp.Adapter('mock_session', helpers=hlp_list) self.assertEqual(hlp_list, adpt.helpers) # Use this test to ensure the list returned is a copy self.assertNotEqual(id(hlp_list), id(adpt.helpers)) def test_multi_list(self): hlp1 = functools.partial(cat_string_helper, string="1") hlp2 = functools.partial(cat_string_helper, string="2") adpt = adp.Adapter('mock_session', helpers=[hlp1, hlp2]) self.assertEqual([hlp1, hlp2], adpt.helpers) @mock.patch('pypowervm.adapter.Session') def test_no_helpers(self, mock_sess): mock_sess.request.return_value = 'ReturnValue' adpt = adp.Adapter(mock_sess) self.assertEqual('ReturnValue', adpt._request('method', 'path')) @mock.patch('pypowervm.adapter.Session') def test_runs(self, mock_sess): hlp1 = functools.partial(cat_string_helper, string="1") hlp2 = functools.partial(cat_string_helper, string="2") hlp3 = functools.partial(cat_string_helper, string="3") mock_sess.request.return_value = 'countdown:' adpt = adp.Adapter( mock_sess, helpers=[hlp1, hlp2, hlp3]) self.assertEqual('countdown:321', adpt._request('method', 'path')) # Override adapter helpers self.assertEqual('countdown:2', adpt._request('method', 'path', helpers=hlp2)) # No adapter helpers, but request helper adpt = adp.Adapter(mock_sess) self.assertEqual('countdown:1', adpt._request('method', 'path', helpers=[hlp1])) @mock.patch('pypowervm.adapter.Session') def 
test_invalid_helper(self, mock_sess): hlp = "bad helper, shame on you" mock_sess.request.return_value = 'Should not get returned' adpt = adp.Adapter(mock_sess, helpers=hlp) with self.assertRaises(TypeError): adpt._request('method', 'path') adpt = adp.Adapter(mock_sess) with self.assertRaises(TypeError): adpt._request('method', 'path', helpers=[hlp]) pypowervm-1.1.24/pypowervm/tests/test_traits.py0000664000175000017500000001317613571367171021411 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest import mock import requests.models as req_mod import requests.structures as req_struct import pypowervm.adapter as adp import pypowervm.tests.lib as testlib from pypowervm.tests.test_utils import pvmhttp from pypowervm import traits import pypowervm.wrappers.entry_wrapper as ewrap import pypowervm.wrappers.network as net import pypowervm.wrappers.storage as stor _logon_response_text = testlib.file2b("logon_file.xml") _feed_file = pvmhttp.load_pvm_resp( "fake_network_bridge.txt").response.body.encode('utf-8') _entry_file = pvmhttp.load_pvm_resp( "fake_volume_group.txt").response.body.encode('utf-8') class TestTraits(unittest.TestCase): @mock.patch('pypowervm.adapter.Session') def test_traits(self, mock_sess): # PVM MC, local auth mock_sess.mc_type = 'PVM' mock_sess.use_file_auth = True t = traits.APITraits(mock_sess) self.assertFalse(t.vnet_aware) self.assertFalse(t._is_hmc) self.assertTrue(t.local_api) self.assertFalse(t.has_lpar_profiles) self.assertTrue(t.dynamic_pvid) self.assertTrue(t.rmdev_job_available) self.assertTrue(t.has_high_slot) self.assertTrue(t.vea_as_ibmi_console) # PVM MC, remote auth mock_sess.mc_type = 'PVM' mock_sess.use_file_auth = False t = traits.APITraits(mock_sess) self.assertFalse(t.vnet_aware) self.assertFalse(t._is_hmc) self.assertFalse(t.local_api) self.assertFalse(t.has_lpar_profiles) self.assertTrue(t.dynamic_pvid) self.assertTrue(t.rmdev_job_available) self.assertTrue(t.has_high_slot) self.assertTrue(t.vea_as_ibmi_console) # HMC, remote auth mock_sess.mc_type = 'HMC' mock_sess.use_file_auth = False t = traits.APITraits(mock_sess) self.assertTrue(t.vnet_aware) self.assertTrue(t._is_hmc) self.assertFalse(t.local_api) self.assertTrue(t.has_lpar_profiles) self.assertFalse(t.dynamic_pvid) self.assertFalse(t.rmdev_job_available) self.assertFalse(t.has_high_slot) self.assertFalse(t.vea_as_ibmi_console) @mock.patch('requests.Session.request') def test_traits_into_wrappers(self, mock_request): # Note traits param is None, 
which reflects the real value of # self.traits during _logon's request. httpresp = req_mod.Response() httpresp._content = _logon_response_text httpresp.status_code = 200 httpresp.headers = req_struct.CaseInsensitiveDict( {'X-MC-Type': 'PVM', 'content-type': 'application/vnd.ibm.powervm.web+xml; type=LogonResponse'}) mock_request.return_value = httpresp sess = adp.Session() self.assertEqual('PVM', sess.mc_type) self.assertIsNotNone(sess.traits) self.assertTrue(sess.traits.local_api) self.assertFalse(sess.traits._is_hmc) adapter = adp.Adapter(sess) self.assertEqual(sess.traits, adapter.traits) # Response => Feed => Entrys => EntryWrappers => sub-ElementWrappers httpresp._content = _feed_file resp = adapter.read('NetworkBridge') self.assertEqual(sess.traits, resp.adapter.traits) nblist = net.NetBridge.wrap(resp) for nb in nblist: self.assertIsInstance(nb, net.NetBridge) self.assertEqual(sess.traits, nb.traits) seas = nblist[0].seas for sea in seas: self.assertIsInstance(sea, net.SEA) self.assertEqual(sess.traits, sea.traits) trunk = seas[0].primary_adpt self.assertIsInstance(trunk, net.TrunkAdapter) self.assertEqual(sess.traits, trunk.traits) # Response => Entry => EntryWrapper => sub-EntryWrappers # => sub-sub-ElementWrapper httpresp._content = _entry_file resp = adapter.read('VolumeGroup', root_id='abc123') self.assertEqual(sess.traits, resp.adapter.traits) vgent = stor.VG.wrap(resp) self.assertIsInstance(vgent, stor.VG) self.assertEqual(sess.traits, vgent.traits) pvs = vgent.phys_vols for pvent in pvs: self.assertIsInstance(pvent, stor.PV) self.assertEqual(sess.traits, pvent.traits) # Building raw wrappers from scratch class MyEntryWrapper(ewrap.EntryWrapper): schema_type = 'SomeObject' @classmethod def bld(cls, adpt): return super(MyEntryWrapper, cls)._bld(adpt) mew = MyEntryWrapper.bld(adapter) self.assertIsInstance(mew, MyEntryWrapper) self.assertEqual(sess.traits, mew.traits) class MyElementWrapper(ewrap.ElementWrapper): schema_type = 'SomeObject' @classmethod 
def bld(cls, adpt): return super(MyElementWrapper, cls)._bld(adpt) mew = MyElementWrapper.bld(adapter) self.assertIsInstance(mew, MyElementWrapper) self.assertEqual(sess.traits, mew.traits) if __name__ == '__main__': unittest.main() pypowervm-1.1.24/pypowervm/tests/helpers/0000775000175000017500000000000013571367172020125 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/helpers/__init__.py0000664000175000017500000000000013571367171022223 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/helpers/test_sample.py0000664000175000017500000000376113571367171023025 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import mock import testtools import pypowervm.adapter as adp import pypowervm.exceptions as pvmex import pypowervm.helpers.sample_helper as smpl_hlp import pypowervm.tests.test_fixtures as fx class TestSampleHelper(testtools.TestCase): def setUp(self): super(TestSampleHelper, self).setUp() self.sess = self.useFixture(fx.SessionFx()).sess @mock.patch('time.sleep') def test_sample_helper(self, mock_sleep): helpers = smpl_hlp.sample_retry_helper fake_resp1 = adp.Response( 'GET', '/some/path', 200, 'OK', ['headers'], body='Some Text HSCL3205 More Text') self.sess.request.side_effect = pvmex.Error('yo', response=fake_resp1) adpt = adp.Adapter(self.sess, helpers=helpers) self.assertRaises( pvmex.Error, adpt._request, 'method', 'path', body='the body') # Test that the request method was called twice and sleep was called self.assertEqual(self.sess.request.call_count, 2) mock_sleep.assert_called_once_with(5 * 1) hlp = functools.partial(smpl_hlp.sample_retry_helper, max_retries=5) self.sess.reset_mock() try: adpt._request('method', 'path', body='the body', helpers=hlp) except Exception: # Should have tried 6 times total self.assertEqual(self.sess.request.call_count, 6) pypowervm-1.1.24/pypowervm/tests/helpers/test_vios_busy.py0000664000175000017500000000663613571367171023572 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import unittest import mock import pypowervm.adapter as adp import pypowervm.exceptions as pvmex from pypowervm.helpers import vios_busy from pypowervm.tests.test_utils import pvmhttp HTTPRESP_FILE = "fake_httperror.txt" HTTPRESP_SA_FILE = "fake_httperror_service_unavail.txt" class TestVIOSBusyHelper(unittest.TestCase): def setUp(self): super(TestVIOSBusyHelper, self).setUp() self.http_error = pvmhttp.load_pvm_resp(HTTPRESP_FILE) self.http_error_sa = pvmhttp.load_pvm_resp(HTTPRESP_SA_FILE) @mock.patch('pypowervm.adapter.Session') @mock.patch('pypowervm.helpers.vios_busy.SLEEP') def test_vios_busy_helper(self, mock_sleep, mock_sess): # Try with 1 retries hlp = functools.partial(vios_busy.vios_busy_retry_helper, max_retries=1) error = pvmex.Error('yo', response=self.http_error.response) mock_sess.request.side_effect = error adpt = adp.Adapter(mock_sess, helpers=hlp) self.assertRaises( pvmex.Error, adpt._request, 'method', 'path', body='the body') # Test that the request method was called twice and sleep was called self.assertEqual(mock_sess.request.call_count, 2) mock_sleep.assert_called_once_with(5 * 1) # Test with more retries and sleep values retries = 10 hlp = functools.partial(vios_busy.vios_busy_retry_helper, max_retries=retries, delay=15) mock_sess.reset_mock() self.assertRaises(pvmex.Error, adpt._request, 'method', 'path', body='the body', helpers=hlp) # Should have tried 'retries' times plus the initial one self.assertEqual(mock_sess.request.call_count, retries+1) # Test with None response mock_sess.reset_mock() error = pvmex.Error('yo', response=None) mock_sess.request.side_effect = error hlp = functools.partial(vios_busy.vios_busy_retry_helper, max_retries=1, delay=15) self.assertRaises(pvmex.Error, adpt._request, 'method', 'path', body='the body', helpers=hlp) # There should be no retries since the response was None self.assertEqual(mock_sess.request.call_count, 1) # Test with a Service Unavailable exception mock_sess.reset_mock() hlp = 
functools.partial(vios_busy.vios_busy_retry_helper, max_retries=1) error = pvmex.Error('yo', response=self.http_error_sa.response) mock_sess.request.side_effect = error adpt = adp.Adapter(mock_sess, helpers=hlp) self.assertRaises( pvmex.Error, adpt._request, 'method', 'path', body='the body') self.assertEqual(mock_sess.request.call_count, 2) pypowervm-1.1.24/pypowervm/tests/helpers/test_loghelper.py0000664000175000017500000000764213571367171023527 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import logging as base_logging import mock import testtools import pypowervm.adapter as adp import pypowervm.const as c import pypowervm.exceptions as pvmex import pypowervm.helpers.log_helper as log_hlp import pypowervm.tests.test_fixtures as fx # Testing by hand it's useful to enable the next line instead of the following # logging.basicConfig(level=logging.INFO) base_logging.basicConfig() class TestLogHelper(testtools.TestCase): def setUp(self): super(TestLogHelper, self).setUp() self.sess = self.useFixture(fx.SessionFx()).sess @mock.patch('pypowervm.helpers.log_helper.LOG') def test_log_helper(self, mock_log): helpers = log_hlp.log_helper response = adp.Response('GET', '/some/path', 200, 'OK', ['headers']) self.sess.request.return_value = response adpt = adp.Adapter(self.sess, helpers=helpers) # Test that we get the response we expect passed back unharmed self.assertEqual(response, adpt._request('method', 'path', body='the body')) # Should be 1 req/resp in the log now, which would be 4 info messages mock_log.reset_mock() log_hlp._write_thread_log() self.assertEqual(mock_log.info.call_count, 4) # Should be empty now mock_log.reset_mock() log_hlp._write_thread_log() self.assertEqual(mock_log.info.call_count, 0) # Test that we limit the number of entries mock_log.reset_mock() for x in range(0, 30): adpt._request('method1', 'path', body='the body %d' % x) log_hlp._write_thread_log() # Each req/resp pair is 2 log entries but headers and body # are logged separately, so with maxlogs=3, it's 3 * 2 * 2. 
self.assertEqual(mock_log.info.call_count, (3 * 2 * 2)) mock_log.reset_mock() # Add a few records adpt._request('method1', 'path', body='the body') # Ensure a 412 (special case) doesn't dump, but does raise self.sess.request.side_effect = pvmex.HttpError(mock.Mock( status=c.HTTPStatus.ETAG_MISMATCH)) self.assertRaises( pvmex.HttpError, adpt._request, 'method2', 'path', body='the body') self.assertEqual(0, mock_log.info.call_count) # Ensure a non-412 exception dumps the logs and is then raised self.sess.request.side_effect = pvmex.HttpError(mock.Mock( status=c.HTTPStatus.INTERNAL_ERROR)) mock_log.reset_mock() self.assertRaises( pvmex.Error, adpt._request, 'method', 'path', body='the body') # Should be 10 entries. 4 * 2 req/resp, 2 for this req. self.assertEqual(mock_log.info.call_count, 10) # Ensure the log storage is initialized correctly, and we can change # the default value hlp_size = functools.partial(log_hlp.log_helper, max_logs=12) adpt1 = adp.Adapter(self.sess, helpers=hlp_size) self.sess.request.side_effect = None with mock.patch('pypowervm.helpers.log_helper.' '_init_thread_stg') as mock_init: adpt1._request('method1', 'path', body='the body') # Should be called with 24 since 12 * 2 entries. self.assertEqual(mock_init.call_args_list, [mock.call(max_entries=24)]) pypowervm-1.1.24/pypowervm/tests/tasks/0000775000175000017500000000000013571367172017610 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/tasks/test_scsi_mapper.py0000664000175000017500000007634713571367171023546 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools from pypowervm import exceptions as exc from pypowervm.tasks import scsi_mapper from pypowervm.tests.tasks import util as tju from pypowervm.tests import test_fixtures as fx from pypowervm.wrappers import storage as pvm_stor from pypowervm.wrappers import virtual_io_server as pvm_vios VIO_MULTI_MAP_FILE = 'vio_multi_vscsi_mapping.txt' VIO_MULTI_MAP_FILE2 = 'fake_vios_mappings.txt' LPAR_UUID = '42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC' class TestSCSIMapper(testtools.TestCase): def setUp(self): super(TestSCSIMapper, self).setUp() # Common Adapter self.adpt = self.useFixture(fx.AdapterFx()).adpt # Don't really sleep self.useFixture(fx.SleepFx()) # Fake URI mock_crt_href_p = mock.patch('pypowervm.wrappers.virtual_io_server.' 
'VSCSIMapping.crt_related_href') self.mock_crt_href = mock_crt_href_p.start() self.addCleanup(mock_crt_href_p.stop) href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' 'c5d782c7-44e4-3086-ad15-b16fb039d63b/LogicalPartition/' + LPAR_UUID) self.mock_crt_href.return_value = href # Mock the delay function, by overriding the sleep mock_delay_p = mock.patch('time.sleep') self.mock_delay = mock_delay_p.start() self.addCleanup(mock_delay_p.stop) self.v1resp = tju.load_file(VIO_MULTI_MAP_FILE, self.adpt) self.v1wrap = pvm_vios.VIOS.wrap(self.v1resp) self.v2resp = tju.load_file(VIO_MULTI_MAP_FILE2, self.adpt) self.v2wrap = pvm_vios.VIOS.wrap(self.v2resp) def test_mapping(self): # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was added to existing def validate_update(*kargs, **kwargs): vios_w = kargs[0] self.assertEqual(6, len(vios_w.scsi_mappings)) self.assertEqual(vios_w.scsi_mappings[0].client_adapter, vios_w.scsi_mappings[4].client_adapter) self.assertEqual(vios_w.scsi_mappings[0].server_adapter, vios_w.scsi_mappings[4].server_adapter) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Create the new storage dev pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid') # Run the code scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) # And the VIOS was "looked up" self.assertEqual(1, self.adpt.read.call_count) # Now do it again, but passing the vios wrapper self.adpt.update_by_path.reset_mock() self.adpt.read.reset_mock() scsi_mapper.add_vscsi_mapping('host_uuid', self.v1wrap, LPAR_UUID, pv) # Since the mapping already existed, our update mock was not called self.assertEqual(0, self.adpt.update_by_path.call_count) # And the VIOS was not "looked up" self.assertEqual(0, self.adpt.read.call_count) def test_mapping_retry(self): """Tests that a mapping function will be retried.""" # 
Mock Data. Need to load this once per retry, or else the mappings # get appended with each other. self.adpt.read.side_effect = [ tju.load_file(VIO_MULTI_MAP_FILE, self.adpt), tju.load_file(VIO_MULTI_MAP_FILE, self.adpt), tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)] global attempt_count attempt_count = 0 # Validate that the mapping was added to existing. First few times # through loop, force a retry exception def validate_update(*kargs, **kwargs): global attempt_count attempt_count += 1 if attempt_count == 3: vios_w = kargs[0] self.assertEqual(6, len(vios_w.scsi_mappings)) return vios_w.entry else: tju.raiseRetryException() self.adpt.update_by_path.side_effect = validate_update # Create the new storage dev pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid') # Run the code scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv) # Make sure that our validation code above was invoked self.assertEqual(3, self.adpt.update_by_path.call_count) self.assertEqual(3, attempt_count) def test_mapping_new_mapping(self): """Fuse limit, slot number, LUA via add_vscsi_mapping.""" # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was added to existing def validate_update(*args, **kwargs): vios_w = args[0] self.assertEqual(6, len(vios_w.scsi_mappings)) new_map = vios_w.scsi_mappings[5] # Make sure that the adapters do not match self.assertNotEqual(vios_w.scsi_mappings[0].client_adapter, new_map.client_adapter) self.assertNotEqual(vios_w.scsi_mappings[0].server_adapter, new_map.server_adapter) # Make sure we got the right slot number and LUA self.assertEqual(23, new_map.client_adapter.lpar_slot_num) self.assertEqual('the_lua', new_map.target_dev.lua) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Create the new storage dev pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid') # Run the code # While we're here, make sure lpar_slot_num and lua go through. 
This # validates those kwargs in build_vscsi_mapping too. scsi_mapper.add_vscsi_mapping( 'host_uuid', 'vios_uuid', LPAR_UUID, pv, fuse_limit=5, lpar_slot_num=23, lua='the_lua') # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) def test_add_vscsi_mapping_root_uri(self): # Use root lpar URI href = ('https://9.1.2.3:12443/rest/api/uom/LogicalPartition/' + LPAR_UUID) self.mock_crt_href.return_value = href self.adpt.read.return_value = self.v2resp # Validate that mapping was modified def validate_update(*kargs, **kwargs): vios_w = kargs[0] # Assert that the new mapping is using the root URI self.assertEqual(href, vios_w.scsi_mappings[-1].client_lpar_href) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid') # Add the vscsi mapping scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) def test_add_map(self): """Tests the add_map method.""" pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid') scsi_map = scsi_mapper.build_vscsi_mapping('host_uuid', self.v1wrap, LPAR_UUID, pv, lpar_slot_num=23) # Get the original count orig_mappings = len(self.v1wrap.scsi_mappings) # Add the actual mapping resp1 = scsi_mapper.add_map(self.v1wrap, scsi_map) self.assertIsNotNone(resp1) self.assertIsInstance(resp1, pvm_vios.VSCSIMapping) # Assert that the desired client slot number was set self.assertEqual(resp1.client_adapter.lpar_slot_num, 23) # The mapping should return as None, as it is already there. resp2 = scsi_mapper.add_map(self.v1wrap, scsi_map) self.assertIsNone(resp2) # Make sure only one was added. 
self.assertEqual(orig_mappings + 1, len(self.v1wrap.scsi_mappings)) # Now make sure the mapping added can be found found = scsi_mapper.find_maps(self.v1wrap.scsi_mappings, LPAR_UUID, stg_elem=pv) self.assertEqual(1, len(found)) self.assertEqual(scsi_map, found[0]) def test_remap_storage_vopt(self): # Mock data self.adpt.read.return_value = self.v1resp # Validate that mapping was modified def validate_update(*kargs, **kwargs): vios_w = kargs[0] return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Run modify code using media name media_name = 'bldr1_dfe05349_kyleh_config.iso' vopt = pvm_stor.VOptMedia.bld(self.adpt, 'new_media.iso', size=1) vios, mod_map = scsi_mapper.modify_vopt_mapping( self.adpt, 'fake_vios_uuid', 2, new_media=vopt, media_name=media_name) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertIsNotNone(mod_map) self.assertIsInstance(mod_map.backing_storage, pvm_stor.VOptMedia) self.assertEqual(mod_map.backing_storage.name, vopt.name) # And the VIOS was "looked up" self.assertEqual(1, self.adpt.read.call_count) self.assertEqual(self.v1resp.atom, vios.entry) # Ensure exceptions raised correctly vopt2 = pvm_stor.VOptMedia.bld(self.adpt, 'new_media2.iso', size=1) vopt3 = pvm_stor.VOptMedia.bld(self.adpt, 'new_media3.iso', size=1) scsi_mapper.add_vscsi_mapping( 'host_uuid', 'vios_uuid', LPAR_UUID, vopt3) self.adpt.update_by_path.reset_mock() self.adpt.read.reset_mock() # Zero matching maps found self.assertRaises( exc.SingleMappingNotFoundRemapError, scsi_mapper.modify_vopt_mapping, self.adpt, 'fake_vios_uuid', 2, new_media=vopt, media_name="no_matches.iso") self.assertEqual(0, self.adpt.update_py_path.call_count) # More than one matching maps found self.assertRaises( exc.SingleMappingNotFoundRemapError, scsi_mapper.modify_vopt_mapping, self.adpt, 'fake_vios_uuid', 2, new_media=vopt2) self.assertEqual(0, self.adpt.update_py_path.call_count) # New storage element 
already mapped self.assertRaises( exc.StorageMapExistsRemapError, scsi_mapper.modify_vopt_mapping, self.adpt, 'fake_vios_uuid', 2, new_media=vopt3, media_name=vopt.name) self.assertEqual(0, self.adpt.update_py_path.call_count) # Run modify code using VIOS wrapper and media udid media_udid = '0ebldr1_dfe05349_kyleh_config.iso' vios_wrap = pvm_vios.VIOS.wrap( tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)) self.adpt.read.reset_mock() vios, mod_map = scsi_mapper.modify_vopt_mapping( self.adpt, vios_wrap, LPAR_UUID, new_media=vopt, udid=media_udid) self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertIsNotNone(mod_map) self.assertIsInstance(mod_map.backing_storage, pvm_stor.VOptMedia) self.assertEqual(mod_map.backing_storage.name, vopt.name) # But the VIOS was not "looked up" self.assertEqual(0, self.adpt.read.call_count) self.assertEqual(vios_wrap.entry, vios.entry) def test_remove_storage_vopt(self): # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was removed from existing def validate_update(*kargs, **kwargs): vios_w = kargs[0] self.assertEqual(4, len(vios_w.scsi_mappings)) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Run the code media_name = 'bldr1_dfe05349_kyleh_config.iso' vios, remel = scsi_mapper.remove_vopt_mapping( self.adpt, 'fake_vios_uuid', 2, media_name=media_name) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.VOptMedia) # And the VIOS was "looked up" self.assertEqual(1, self.adpt.read.call_count) self.assertEqual(self.v1resp.atom, vios.entry) # Now do it again, but passing the vios wrapper and the client UUID. # Match by UDID this time. 
media_udid = '0ebldr1_dfe05349_kyleh_config.iso' vios_wrap = pvm_vios.VIOS.wrap( tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)) self.adpt.update_by_path.reset_mock() self.adpt.read.reset_mock() vios, remel = scsi_mapper.remove_vopt_mapping( self.adpt, vios_wrap, LPAR_UUID, udid=media_udid) self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.VOptMedia) # But the VIOS was not "looked up" self.assertEqual(0, self.adpt.read.call_count) self.assertEqual(vios_wrap.entry, vios.entry) def test_remove_storage_vopt_no_name_specified(self): # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was removed from existing def validate_update(*kargs, **kwargs): vios_w = kargs[0] self.assertEqual(4, len(vios_w.scsi_mappings)) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Run the code vios, remel = scsi_mapper.remove_vopt_mapping( self.adpt, 'fake_vios_uuid', 2, media_name=None) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.VOptMedia) self.assertEqual(self.v1resp.atom, vios.entry) def test_remove_storage_vopt_retry(self): """Tests removing the storage vOpt with multiple retries.""" # Mock Data. The retry will call this three times. They have to # be indepdent loads, otherwise the data gets re-used and the remove # will not be properly invoked. self.adpt.read.side_effect = [ tju.load_file(VIO_MULTI_MAP_FILE, self.adpt), tju.load_file(VIO_MULTI_MAP_FILE, self.adpt), tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)] global attempt_count attempt_count = 0 # Validate that the mapping was removed from existing. 
First few # loops, force a retry def validate_update(*kargs, **kwargs): global attempt_count attempt_count += 1 if attempt_count == 3: vios_w = kargs[0] self.assertEqual(4, len(vios_w.scsi_mappings)) return vios_w.entry else: tju.raiseRetryException() self.adpt.update_by_path.side_effect = validate_update # Run the code media_name = 'bldr1_dfe05349_kyleh_config.iso' remel = scsi_mapper.remove_vopt_mapping( self.adpt, 'fake_vios_uuid', 2, media_name=media_name)[1] # Make sure that our validation code above was invoked self.assertEqual(3, self.adpt.update_by_path.call_count) self.assertEqual(3, attempt_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.VOptMedia) def _test_remove_storage_vdisk(self, *args, **kwargs): """Helper to test remove_storage_vdisk with various arguments.""" # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was removed from existing def validate_update(*kargs, **kwa): vios_w = kargs[0] self.assertEqual(4, len(vios_w.scsi_mappings)) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Run the code vios, remel = scsi_mapper.remove_vdisk_mapping(*args, **kwargs) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.VDisk) self.assertEqual(self.v1resp.atom, vios.entry) def test_remove_storage_vdisk_name(self): self._test_remove_storage_vdisk( self.adpt, 'fake_vios_uuid', 2, disk_names=['Ubuntu1410']) def test_remove_storage_vdisk_udid(self): self._test_remove_storage_vdisk( self.adpt, 'fake_vios_uuid', 2, udids=['0300025d4a00007a000000014b36d9deaf.1']) def _test_remove_storage_lu(self, *args, **kwargs): # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was removed from existing def validate_update(*kargs, **kwa): vios_w = kargs[0] self.assertEqual(4, len(vios_w.scsi_mappings)) return vios_w.entry 
self.adpt.update_by_path.side_effect = validate_update # Run the code vios, remel = scsi_mapper.remove_lu_mapping(*args, **kwargs) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.LU) self.assertEqual(self.v1resp.atom, vios.entry) def test_remove_storage_lu_all(self): self._test_remove_storage_lu(self.adpt, 'fake_vios_uuid', 2) def test_remove_storage_lu_udid(self): self._test_remove_storage_lu( self.adpt, 'fake_vios_uuid', 2, udids=['270c88f8e2d36711e490ce40f2e95daf30a6d61c0dee5ec6f6a011b300' 'b9d0830d']) def _test_remove_pv_mapping(self, *args, **kwargs): # Mock Data self.adpt.read.return_value = self.v1resp # Validate that the mapping was removed to existing def validate_update(*kargs, **kwa): vios_w = kargs[0] self.assertEqual(4, len(vios_w.scsi_mappings)) return vios_w.entry self.adpt.update_by_path.side_effect = validate_update # Run the code vios, remel = scsi_mapper.remove_pv_mapping(*args, **kwargs) # Make sure that our validation code above was invoked self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, len(remel)) self.assertIsInstance(remel[0], pvm_stor.PV) self.assertEqual(self.v1resp.atom, vios.entry) def test_remove_pv_mapping_name(self): self._test_remove_pv_mapping(self.adpt, 'fake_vios_uuid', 2, 'hdisk10') def test_remove_pv_mapping_udid(self): self._test_remove_pv_mapping( self.adpt, 'fake_vios_uuid', 2, None, udid='01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDA2MA' '==') def test_detach_storage(self): """Detach storage from some mappings.""" # In v1wrap, all five maps are associated with LPAR 2 num_matches = 5 self.assertEqual(num_matches, len(self.v1wrap.scsi_mappings)) # Beforehand, four of them have storage and one does not. 
self.assertEqual(4, len( [sm.backing_storage for sm in self.v1wrap.scsi_mappings if sm.backing_storage is not None])) removals = scsi_mapper.detach_storage(self.v1wrap, 2) # The number of mappings is the same afterwards. self.assertEqual(num_matches, len(self.v1wrap.scsi_mappings)) # But now the mappings have no storage for smap in self.v1wrap.scsi_mappings: self.assertIsNone(smap.backing_storage) # The return list contains all the mappings self.assertEqual(num_matches, len(removals)) # The return list members contain the storage (the four that had it # beforehand). self.assertEqual(4, len([sm.backing_storage for sm in removals if sm .backing_storage is not None])) # In v2wrap, there are four VOptMedia mappings num_matches = 4 match_class = pvm_stor.VOptMedia # Number of mappings should be the same before and after. len_before = len(self.v2wrap.scsi_mappings) self.assertEqual( num_matches, len([1 for sm in self.v2wrap.scsi_mappings if isinstance(sm.backing_storage, match_class)])) removals = scsi_mapper.detach_storage( self.v2wrap, None, match_func=scsi_mapper.gen_match_func( match_class)) self.assertEqual(num_matches, len(removals)) # The number of mappings is the same as beforehand. self.assertEqual(len_before, len(self.v2wrap.scsi_mappings)) # Now there should be no mappings with VOptMedia self.assertEqual( 0, len([1 for sm in self.v2wrap.scsi_mappings if isinstance(sm.backing_storage, match_class)])) # The removals contain the storage self.assertEqual( num_matches, len([1 for sm in removals if isinstance(sm.backing_storage, match_class)])) def test_find_maps(self): """find_maps() tests not covered elsewhere.""" maps = self.v1wrap.scsi_mappings # Specifying both match_func and stg_elem raises ValueError self.assertRaises(ValueError, scsi_mapper.find_maps, maps, 1, match_func=isinstance, stg_elem='foo') # Omitting match_func and stg_elem matches all entries with specified # LPAR ID. # For LPAR ID 2, that should be all of 'em. 
matches = scsi_mapper.find_maps(maps, 2) self.assertEqual(len(maps), len(matches)) for exp, act in zip(maps, matches): self.assertEqual(exp, act) # For the right LPAR UUID, that should be all of 'em. matches = scsi_mapper.find_maps(maps, LPAR_UUID) self.assertEqual(len(maps), len(matches)) for exp, act in zip(maps, matches): self.assertEqual(exp, act) # For the wrong LPAR ID, it should be none of 'em. matches = scsi_mapper.find_maps(maps, 1) self.assertEqual(0, len(matches)) # For the wrong LPAR UUID, it should be none of 'em. matches = scsi_mapper.find_maps(maps, LPAR_UUID[:35] + '0') self.assertEqual(0, len(matches)) # Specific storage element generates match func for that element. matches = scsi_mapper.find_maps(maps, 2, stg_elem=maps[2].backing_storage) self.assertEqual(1, len(matches)) self.assertEqual(maps[2], matches[0]) # Test find maps when client lpar id is not specified and backing # storage is given matches = scsi_mapper.find_maps(maps, None, stg_elem=maps[2].backing_storage) self.assertEqual(1, len(matches)) self.assertEqual(maps[2], matches[0]) # All the mappings in VIO_MULTI_MAP_FILE are "complete". Now play with # some that aren't. maps = self.v2wrap.scsi_mappings # Map 0 has only a server adapter. We should find it if we specify the # LPAR ID... matches = scsi_mapper.find_maps(maps, 27, include_orphans=True) self.assertEqual(maps[0], matches[0]) # ...but only if allowing orphans matches = scsi_mapper.find_maps(maps, 27, include_orphans=False) self.assertEqual(0, len(matches)) # Matching by LPAR UUID. Maps 12, 25, and 26 have this UUID... uuid = '0C0A6EBE-7BF4-4707-8780-A140F349E42E' matches = scsi_mapper.find_maps(maps, uuid, include_orphans=True) self.assertEqual(3, len(matches)) self.assertEqual(maps[12], matches[0]) self.assertEqual(maps[25], matches[1]) self.assertEqual(maps[26], matches[2]) # ...but 25 is an orphan (no client adapter). 
uuid = '0C0A6EBE-7BF4-4707-8780-A140F349E42E' matches = scsi_mapper.find_maps(maps, uuid) self.assertEqual(2, len(matches)) self.assertEqual(maps[12], matches[0]) self.assertEqual(maps[26], matches[1]) def test_separate_mappings(self): # Test with child URI client_href = ('https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' '726e9cb3-6576-3df5-ab60-40893d51d074/LogicalPartition/' '0C0A6EBE-7BF4-4707-8780-A140F349E42E') sep = scsi_mapper._separate_mappings(self.v2wrap, client_href) self.assertEqual(2, len(sep)) self.assertEqual( {'1eU8246.L2C.0604C7A-V1-C13', '1eU8246.L2C.0604C7A-V1-C25'}, set(sep.keys())) self.assertEqual(sep['1eU8246.L2C.0604C7A-V1-C13'][0], self.v2wrap.scsi_mappings[-2]) # Test with root URI client_href = ('https://9.1.2.3:12443/rest/api/uom/LogicalPartition/' '0C0A6EBE-7BF4-4707-8780-A140F349E42E') sep = scsi_mapper._separate_mappings(self.v2wrap, client_href) self.assertEqual(2, len(sep)) self.assertEqual( {'1eU8246.L2C.0604C7A-V1-C13', '1eU8246.L2C.0604C7A-V1-C25'}, set(sep.keys())) self.assertEqual(sep['1eU8246.L2C.0604C7A-V1-C13'][0], self.v2wrap.scsi_mappings[-2]) def test_index_mappings(self): idx = scsi_mapper.index_mappings(self.v2wrap.scsi_mappings) self.assertEqual({ 'by-lpar-id', 'by-lpar-uuid', 'by-storage-udid'}, set(idx.keys())) exp_lpar_ids = ('2', '5', '6', '7', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '27', '28', '29', '33', '35', '36', '39', '40', str(pvm_stor.ANY_SLOT)) self.assertEqual(set(exp_lpar_ids), set(idx['by-lpar-id'].keys())) # Each mapping has a different LPAR ID, so each LPAR ID only has one # mapping for lpar_id in exp_lpar_ids: maplist = idx['by-lpar-id'][lpar_id] self.assertEqual(1, len(maplist)) self.assertIsInstance(maplist[0], pvm_vios.VSCSIMapping) self.assertEqual(lpar_id, str(maplist[0].server_adapter.lpar_id)) # Not all mappings have client_lpar_href, so this list is shorter. 
exp_lpar_uuids = ('0C0A6EBE-7BF4-4707-8780-A140F349E42E', '0FB69DD7-4B93-4C09-8916-8BC9821ABAAC', '263EE77B-AD6E-4920-981A-4B7D245B8571', '292ACAF5-C96B-447A-8C7E-7503D80AA33E', '32AA6AA5-CCE6-4523-860C-0852455036BE', '3CE30EC6-C98A-4A58-A764-09DAC7C324BC', '615C9134-243D-4A11-93EB-C0556664B761', '7CFDD55B-E0D7-4B8C-8254-9305E31BB1DC') self.assertEqual(set(exp_lpar_uuids), set(idx['by-lpar-uuid'].keys())) # Of ten mappings with client_lpar_href, three have the same UUID. for lpar_uuid in exp_lpar_uuids: maplist = idx['by-lpar-uuid'][lpar_uuid] for smap in maplist: self.assertIsInstance(smap, pvm_vios.VSCSIMapping) self.assertTrue(smap.client_lpar_href.endswith(lpar_uuid)) if lpar_uuid == '0C0A6EBE-7BF4-4707-8780-A140F349E42E': self.assertEqual(3, len(maplist)) else: self.assertEqual(1, len(maplist)) # Only six mappings have storage, and all are different self.assertEqual(6, len(idx['by-storage-udid'].keys())) for sudid in idx['by-storage-udid']: self.assertEqual(1, len(idx['by-storage-udid'][sudid])) def test_gen_match_func(self): """Tests for gen_match_func.""" # Class must match mfunc = scsi_mapper.gen_match_func(str) self.assertFalse(mfunc(1)) self.assertTrue(mfunc('foo')) # Match names elem = mock.Mock() elem.name = 'foo' # 'False' names/prefixes ignored mfunc = scsi_mapper.gen_match_func(mock.Mock, names=[]) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes=[]) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=[], prefixes=[]) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=['bar', 'baz']) self.assertFalse(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=['bar', 'foobar', 'baz']) self.assertFalse(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=['bar', 'foo', 'baz']) self.assertTrue(mfunc(elem)) # Prefixes are ignored if names specified mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes='x', names=['bar', 'foo', 'baz']) 
self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=['bar', 'baz'], prefixes=['f']) self.assertFalse(mfunc(elem)) # Prefixes mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes=['f']) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes=['foo']) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes=['foo', 'x']) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes=['x']) self.assertFalse(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, prefixes=['xfoo', 'foox', 'xfoox']) self.assertFalse(mfunc(elem)) # Alternate key for the name property elem = mock.Mock(alt_name='foo') mfunc = scsi_mapper.gen_match_func(mock.Mock, name_prop='alt_name', names=[]) self.assertTrue(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=['bar', 'baz']) self.assertFalse(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, name_prop='alt_name', names=['bar', 'baz']) self.assertFalse(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, names=['bar', 'foo', 'baz']) self.assertFalse(mfunc(elem)) mfunc = scsi_mapper.gen_match_func(mock.Mock, name_prop='alt_name', names=['bar', 'foo', 'baz']) self.assertTrue(mfunc(elem)) pypowervm-1.1.24/pypowervm/tests/tasks/test_vterm.py0000664000175000017500000005103713571367171022363 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import six import testtools import pypowervm.entities as ent import pypowervm.exceptions as pexc from pypowervm.tasks import vterm import pypowervm.tests.test_fixtures as fx class TestVterm(testtools.TestCase): """Unit Tests for LPAR vterm.""" def setUp(self): super(TestVterm, self).setUp() self.adpt = self.useFixture( fx.AdapterFx(traits=fx.LocalPVMTraits)).adpt @mock.patch('pypowervm.wrappers.job.Job.run_job') def test_close_vterm_non_local(self, mock_run_job): """Performs a close LPAR vterm test.""" mock_resp = mock.MagicMock() mock_resp.entry = ent.Entry( {}, ent.Element('Dummy', self.adpt), self.adpt) self.adpt.read.return_value = mock_resp vterm._close_vterm_non_local(self.adpt, '12345') self.assertEqual(1, mock_run_job.call_count) self.assertEqual(1, self.adpt.read.call_count) # test exception path mock_run_job.side_effect = pexc.LPARNotFound( lpar_name='12345') self.assertRaises(pexc.LPARNotFound, vterm._close_vterm_non_local, self.adpt, '12345') mock_run_job.reset_mock() @mock.patch('pypowervm.tasks.vterm._get_lpar_id') @mock.patch('pypowervm.tasks.vterm._run_proc') def test_open_vnc_vterm(self, mock_run_proc, mock_get_lpar_id): """Validates the output from the mkvterm if a vterm is not active.""" mock_get_lpar_id.return_value = '4' std_out = '5903' std_err = ('VNC is started on port 5903 for localhost access ' 'only. Use \'rmvterm --id 4\' to close it.') mock_run_proc.return_value = (0, std_out, std_err) resp = vterm.open_localhost_vnc_vterm(self.adpt, 'lpar_uuid') mock_run_proc.assert_called_once_with(['mkvterm', '--id', '4', '--vnc', '--local']) self.assertEqual(5903, resp) @mock.patch('pypowervm.tasks.vterm._get_lpar_id') @mock.patch('pypowervm.tasks.vterm._run_proc') def test_open_vnc_vterm_existing(self, mock_run_proc, mock_get_lpar_id): """Validates the output from the mkvterm if a VNC vterm is active.""" mock_get_lpar_id.return_value = '4' std_out = '5903' std_err = ('\nVNC server is already started on port 5903. 
Use ' '\'rmvterm --id 4\' to close it.') mock_run_proc.return_value = (3, std_out, std_err) resp = vterm.open_localhost_vnc_vterm(self.adpt, 'lpar_uuid') mock_run_proc.assert_called_once_with(['mkvterm', '--id', '4', '--vnc', '--local']) self.assertEqual(5903, resp) @mock.patch('subprocess.Popen') @mock.patch('pypowervm.tasks.vterm._get_lpar_id') def test_open_vnc_vterm_nonascii(self, mock_get_lpar_id, mock_popen): """Validates errors in non-ascii encodings are handled properly""" proc_mock = mock.Mock(returncode=3) mock_get_lpar_id.return_value = '4' mock_popen.return_value = proc_mock proc_mock.communicate.return_value = ('', '\xd0\x92') # Make sure we don't get some sort of encoding error on a failure case self.assertRaises(pexc.VNCBasedTerminalFailedToOpen, vterm.open_localhost_vnc_vterm, self.adpt, '1', True) @mock.patch('pypowervm.tasks.vterm.close_vterm') @mock.patch('pypowervm.tasks.vterm._get_lpar_id') @mock.patch('pypowervm.tasks.vterm._run_proc') def test_open_vnc_vterm_nonvnc_force(self, mock_run_proc, mock_get_lpar_id, mock_close): """Validates the output from mkvterm if non-vnc active and force.""" mock_get_lpar_id.return_value = '4' std_out_1 = "" std_err_1 = ("The vterm is currently in use by process 120352. " "Use 'rmvterm --id 4' to close it.") std_out_2 = '5903' std_err_2 = ('VNC is started on port 5903 for localhost access ' 'only. 
Use \'rmvterm --id 4\' to close it.') mock_run_proc.side_effect = [(3, std_out_1, std_err_1), (0, std_out_2, std_err_2)] resp = vterm.open_localhost_vnc_vterm(self.adpt, 'lpar_uuid', force=True) # Validation mock_close.assert_called_once_with(self.adpt, 'lpar_uuid') mock_run_proc.assert_any_call(['mkvterm', '--id', '4', '--vnc', '--local']) self.assertEqual(2, mock_run_proc.call_count) self.assertEqual(5903, resp) @mock.patch('pypowervm.tasks.vterm.close_vterm') @mock.patch('pypowervm.tasks.vterm._get_lpar_id') @mock.patch('pypowervm.tasks.vterm._run_proc') def test_open_vnc_vterm_nonvnc_noforce(self, mock_run_proc, mock_get_lpar_id, mock_close): """Validates the output from mkvterm if non-vnc active and no force.""" mock_get_lpar_id.return_value = '4' std_out = "" std_err = ("The vterm is currently in use by process 120352. " "Use 'rmvterm --id 4' to close it.") mock_run_proc.return_value = (3, std_out, std_err) self.assertRaises(pexc.VNCBasedTerminalFailedToOpen, vterm.open_localhost_vnc_vterm, self.adpt, 'lpar_uuid') # Validation mock_close.assert_not_called() mock_run_proc.assert_called_with(['mkvterm', '--id', '4', '--vnc', '--local']) self.assertEqual(1, mock_run_proc.call_count) @mock.patch('pypowervm.tasks.vterm.close_vterm') @mock.patch('pypowervm.tasks.vterm._get_lpar_id') @mock.patch('pypowervm.tasks.vterm._run_proc') def test_open_vnc_vterm_force_bad_error(self, mock_run_proc, mock_get_lpar_id, mock_close): """Validates the output from mkvterm if force but unexpected error.""" mock_get_lpar_id.return_value = '4' std_out = "" std_err = ("The mkvterm command failed for an unexpected reason") mock_run_proc.return_value = (2, std_out, std_err) self.assertRaises(pexc.VNCBasedTerminalFailedToOpen, vterm.open_localhost_vnc_vterm, self.adpt, 'lpar_uuid', force=True) # Validation mock_close.assert_not_called() mock_run_proc.assert_called_with(['mkvterm', '--id', '4', '--vnc', '--local']) self.assertEqual(1, mock_run_proc.call_count) 
@mock.patch('pypowervm.tasks.vterm._get_lpar_id') @mock.patch('pypowervm.tasks.vterm._run_proc') def test_close_vterm_local(self, mock_run_proc, mock_get_lpar_id): mock_get_lpar_id.return_value = '2' vterm._close_vterm_local(self.adpt, '5') mock_run_proc.assert_called_once_with(['rmvterm', '--id', '2']) class TestVNCSocketListener(testtools.TestCase): """Unit Tests for _VNCSocketListener vterm.""" def setUp(self): super(TestVNCSocketListener, self).setUp() self.adpt = self.useFixture( fx.AdapterFx(traits=fx.LocalPVMTraits)).adpt self.srv = vterm._VNCSocketListener( self.adpt, '5901', '1.2.3.4', True, remote_ips=['1.2.3.5']) self.srv_no_verify = vterm._VNCSocketListener( self.adpt, '5800', '1.2.3.4', False, remote_ips=['1.2.3.5']) self.srv_6 = vterm._VNCSocketListener( self.adpt, '5901', 'fe80:1234', True, remote_ips=['fe80:7890']) self.rptr = vterm._VNCRepeaterServer(self.adpt, 'uuid', '5800') vterm._VNC_LOCAL_PORT_TO_REPEATER['5800'] = self.rptr vterm._VNC_PATH_TO_UUID['path'] = 'uuid' vterm._VNC_PATH_TO_UUID['test'] = 'uuid' vterm._VNC_UUID_TO_LOCAL_PORT['uuid'] = '5800' def tearDown(self): """Tear down the Session instance.""" vterm._VNC_PATH_TO_UUID['path'] = None vterm._VNC_PATH_TO_UUID['test'] = None vterm._VNC_UUID_TO_LOCAL_PORT['1.2.3.4'] = None vterm._VNC_LOCAL_PORT_TO_REPEATER['5800'] = None self.rptr = None super(TestVNCSocketListener, self).tearDown() def test_stop(self): self.assertTrue(self.srv.alive) self.srv.stop() self.assertFalse(self.srv.alive) @mock.patch('socket.socket') def test_new_client_no_verify(self, mock_sock): mock_srv = mock.MagicMock() mock_s_sock, mock_c_sock = mock.MagicMock(), mock.MagicMock() mock_sock.return_value = mock_s_sock mock_srv.accept.return_value = mock_c_sock, ('1.2.3.5', '40675') mock_c_sock.recv.return_value = "CONNECT path HTTP/1.8\r\n\r\n" self.srv_no_verify._new_client(mock_srv) mock_s_sock.connect.assert_called_once_with(('127.0.0.1', '5800')) self.assertEqual({mock_c_sock: mock_s_sock, mock_s_sock: mock_c_sock}, 
self.rptr.peers) @mock.patch('select.select') @mock.patch('socket.socket') def test_new_client(self, mock_sock, mock_select): mock_srv = mock.MagicMock() mock_s_sock, mock_c_sock = mock.MagicMock(), mock.MagicMock() mock_sock.return_value = mock_s_sock mock_select.return_value = [mock_c_sock], None, None mock_srv.accept.return_value = mock_c_sock, ('1.2.3.5', '40675') mock_c_sock.recv.return_value = "CONNECT path HTTP/1.8\r\n\r\n" self.srv._new_client(mock_srv) mock_c_sock.sendall.assert_called_once_with( "HTTP/1.8 200 OK\r\n\r\n") mock_s_sock.connect.assert_called_once_with(('127.0.0.1', '5800')) self.assertEqual({mock_c_sock: mock_s_sock, mock_s_sock: mock_c_sock}, self.rptr.peers) @mock.patch('select.select') @mock.patch('socket.socket') def test_new_client_6(self, mock_sock, mock_select): mock_srv = mock.MagicMock() mock_s_sock, mock_c_sock = mock.MagicMock(), mock.MagicMock() mock_sock.return_value = mock_s_sock mock_select.return_value = [mock_c_sock], None, None mock_srv.accept.return_value = mock_c_sock, ('fe80:7890', '40675') mock_c_sock.recv.return_value = "CONNECT path HTTP/1.8\r\n\r\n" self.srv_6._new_client(mock_srv) mock_c_sock.sendall.assert_called_once_with( "HTTP/1.8 200 OK\r\n\r\n") mock_s_sock.connect.assert_called_once_with(('127.0.0.1', '5800')) self.assertEqual({mock_c_sock: mock_s_sock, mock_s_sock: mock_c_sock}, self.rptr.peers) def test_check_http_connect(self): sock = mock.MagicMock() sock.recv.return_value = "INVALID" uuid, http_code = self.srv._check_http_connect(sock) self.assertIsNone(uuid) self.assertEqual('1.1', http_code) # Test a good string sock.reset_mock() sock.recv.return_value = 'CONNECT test HTTP/2.0\r\n\r\n' uuid, http_code = self.srv._check_http_connect(sock) self.assertEqual('uuid', uuid) self.assertEqual('2.0', http_code) def test_new_client_bad_ip(self): """Tests that a new client will be rejected if a bad IP.""" mock_srv = mock.MagicMock() mock_c_sock = mock.MagicMock() mock_srv.accept.return_value = mock_c_sock, 
('1.2.3.8', '40675') self.srv._new_client(mock_srv) self.assertEqual(self.rptr.peers, {}) self.assertEqual(1, mock_c_sock.close.call_count) def test_new_client_bad_ip6(self): mock_srv = mock.MagicMock() mock_c_sock = mock.MagicMock() mock_srv.accept.return_value = mock_c_sock, ('fe80:5678', '40675') self.srv_6._new_client(mock_srv) self.assertEqual(self.rptr.peers, {}) self.assertEqual(1, mock_c_sock.close.call_count) @mock.patch('select.select') def test_new_client_validation_checks(self, mock_select): mock_srv = mock.MagicMock() mock_c_sock = mock.MagicMock() mock_select.return_value = None, None, None mock_srv.accept.return_value = mock_c_sock, ('1.2.3.5', '40675') # This mock has no 'socket ready'. self.srv._new_client(mock_srv) self.assertEqual(self.rptr.peers, {}) mock_c_sock.sendall.assert_called_with( "HTTP/1.1 400 Bad Request\r\n\r\n") self.assertEqual(1, mock_c_sock.close.call_count) # Reset the select so that the validation check fails mock_c_sock.reset_mock() mock_select.return_value = [mock_c_sock], None, None mock_c_sock.recv.return_value = 'bad_check' self.srv._new_client(mock_srv) self.assertEqual(self.rptr.peers, {}) mock_c_sock.sendall.assert_called_with( "HTTP/1.1 400 Bad Request\r\n\r\n") self.assertEqual(1, mock_c_sock.close.call_count) @mock.patch('pypowervm.tasks.vterm._close_vterm_local') def test_close_client(self, mock_close): client, server = mock.Mock(), mock.Mock() self.rptr.add_socket_connection_pair(client, server) with mock.patch('time.sleep'): self.rptr._close_client(client) self.assertTrue(client.close.called) self.assertTrue(server.close.called) self.assertEqual({}, self.rptr.peers) # Get the killer off the thread and wait for it to complete. 
self.rptr.vnc_killer.join() mock_close.assert_called_once_with(self.adpt, 'uuid') @mock.patch('pypowervm.tasks.vterm._VNCSocketListener._new_client') @mock.patch('select.select') @mock.patch('socket.socket') def test_run_new_client(self, mock_socket, mock_select, mock_new_client): mock_server = mock.MagicMock() mock_socket.return_value = mock_server mock_server.count = 0 # Used to make sure we don't loop indefinitely. bad_select = mock.MagicMock() # Since we are only listening on the server socket, we will have it # return that same socket the first 2 times, and then also return a # bad socket just to make sure we appropriately broke out before then mock_select.side_effect = [([mock_server], [], []), ([mock_server], [], []), ([bad_select], [], [])] def new_client(server): # Keep track of a counter so we make sure we break out of the # loop after the 2nd call to make sure it works twice at least if server.count == 1: self.srv.alive = False # We are setting alive to false after the 2nd # so we want to make sure it doesn't get here if server == bad_select: raise Exception('Invalid Loop Call') server.count += 1 mock_new_client.side_effect = new_client # If this runs...we've pretty much validated. Because it will end. # If it doesn't end...the fail in the 'select' side effect shoudl catch # it. self.srv.run() # Make sure the close was called on all of the sockets. self.assertTrue(mock_server.close.called) # Make sure the select was called with a timeout. 
mock_select.assert_called_with(mock.ANY, mock.ANY, mock.ANY, 1) @mock.patch('select.select') @mock.patch('ssl.wrap_socket', mock.Mock()) def test_enable_x509_authentication(self, mock_select): mock_select.return_value = [mock.Mock()], None, None csock, ssock = _FakeSocket(), _FakeSocket() ssock.recv_buffer = b'RFB 003.008\n\x01\x01' csock.recv_buffer = b'RFB 003.007\n\x13\x00\x02\x00\x00\x01\x04' # Test the method to do the handshake to enable VeNCrypt Authentication self.srv.set_x509_certificates('cacert1', 'cert1', 'key1') nsock = self.srv._enable_x509_authentication(csock, ssock) # Make sure that we didn't get an error and the TLS Socket was created self.assertIsNotNone(nsock, 'The TLS Socket was not created') # Verify that the data sent to the Client Socket matches expected csocksnd = b'RFB 003.008\n\x01\x13\x00\x02\x00\x01\x00\x00\x01\x04\x01' self.assertEqual(csock.send_buffer, csocksnd, 'Did not send to the client socket what was expected') # Verify that the data sent to the Server Socket matches expected self.assertEqual(ssock.send_buffer, b'RFB 003.007\n\x01', 'Did not send to the server socket what was expected') @mock.patch('select.select') def test_enable_x509_authentication_bad_auth_type(self, mock_select): mock_select.return_value = [mock.Mock()], None, None csock, ssock = _FakeSocket(), _FakeSocket() ssock.recv_buffer = b'RFB 003.008\n\x01\x01' csock.recv_buffer = b'RFB 003.007\n\x14\x00\x02\x00\x00\x01\x04' # Test the method to do the handshake to enable VeNCrypt Authentication self.srv.set_x509_certificates('cacert1', 'cert1', 'key1') nsock = self.srv._enable_x509_authentication(csock, ssock) # Make sure that we got an error and it didn't create the TLS Socket self.assertIsNone(nsock, 'Expected an error validating auth type') # Verify that the data sent to the Client Socket matches expected csocksnd = b'RFB 003.008\n\x01\x13\x00\x02\x01' self.assertEqual(csock.send_buffer, csocksnd, 'Did not send to the client socket what was expected') 
@mock.patch('select.select') def test_enable_x509_authentication_bad_auth_version(self, mock_select): mock_select.return_value = [mock.Mock()], None, None csock, ssock = _FakeSocket(), _FakeSocket() ssock.recv_buffer = b'RFB 003.008\n\x01\x01' csock.recv_buffer = b'RFB 003.007\n\x13\x00\x01\x00\x00\x01\x04' # Test the method to do the handshake to enable VeNCrypt Authentication self.srv.set_x509_certificates('cacert1', 'cert1', 'key1') nsock = self.srv._enable_x509_authentication(csock, ssock) # Make sure that we got an error and it didn't create the TLS Socket self.assertIsNone(nsock, 'Expected an error validating auth version') # Verify that the data sent to the Client Socket matches expected csocksnd = b'RFB 003.008\n\x01\x13\x00\x02\x01' self.assertEqual(csock.send_buffer, csocksnd, 'Did not send to the client socket what was expected') @mock.patch('select.select') def test_enable_x509_authentication_bad_auth_subtype(self, mock_select): mock_select.return_value = [mock.Mock()], None, None csock, ssock = _FakeSocket(), _FakeSocket() ssock.recv_buffer = b'RFB 003.008\n\x01\x01' csock.recv_buffer = b'RFB 003.007\n\x13\x00\x02\x00\x00\x01\x03' # Test the method to do the handshake to enable VeNCrypt Authentication self.srv.set_x509_certificates('cacert1', 'cert1', 'key1') nsock = self.srv._enable_x509_authentication(csock, ssock) # Make sure that we got an error and it didn't create the TLS Socket self.assertIsNone(nsock, 'Expected an error validating auth sub-type') # Verify that the data sent to the Client Socket matches expected csocksnd = b'RFB 003.008\n\x01\x13\x00\x02\x00\x01\x00\x00\x01\x04\x00' self.assertEqual(csock.send_buffer, csocksnd, 'Did not send to the client socket what was expected') class _FakeSocket(object): def __init__(self): self.recv_buffer, self.send_buffer = b'', b'' self.recv_bytes, self.send_bytes = 0, 0 def recv(self, bufsize): bufsize = bufsize if isinstance(bufsize, int) else ord(bufsize) chunk = 
self.recv_buffer[self.recv_bytes:self.recv_bytes+bufsize]
        # Coerce to bytes for py2/py3 compatibility before returning.
        if not isinstance(chunk, six.binary_type):
            chunk = six.binary_type(chunk, 'utf-8')
        self.recv_bytes += bufsize
        return chunk

    def sendall(self, string):
        """Fake sendall: append string (coerced to bytes) to send_buffer."""
        # Keep the buffer homogeneous by converting text to bytes first.
        if not isinstance(string, six.binary_type):
            string = six.binary_type(string, 'utf-8')
        self.send_buffer += string
        self.send_bytes += len(string)
pypowervm-1.1.24/pypowervm/tests/tasks/util.py0000664000175000017500000000443513571367171021144 0ustar neoneo00000000000000# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pypowervm import adapter as adpt from pypowervm import const as c from pypowervm import exceptions as pvm_exc from pypowervm.tests.test_utils import pvmhttp import pypowervm.wrappers.job as job def load_file(file_name, adapter=None): """Helper method to load the responses from a given location.""" return pvmhttp.load_pvm_resp(file_name, adapter).get_response() def raiseRetryException(): """Used for other tests wishing to raise an exception to a force retry.""" resp = adpt.Response('reqmethod', 'reqpath', c.HTTPStatus.ETAG_MISMATCH, 'reason', 'headers') http_exc = pvm_exc.HttpError(resp) raise http_exc def get_parm_checker(test_obj, exp_uuid, exp_job_parms, exp_job_mappings=[], exp_timeout=None): # Utility method to return a dynamic parameter checker for tests # Build the expected job parameter strings exp_job_parms_str = [job.Job.create_job_parameter(k, v).toxmlstring() for k, v in exp_job_parms] exp_job_parms_str += [ job.Job.create_job_parameter(k, ",".join(v)).toxmlstring() for k, v in exp_job_mappings] def parm_checker(uuid, job_parms=None, timeout=None): # Check simple parms test_obj.assertEqual(exp_uuid, uuid) test_obj.assertEqual(exp_timeout, timeout) # Check the expected and actual number of job parms are equal test_obj.assertEqual(len(exp_job_parms_str), len(job_parms)) # Ensure each parameter is in the list of expected. for parm in job_parms: test_obj.assertIn(parm.toxmlstring(), exp_job_parms_str) # We return our custom checker return parm_checker pypowervm-1.1.24/pypowervm/tests/tasks/test_slot_map.py0000664000175000017500000013601613571367171023045 0ustar neoneo00000000000000# Copyright 2016, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test pypowervm.tasks.slot_map.""" import mock import six import testtools from pypowervm import exceptions as pv_e from pypowervm.tasks import slot_map from pypowervm.tests.test_utils import pvmhttp from pypowervm.utils import lpar_builder as lb from pypowervm.wrappers import iocard as ioc from pypowervm.wrappers import network as net from pypowervm.wrappers import storage as stor from pypowervm.wrappers import virtual_io_server as vios def loadf(wcls, fname): return wcls.wrap(pvmhttp.load_pvm_resp(fname).get_response()) # Load data files just once, since the wrappers will be read-only vio1 = loadf(vios.VIOS, 'fake_vios_ssp_npiv.txt') vio2 = loadf(vios.VIOS, 'fake_vios_mappings.txt') cnafeed1 = loadf(net.CNA, 'cna_feed1.txt') vswitchfeed = loadf(net.VSwitch, 'vswitch_feed.txt') vnicfeed = loadf(ioc.VNIC, 'vnic_feed.txt') class SlotMapTestImplLegacy(slot_map.SlotMapStore): """Legacy subclass overriding load/save/delete directly.""" def __init__(self, inst_key, load=True, load_ret=None): self._load_ret = load_ret super(SlotMapTestImplLegacy, self).__init__(inst_key, load=load) def load(self): return self._load_ret def save(self): pass def delete(self): pass class SlotMapTestImpl(slot_map.SlotMapStore): """New-style subclass overriding _load/_save/_delete.""" def __init__(self, inst_key, load=True, load_ret=None): self._load_ret = load_ret super(SlotMapTestImpl, self).__init__(inst_key, load=load) def _load(self, key): return self._load_ret def _save(self, key, blob): pass def _delete(self, key): pass class TestSlotMapStoreLegacy(testtools.TestCase): """Test 
slot_map.SlotMapStore with a legacy impl.""" def __init__(self, *args, **kwargs): """Initialize with a legacy SlotMapStore implementation.""" super(TestSlotMapStoreLegacy, self).__init__(*args, **kwargs) self.smt_impl = SlotMapTestImplLegacy def test_ioclass_consts(self): """Make sure the IOCLASS constants are disparate.""" constl = [key for key in dir(slot_map.IOCLASS) if not key.startswith('_')] self.assertEqual(len(constl), len(set(constl))) def test_init_calls_load(self): """Ensure SlotMapStore.__init__ calls load or not based on the parm.""" with mock.patch.object(self.smt_impl, 'load') as mock_load: mock_load.return_value = None loads = self.smt_impl('foo') mock_load.assert_called_once_with() self.assertEqual('foo', loads.inst_key) mock_load.reset_mock() doesnt_load = self.smt_impl('bar', load=False) self.assertEqual('bar', doesnt_load.inst_key) mock_load.assert_not_called() @mock.patch('pickle.loads') def test_init_deserialize(self, mock_unpickle): """Ensure __init__ deserializes or not based on what's loaded.""" # By default, load returns None, so nothing to unpickle doesnt_unpickle = self.smt_impl('foo') mock_unpickle.assert_not_called() self.assertEqual({}, doesnt_unpickle.topology) unpickles = self.smt_impl('foo', load_ret='abc123') mock_unpickle.assert_called_once_with('abc123') self.assertEqual(mock_unpickle.return_value, unpickles.topology) @mock.patch('pickle.dumps') @mock.patch('pypowervm.tasks.slot_map.SlotMapStore.topology', new_callable=mock.PropertyMock) def test_serialized(self, mock_topo, mock_pickle): """Validate the serialized property.""" mock_pickle.return_value = 'abc123' smt = self.smt_impl('foo') self.assertEqual('abc123', smt.serialized) mock_pickle.assert_called_once_with(mock_topo.return_value, protocol=2) mock_topo.assert_called_once() @mock.patch('pypowervm.wrappers.managed_system.System.get') @mock.patch('pypowervm.wrappers.network.VSwitch.get') def test_vswitch_id2name(self, mock_vsw_get, mock_sys_get): """Ensure _vswitch_id2name 
caches, and gets the right content.""" mock_vsw_get.return_value = vswitchfeed mock_sys_get.return_value = ['sys'] smt = self.smt_impl('foo') # We didn't cache yet mock_vsw_get.assert_not_called() mock_sys_get.assert_not_called() map1 = smt._vswitch_id2name('adap') # Now we grabbed the REST data mock_vsw_get.assert_called_once_with('adap', parent='sys') mock_sys_get.assert_called_once_with('adap') mock_vsw_get.reset_mock() mock_sys_get.reset_mock() map2 = smt._vswitch_id2name('adap2') # The same data is returned each time self.assertEqual(map2, map1) # The second call didn't re-fetch from REST mock_vsw_get.assert_not_called() mock_sys_get.assert_not_called() # Make sure the data is in the right shape self.assertEqual({0: 'ETHERNET0', 1: 'MGMTSWITCH'}, map1) @mock.patch('pypowervm.wrappers.managed_system.System.get') @mock.patch('pypowervm.wrappers.network.VSwitch.get') @mock.patch('warnings.warn') def test_register_cna(self, mock_warn, mock_vsw_get, mock_sys_get): """Test deprecated register_cna.""" mock_vsw_get.return_value = vswitchfeed mock_sys_get.return_value = ['sys'] smt = self.smt_impl('foo') for cna in cnafeed1: smt.register_cna(cna) self.assertEqual({3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}, smt.topology) # The vswitch_map is cached in the slot_map, so these only get # called once self.assertEqual(mock_vsw_get.call_count, 1) self.assertEqual(mock_sys_get.call_count, 1) self.assertEqual(mock_warn.call_count, 3) @mock.patch('warnings.warn') def test_drop_cna(self, mock_warn): """Test deprecated drop_cna.""" smt = self.smt_impl('foo') smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}} # Drop the first CNA and verify it was removed smt.drop_cna(cnafeed1[0]) self.assertEqual({4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}, smt.topology) # 
Drop all remaining CNAs, including a redundant drop on index 0 for cna in cnafeed1: smt.drop_cna(cna) self.assertEqual({}, smt.topology) self.assertEqual(mock_warn.call_count, 4) @mock.patch('pypowervm.wrappers.managed_system.System.get') @mock.patch('pypowervm.wrappers.network.VSwitch.get') def test_register_vnet(self, mock_vsw_get, mock_sys_get): """Test register_vnet.""" mock_vsw_get.return_value = vswitchfeed mock_sys_get.return_value = ['sys'] smt = self.smt_impl('foo') for vnic in vnicfeed: smt.register_vnet(vnic) for cna in cnafeed1: smt.register_vnet(cna) self.assertEqual({3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}, 7: {'VNIC': {'AE7A25E59A07': None}}, 8: {'VNIC': {'AE7A25E59A08': None}}}, smt.topology) # The vswitch_map is cached in the slot_map, so these only get # called once self.assertEqual(mock_vsw_get.call_count, 1) self.assertEqual(mock_sys_get.call_count, 1) def test_register_vnet_exception(self): """Test register_vnet raises exception without CNA or VNIC.""" smt = self.smt_impl('foo') self.assertRaises(pv_e.InvalidVirtualNetworkDeviceType, smt.register_vnet, None) def test_drop_vnet(self): """Test drop_vnet.""" smt = self.smt_impl('foo') smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}, 7: {'VNIC': {'AE7A25E59A07': None}}, 8: {'VNIC': {'AE7A25E59A08': None}}} # Drop the first CNA and VNIC and verify it was removed smt.drop_vnet(cnafeed1[0]) smt.drop_vnet(vnicfeed[0]) self.assertEqual({4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}, 8: {'VNIC': {'AE7A25E59A08': None}}}, smt.topology) # Drop all remaining VNICs for vnic in vnicfeed: smt.drop_vnet(vnic) self.assertEqual({4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}}, smt.topology) # Drop all remaining CNAs for cna in cnafeed1: 
smt.drop_vnet(cna) self.assertEqual({}, smt.topology) def test_drop_vnet_exception(self): """Test drop_vnet raises exception without CNA or VNIC.""" smt = self.smt_impl('foo') self.assertRaises(pv_e.InvalidVirtualNetworkDeviceType, smt.drop_vnet, None) def test_register_vfc_mapping(self): """Test register_vfc_mapping.""" smt = self.smt_impl('foo') i = 1 for vio in (vio1, vio2): for vfcmap in vio.vfc_mappings: smt.register_vfc_mapping(vfcmap, 'fab%d' % i) i += 1 self.assertEqual({3: {'VFC': {'fab1': None, 'fab10': None, 'fab11': None, 'fab12': None, 'fab13': None, 'fab14': None, 'fab15': None, 'fab16': None, 'fab17': None, 'fab18': None, 'fab19': None, 'fab20': None, 'fab21': None, 'fab22': None, 'fab23': None, 'fab24': None, 'fab25': None, 'fab26': None, 'fab28': None, 'fab29': None, 'fab3': None, 'fab30': None, 'fab31': None, 'fab32': None, 'fab33': None, 'fab4': None, 'fab5': None, 'fab6': None, 'fab7': None, 'fab8': None, 'fab9': None}}, 6: {'VFC': {'fab2': None}}, 8: {'VFC': {'fab27': None}}}, smt.topology) def test_drop_vfc_mapping(self): """Test drop_vfc_mapping.""" # Init data to test with mock_server_adapter = mock.Mock(lpar_slot_num=3) vfcmap = mock.Mock(server_adapter=mock_server_adapter) smt = self.smt_impl('foo') smt._slot_topo = {3: {'VFC': {'fab1': None, 'fab10': None, 'fab7': None, 'fab8': None, 'fab9': None}}, 6: {'VFC': {'fab2': None}}, 8: {'VFC': {'fab27': None}}} # Drop a single slot entry and verify it is removed smt.drop_vfc_mapping(vfcmap, 'fab1') self.assertEqual({3: {'VFC': {'fab10': None, 'fab7': None, 'fab8': None, 'fab9': None}}, 6: {'VFC': {'fab2': None}}, 8: {'VFC': {'fab27': None}}}, smt.topology) # Drop remaining LPAR 3 slot entries and verify they are removed for i in range(7, 11): smt.drop_vfc_mapping(vfcmap, 'fab%s' % str(i)) self.assertEqual({6: {'VFC': {'fab2': None}}, 8: {'VFC': {'fab27': None}}}, smt.topology) def test_register_vscsi_mappings(self): """Test register_vscsi_mappings.""" smt = self.smt_impl('foo') for vio in 
(vio1, vio2): for vscsimap in vio.scsi_mappings: smt.register_vscsi_mapping(vscsimap) self.assertEqual( {2: {'LU': {'274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771' 'd6a32accde003': '0x8500000000000000', '274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec' '14327771522b0': '0x8300000000000000', '274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247' '56e9713a93f90': '0x8400000000000000', '274d7bb790666211e3bc1a00006cae8b01c96f590914bccbc8b7b' '88c37165c0485': '0x8200000000000000'}, 'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA' 'wNTJBOQ==': '0x8600000000000000'}, 'VDisk': {'0300004c7a00007a00000001466c54110f.16': '0x8100000000000000'}, 'VOptMedia': { '0evopt_19bbb46ad15747d79fe08f8464466144': 'vopt_19bbb46ad15747d79fe08f8464466144', '0evopt_2c7aa01349714368a3d040bb0d613a67': 'vopt_2c7aa01349714368a3d040bb0d613a67', '0evopt_2e51e8b4b9f04b159700e654b2436a01': 'vopt_2e51e8b4b9f04b159700e654b2436a01', '0evopt_84d7bfcf44964f398e60254776b94d41': 'vopt_84d7bfcf44964f398e60254776b94d41', '0evopt_de86c46e07004993b412c948bd5047c2': 'vopt_de86c46e07004993b412c948bd5047c2'}}, 3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1': '0x8700000000000000'}}, 65535: {'PV': {'01M0lCTUZsYXNoU3lzdGVtLTk4NDA2MDA1MDc2ODA5OEIxMEI' '4MDgwMDAwMDAwNTAwMDAzMA==': '0x81000000000' '00000'}}}, smt.topology) def test_drop_vscsi_mappings(self): """Test drop_vscsi_mappings.""" # Init objects to test with bstor = mock.Mock(stor.LU, udid='274d7bb790666211e3bc1a00006cae8b01c96f59091' '4bccbc8b7b88c37165c0485') mock_server_adapter = mock.Mock(lpar_slot_num=2) vscsimap = mock.Mock(backing_storage=bstor, server_adapter=mock_server_adapter) smt = self.smt_impl('foo') smt._slot_topo = { 2: {'LU': {'274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771' 'd6a32accde003': None, '274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec' '14327771522b0': None, '274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247' '56e9713a93f90': None, 
'274d7bb790666211e3bc1a00006cae8b01c96f590914bccbc8b7b' '88c37165c0485': None}, 'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA' 'wNTJBOQ==': None}, 'VDisk': {'0300004c7a00007a00000001466c54110f.16': '0x8100000000000000'}, 'VOptMedia': { '0evopt_19bbb46ad15747d79fe08f8464466144': 'vopt_19bbb46ad15747d79fe08f8464466144', '0evopt_2c7aa01349714368a3d040bb0d613a67': 'vopt_2c7aa01349714368a3d040bb0d613a67', '0evopt_2e51e8b4b9f04b159700e654b2436a01': 'vopt_2e51e8b4b9f04b159700e654b2436a01', '0evopt_84d7bfcf44964f398e60254776b94d41': 'vopt_84d7bfcf44964f398e60254776b94d41', '0evopt_de86c46e07004993b412c948bd5047c2': 'vopt_de86c46e07004993b412c948bd5047c2'}}, 3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1': '0x8700000000000000'}} } # Remove a single LU entry and verify it was removed smt.drop_vscsi_mapping(vscsimap) self.assertEqual( {2: {'LU': {'274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771' 'd6a32accde003': None, '274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec' '14327771522b0': None, '274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247' '56e9713a93f90': None}, 'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA' 'wNTJBOQ==': None}, 'VDisk': {'0300004c7a00007a00000001466c54110f.16': '0x8100000000000000'}, 'VOptMedia': { '0evopt_19bbb46ad15747d79fe08f8464466144': 'vopt_19bbb46ad15747d79fe08f8464466144', '0evopt_2c7aa01349714368a3d040bb0d613a67': 'vopt_2c7aa01349714368a3d040bb0d613a67', '0evopt_2e51e8b4b9f04b159700e654b2436a01': 'vopt_2e51e8b4b9f04b159700e654b2436a01', '0evopt_84d7bfcf44964f398e60254776b94d41': 'vopt_84d7bfcf44964f398e60254776b94d41', '0evopt_de86c46e07004993b412c948bd5047c2': 'vopt_de86c46e07004993b412c948bd5047c2'}}, 3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1': '0x8700000000000000'}}}, smt.topology) # Remove all other LPAR 2 LU entries and verify they are removed udids = ['274d7bb790666211e3bc1a00006cae8b013842794fa0b8e9dd771' 'd6a32accde003', '274d7bb790666211e3bc1a00006cae8b0148326cf1e5542c583ec' 
'14327771522b0', '274d7bb790666211e3bc1a00006cae8b01ac18997ab9bc23fb247' '56e9713a93f90'] for udid in udids: bstor.udid = udid smt.drop_vscsi_mapping(vscsimap) self.assertEqual( {2: {'PV': {'01M0lCTTIxNDUzMTI2MDA1MDc2ODAyODIwQTlEQTgwMDAwMDAwMDA' 'wNTJBOQ==': None}, 'VDisk': {'0300004c7a00007a00000001466c54110f.16': '0x8100000000000000'}, 'VOptMedia': { '0evopt_19bbb46ad15747d79fe08f8464466144': 'vopt_19bbb46ad15747d79fe08f8464466144', '0evopt_2c7aa01349714368a3d040bb0d613a67': 'vopt_2c7aa01349714368a3d040bb0d613a67', '0evopt_2e51e8b4b9f04b159700e654b2436a01': 'vopt_2e51e8b4b9f04b159700e654b2436a01', '0evopt_84d7bfcf44964f398e60254776b94d41': 'vopt_84d7bfcf44964f398e60254776b94d41', '0evopt_de86c46e07004993b412c948bd5047c2': 'vopt_de86c46e07004993b412c948bd5047c2'}}, 3: {'VDisk': {'0300025d4a00007a000000014b36d9deaf.1': '0x8700000000000000'}}}, smt.topology) @mock.patch('pypowervm.wrappers.managed_system.System.get') @mock.patch('pypowervm.wrappers.network.VSwitch.get') def test_serialize_unserialize(self, mock_vsw_get, mock_sys_get): """Ensure that saving/loading doesn't corrupt the data.""" mock_vsw_get.return_value = vswitchfeed mock_sys_get.return_value = ['sys'] # Set up a nice, big, complicated source slot map smt1 = self.smt_impl('foo') for cna in cnafeed1: smt1.register_vnet(cna) for vnic in vnicfeed: smt1.register_vnet(vnic) i = 1 for vio in (vio1, vio2): for vscsimap in vio.scsi_mappings: smt1.register_vscsi_mapping(vscsimap) for vfcmap in vio.vfc_mappings: smt1.register_vfc_mapping(vfcmap, 'fab%d' % i) i += 1 # Serialize, and make a new slot map that loads that serialized data smt2 = self.smt_impl('bar', load_ret=smt1.serialized) # Ensure their topologies are identical self.assertEqual(smt1.topology, smt2.topology) def test_max_vslots(self): """Test setting/getting the max_vslots.""" smt = self.smt_impl('foo') # Starts off unset self.assertIsNone(smt.max_vslots) # Can assign initially smt.register_max_vslots(123) self.assertEqual(123, smt.max_vslots) # 
Can overwrite smt.register_max_vslots(234) self.assertEqual(234, smt.max_vslots) # Can throw other stuff in there i = 1 for vio in (vio1, vio2): for vfcmap in vio.vfc_mappings: smt.register_vfc_mapping(vfcmap, 'fab%d' % i) i += 1 # max_vslots still set self.assertEqual(234, smt.max_vslots) # Topology not polluted by max_vslots self.assertEqual({3: {'VFC': {'fab1': None, 'fab10': None, 'fab11': None, 'fab12': None, 'fab13': None, 'fab14': None, 'fab15': None, 'fab16': None, 'fab17': None, 'fab18': None, 'fab19': None, 'fab20': None, 'fab21': None, 'fab22': None, 'fab23': None, 'fab24': None, 'fab25': None, 'fab26': None, 'fab28': None, 'fab29': None, 'fab3': None, 'fab30': None, 'fab31': None, 'fab32': None, 'fab33': None, 'fab4': None, 'fab5': None, 'fab6': None, 'fab7': None, 'fab8': None, 'fab9': None}}, 6: {'VFC': {'fab2': None}}, 8: {'VFC': {'fab27': None}}}, smt.topology) class TestSlotMapStore(TestSlotMapStoreLegacy): """Test slot_map.SlotMapStore with a new-style impl.""" def __init__(self, *args, **kwargs): """Initialize with a new-style SlotMapStore implementation.""" super(TestSlotMapStore, self).__init__(*args, **kwargs) self.smt_impl = SlotMapTestImpl self.load_meth_nm = '_load' def test_init_calls_load(self): """Ensure SlotMapStore.__init__ calls load or not based on the parm. This overrides the legacy test of the same name to ensure that _load gets invoked properly. 
""" with mock.patch.object(self.smt_impl, '_load') as mock_load: mock_load.return_value = None loads = self.smt_impl('foo') mock_load.assert_called_once_with('foo') self.assertEqual('foo', loads.inst_key) mock_load.reset_mock() doesnt_load = self.smt_impl('bar', load=False) self.assertEqual('bar', doesnt_load.inst_key) mock_load.assert_not_called() @mock.patch('pypowervm.tasks.slot_map.SlotMapStore.serialized', new_callable=mock.PropertyMock) def test_save_when_needed(self, mock_ser): """Overridden _save call invoked only when needed.""" with mock.patch.object(self.smt_impl, '_save') as mock_save: smt = self.smt_impl('foo') smt.save() # Nothing changed yet mock_save.assert_not_called() smt.register_vfc_mapping(vio1.vfc_mappings[0], 'fabric') # Not yet... mock_save.assert_not_called() smt.save() # Now it's been called. mock_save.assert_called_once_with('foo', mock_ser.return_value) mock_save.reset_mock() # Saving again has no effect smt.save() mock_save.assert_not_called() # Verify it works on drop too smt.drop_vfc_mapping(vio1.vfc_mappings[0], 'fabric') mock_save.assert_not_called() smt.save() # Now it's been called. mock_save.assert_called_once_with('foo', mock_ser.return_value) mock_save.reset_mock() # Saving again has no effect smt.save() mock_save.assert_not_called() def test_delete(self): """Overridden _delete is called properly when delete is invoked.""" with mock.patch.object(self.smt_impl, '_delete') as mock_delete: smt = self.smt_impl('foo') smt.delete() mock_delete.assert_called_once_with('foo') class TestRebuildSlotMapLegacy(testtools.TestCase): """Test for RebuildSlotMap class with legacy SlotMapStore subclass. Tests BuildSlotMap class's get methods as well. 
""" def __init__(self, *args, **kwargs): """Initialize with a particular SlotMapStore implementation.""" super(TestRebuildSlotMapLegacy, self).__init__(*args, **kwargs) self.smt_impl = SlotMapTestImplLegacy def setUp(self): super(TestRebuildSlotMapLegacy, self).setUp() self.vio1 = mock.Mock(uuid='vios1') self.vio2 = mock.Mock(uuid='vios2') def test_get_mgmt_vea_slot(self): smt = self.smt_impl('foo') # Make sure it returns the next slot available smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'VFC': {'fab1': None}}} rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, ['fab1']) self.assertEqual((None, 7), rsm.get_mgmt_vea_slot()) # Second call should return the same slot, as there is only one mgmt # vif per VM self.assertEqual((None, 7), rsm.get_mgmt_vea_slot()) # Make sure it returns the existing MGMT switch smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}} rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, []) self.assertEqual(('3AEAC528A7E3', 6), rsm.get_mgmt_vea_slot()) # Make sure it returns None if there is no real data smt._slot_topo = {} rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, []) self.assertEqual((None, None), rsm.get_mgmt_vea_slot()) def test_vea_build_out(self): """Test _vea_build_out.""" # Create a slot topology that will be converted to a rebuild map smt = self.smt_impl('foo') smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}} # Run the actual test rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {}) # Verify rebuild map was created successfully self.assertEqual( {'CNA': {'2A2E57A4DE9C': 4, '5E372CFD9E6D': 3}, 'MGMTCNA': {'mac': '3AEAC528A7E3', 'slot': 6}}, rsm._build_map) # Verify the VEA slot can be read by MAC address 
self.assertEqual(3, rsm.get_vea_slot('5E372CFD9E6D')) self.assertEqual(4, rsm.get_vea_slot('2A2E57A4DE9C')) self.assertEqual(None, rsm.get_vea_slot('3AEAC528A7E3')) self.assertEqual(('3AEAC528A7E3', 6), rsm.get_mgmt_vea_slot()) def test_vnic_build_out(self): """Test _vnic_build_out.""" smt = self.smt_impl('foo') smt._slot_topo = {5: {'VNIC': {'72AB8C392CD6': None}}, 6: {'VNIC': {'111111111111': None}}, 7: {'VNIC': {'45F16A97BC7E': None}}} rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {}) self.assertEqual( {'VNIC': {'72AB8C392CD6': 5, '111111111111': 6, '45F16A97BC7E': 7}}, rsm._build_map) self.assertEqual(5, rsm.get_vnet_slot('72AB8C392CD6')) self.assertEqual(6, rsm.get_vnet_slot('111111111111')) self.assertEqual(7, rsm.get_vnet_slot('45F16A97BC7E')) def test_max_vslots(self): """Ensure max_vslots returns the set value, or 10 + highest slot.""" # With max_vslots unset and nothing in the topology... smt = self.smt_impl('foo') rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {}) # ...max_vslots defaults to 64 self.assertEqual(lb.DEF_MAX_SLOT, rsm.get_max_vslots()) # When unset, and the highest registered slot is small... smt._slot_topo = {3: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}} rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {}) # ...max_vslots still defaults to 64 self.assertEqual(lb.DEF_MAX_SLOT, rsm.get_max_vslots()) # When unset, and the highest registered slot is big... smt._slot_topo = {62: {'CNA': {'5E372CFD9E6D': 'ETHERNET0'}}, 4: {'CNA': {'2A2E57A4DE9C': 'ETHERNET0'}}, 6: {'CNA': {'3AEAC528A7E3': 'MGMTSWITCH'}}} rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {}) # ...max_vslots derives to 10 + highest self.assertEqual(72, rsm.get_max_vslots()) # With max_vslots set, even if it's lower than 64... 
smt.register_max_vslots(23) rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], None, {}) # ...max_vslots returns the exact value self.assertEqual(23, rsm.get_max_vslots()) def test_rebuild_fails_w_vopt(self): """Test RebuildSlotMap fails when a Vopt exists in topology.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_W_VOPT self.assertRaises( pv_e.InvalidHostForRebuildInvalidIOType, slot_map.RebuildSlotMap, smt, [self.vio1, self.vio2], VOL_TO_VIO1, {}) def test_rebuild_w_vdisk(self): """Test RebuildSlotMap deterministic.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_W_VDISK rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], VOL_TO_VIO1, {}) # Deterministic. vios1 gets slot 1 for udid in rsm._build_map['VDisk']['vios1']: slot, lua = rsm.get_vscsi_slot(self.vio1, udid) self.assertEqual(1, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_W_VDISK[slot][slot_map.IOCLASS.VDISK][udid], lua) # Deterministic. vios2 gets slot 2 for udid in rsm._build_map['VDisk']['vios2']: slot, lua = rsm.get_vscsi_slot(self.vio2, udid) self.assertEqual(2, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_W_VDISK[slot][slot_map.IOCLASS.VDISK][udid], lua) # The build map won't actually have these as keys but # the get should return None nicely. slot, lua = rsm.get_vscsi_slot(self.vio1, 'vd_udid3') self.assertIsNone(slot) def test_lu_vscsi_build_out_1(self): """Test RebuildSlotMap deterministic.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_LU_1 rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], VOL_TO_VIO1, {}) # Deterministic. vios1 gets slot 1 for udid in rsm._build_map['LU']['vios1']: slot, lua = rsm.get_vscsi_slot(self.vio1, udid) self.assertEqual(1, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_LU_1[slot][slot_map.IOCLASS.LU][udid], lua) # Deterministic. 
vios2 gets slot 2 for udid in rsm._build_map['LU']['vios2']: slot, lua = rsm.get_vscsi_slot(self.vio2, udid) self.assertEqual(2, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_LU_1[slot][slot_map.IOCLASS.LU][udid], lua) # The build map won't actually have these as keys but # the get should return None nicely. slot, lua = rsm.get_vscsi_slot(self.vio1, 'lu_udid4') self.assertIsNone(slot) slot, lua = rsm.get_vscsi_slot(self.vio2, 'lu_udid2') self.assertIsNone(slot) def test_pv_vscsi_build_out_1(self): """Test RebuildSlotMap deterministic.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_PV_1 rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], VOL_TO_VIO1, {}) # Deterministic. vios1 gets slot 1 for udid in rsm._build_map['PV']['vios1']: self.assertEqual( 1, rsm.get_pv_vscsi_slot(self.vio1, udid)) slot, lua = rsm.get_vscsi_slot(self.vio1, udid) self.assertEqual(1, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_PV_1[slot][slot_map.IOCLASS.PV][udid], lua) # Deterministic. vios2 gets slot 2 for udid in rsm._build_map['PV']['vios2']: self.assertEqual( 2, rsm.get_pv_vscsi_slot(self.vio2, udid)) slot, lua = rsm.get_vscsi_slot(self.vio2, udid) self.assertEqual(2, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_PV_1[slot][slot_map.IOCLASS.PV][udid], lua) # The build map won't actually have these as keys but # the get should return None nicely. self.assertIsNone( rsm.get_pv_vscsi_slot(self.vio1, 'pv_udid4')) self.assertIsNone( rsm.get_pv_vscsi_slot(self.vio2, 'pv_udid2')) def test_mix_vscsi_build_out_1(self): """Test RebuildSlotMap deterministic.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_MIX_1 rsm = slot_map.RebuildSlotMap(smt, [self.vio1, self.vio2], VOL_TO_VIO1, {}) # Deterministic. 
vios1 gets slot 1 for udid in rsm._build_map['PV']['vios1']: slot, lua = rsm.get_vscsi_slot(self.vio1, udid) self.assertEqual(1, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_MIX_1[slot][slot_map.IOCLASS.PV][udid], lua) for udid in rsm._build_map['LU']['vios1']: slot, lua = rsm.get_vscsi_slot(self.vio1, udid) self.assertEqual(1, slot) # Make sure we got the right LUA for this UDID self.assertEqual(SCSI_MIX_1[slot][slot_map.IOCLASS.LU][udid], lua) # The build map won't actually have these as keys but # the get should return None nicely. slot, lua = rsm.get_vscsi_slot(self.vio2, 'lu_udid2') self.assertIsNone(slot) slot, lua = rsm.get_vscsi_slot(self.vio2, 'pv_udid2') self.assertIsNone(slot) def test_vscsi_build_out_arbitrary_dest_vioses(self): """Test RebuildSlotMap with multiple candidate dest VIOSes.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_ARB_MAP rsm = slot_map.RebuildSlotMap( smt, [self.vio1, self.vio2], VTV_2V_ARB, {}) # Since this isn't deterministic we want to make sure each UDID # got their slot assigned to one VIOS and not the other. 
expected_map = {'lu_udid1': 47, 'pv_udid2': 9, 'lu_udid3': 23, 'pv_udid4': 56} for udid, eslot in six.iteritems(expected_map): aslot1, lua1 = rsm.get_vscsi_slot(self.vio1, udid) aslot2, lua2 = rsm.get_vscsi_slot(self.vio2, udid) if aslot1 is None: self.assertEqual(eslot, aslot2) if SCSI_ARB_MAP[eslot].get(slot_map.IOCLASS.LU): self.assertEqual( SCSI_ARB_MAP[eslot][slot_map.IOCLASS.LU][udid], lua2) else: self.assertEqual( SCSI_ARB_MAP[eslot][slot_map.IOCLASS.PV][udid], lua2) else: self.assertEqual(eslot, aslot1) self.assertIsNone(aslot2) if SCSI_ARB_MAP[eslot].get(slot_map.IOCLASS.LU): self.assertEqual( SCSI_ARB_MAP[eslot][slot_map.IOCLASS.LU][udid], lua1) else: self.assertEqual( SCSI_ARB_MAP[eslot][slot_map.IOCLASS.PV][udid], lua1) def test_vscsi_build_out_full_coverage(self): """Test rebuild with 2 slots per udid and 2 candidate VIOSes.""" smt = self.smt_impl('foo') smt._slot_topo = SCSI_PV_2S_2V_MAP rsm = slot_map.RebuildSlotMap( smt, [self.vio1, self.vio2], VTV_2V_ARB, {}) expected_map = {'lu_udid1': [5, 23], 'pv_udid2': [6, 24], 'lu_udid3': [7, 25], 'pv_udid4': [8, 26]} # We know what slots the UDIDs should get but not what VIOSes they'll # belong to. So we'll assert that one VIOS gets 1 slot and the other # VIOS gets the other for each UDID. 
# NOTE(review): this span resumes mid-method — the enclosing test method of
# TestRebuildSlotMapLegacy (which builds `rsm` and `expected_map`) begins
# before this view.  Line structure below is reconstructed; tokens unchanged.
        # Each UDID was stored with a slot on each of two VIOSes; verify the
        # rebuild map assigned the pair of expected slots across vio1/vio2
        # (order may flip, so both orderings are accepted).
        for udid, (eslot1, eslot2) in six.iteritems(expected_map):
            if rsm.get_pv_vscsi_slot(self.vio1, udid) != eslot1:
                self.assertEqual(
                    eslot1, rsm.get_pv_vscsi_slot(self.vio2, udid))
                self.assertEqual(
                    eslot2, rsm.get_pv_vscsi_slot(self.vio1, udid))
            else:
                # We already know vio1 got the first slot
                self.assertEqual(
                    eslot2, rsm.get_pv_vscsi_slot(self.vio2, udid))
            aslot1, lua1 = rsm.get_vscsi_slot(self.vio1, udid)
            aslot2, lua2 = rsm.get_vscsi_slot(self.vio2, udid)
            if eslot1 == aslot1:
                self.assertEqual(eslot2, aslot2)
                self.assertEqual(
                    SCSI_PV_2S_2V_MAP[eslot1][slot_map.IOCLASS.PV][udid], lua1)
                self.assertEqual(
                    SCSI_PV_2S_2V_MAP[eslot2][slot_map.IOCLASS.PV][udid], lua2)
            else:
                self.assertEqual(eslot1, aslot2)
                self.assertEqual(eslot2, aslot1)
                self.assertEqual(
                    SCSI_PV_2S_2V_MAP[eslot1][slot_map.IOCLASS.PV][udid], lua2)
                self.assertEqual(
                    SCSI_PV_2S_2V_MAP[eslot2][slot_map.IOCLASS.PV][udid], lua1)

    def test_pv_udid_not_found_on_dest(self):
        """Test RebuildSlotMap fails when UDID not found on dest."""
        smt = self.smt_impl('foo')
        smt._slot_topo = SCSI_PV_3
        # BAD_VOL_TO_VIO_FOR_PV_3 omits pv_udid3, so the rebuild cannot place
        # that volume on any destination VIOS.
        self.assertRaises(
            pv_e.InvalidHostForRebuildNoVIOSForUDID, slot_map.RebuildSlotMap,
            smt, [self.vio1, self.vio2], BAD_VOL_TO_VIO_FOR_PV_3, {})

    def test_more_pv_udids_than_dest_vioses_fails(self):
        """Test RebuildSlotMap fails when there's not enough VIOSes."""
        smt = self.smt_impl('foo')
        smt._slot_topo = SCSI_PV_1
        self.assertRaises(
            pv_e.InvalidHostForRebuildNotEnoughVIOS, slot_map.RebuildSlotMap,
            smt, [self.vio1, self.vio2], VOL_TO_VIO_1_VIOS_PV1, {})

    def test_npiv_build_out(self):
        """Test _npiv_build_out."""
        # Create a topology that will be converted to a rebuild map
        smt = self.smt_impl('foo')
        vios1 = mock.Mock()
        vios1.get_pfc_wwpns = mock.Mock(return_value=['wwpn1'])
        vios2 = mock.Mock()
        vios2.get_pfc_wwpns = mock.Mock(return_value=['wwpn2'])
        smt._slot_topo = {
            3: {'VFC': {'fab1': None}}, 4: {'VFC': {'fab7': None}},
            5: {'VFC': {'fab10': None}}, 6: {'VFC': {'fab8': None}},
            7: {'VFC': {'fab9': None}}, 8: {'VFC': {'fab9': None}},
            9: {'VFC': {'fab1': None}}, 10: {'VFC': {'fab9': None}},
            11: {'VFC': {'fab1': None}}, 12: {'VFC': {'fab7': None}},
            113: {'VFC': {'fab7': None}}, 114: {'VFC': {'fab7': None}}}

        # Run the actual test and verify an exception is raised
        # (the fabric list is missing fabrics present in the topology)
        self.assertRaises(
            pv_e.InvalidHostForRebuildFabricsNotFound,
            slot_map.RebuildSlotMap, smt, [vios1, vios2], None, ['fab1'])

        # Run the actual test
        fabrics = ['fab1', 'fab2', 'fab7', 'fab8', 'fab9', 'fab10', 'fab27']
        rsm = slot_map.RebuildSlotMap(smt, [vios1, vios2], None, fabrics)

        # Verify rebuild map was created successfully
        self.assertEqual(
            {'VFC': {'fab1': [3, 9, 11], 'fab10': [5], 'fab2': [],
                     'fab27': [], 'fab7': [4, 12, 113, 114], 'fab8': [6],
                     'fab9': [7, 8, 10]}}, rsm._build_map)

        # Verify the getters return the slots correctly
        self.assertEqual([3, 9, 11], rsm.get_vfc_slots('fab1', 3))
        self.assertEqual([4, 12, 113, 114], rsm.get_vfc_slots('fab7', 4))
        self.assertEqual([6], rsm.get_vfc_slots('fab8', 1))
        self.assertEqual([7, 8, 10], rsm.get_vfc_slots('fab9', 3))
        self.assertEqual([5], rsm.get_vfc_slots('fab10', 1))
        self.assertEqual([], rsm.get_vfc_slots('fab2', 0))
        self.assertEqual([], rsm.get_vfc_slots('fab27', 0))

        # Check None paths
        self.assertEqual([], rsm.get_vfc_slots('badfab', 0))
        self.assertEqual([None], rsm.get_vfc_slots('badfab', 1))
        self.assertEqual([None, None], rsm.get_vfc_slots('badfab', 2))

        # Check error path.
        self.assertRaises(pv_e.InvalidHostForRebuildSlotMismatch,
                          rsm.get_vfc_slots, 'fab1', 2)


class TestRebuildSlotMap(TestRebuildSlotMapLegacy):
    """Test for RebuildSlotMap class with new-style SlotMapStore subclass.

    Tests BuildSlotMap class's get methods as well.
    """

    def __init__(self, *args, **kwargs):
        """Initialize with a particular SlotMapStore implementation."""
        super(TestRebuildSlotMap, self).__init__(*args, **kwargs)
        # Re-runs the whole legacy suite against the new-style test impl.
        self.smt_impl = SlotMapTestImpl


# Canned slot topologies: outer key is the slot number; inner key is the I/O
# class (VOPT/VDISK/LU/PV); innermost maps device UDID -> LUA.
SCSI_W_VOPT = {
    1: {slot_map.IOCLASS.VOPT: {slot_map.IOCLASS.VOPT: 'vopt_name'},
        slot_map.IOCLASS.PV: {'pv_udid1': 'pv_lua_1',
                              'pv_udid2': 'pv_lua_2'}}}

SCSI_W_VDISK = {
    1: {slot_map.IOCLASS.VDISK: {'vd_udid1': 'vd_lua_1',
                                 'vd_udid2': 'vd_lua_2'},
        slot_map.IOCLASS.PV: {'pv_udid1': 'pv_lua_1',
                              'pv_udid2': 'pv_lua_2'}},
    2: {slot_map.IOCLASS.VDISK: {'vd_udid1': 'vd_lua_1',
                                 'vd_udid2': 'vd_lua_2'}}}

SCSI_LU_1 = {
    1: {slot_map.IOCLASS.LU: {'lu_udid1': 'lu_lua_1',
                              'lu_udid2': 'lu_lua_2',
                              'lu_udid3': 'lu_lua_3'}},
    2: {slot_map.IOCLASS.LU: {'lu_udid1': 'lu_lua_1',
                              'lu_udid3': 'lu_lua_3',
                              'lu_udid4': 'lu_lua_4'}}}

SCSI_PV_1 = {
    1: {slot_map.IOCLASS.PV: {'pv_udid1': 'pv_lua_1',
                              'pv_udid2': 'pv_lua_2',
                              'pv_udid3': 'pv_lua_3'}},
    2: {slot_map.IOCLASS.PV: {'pv_udid1': 'pv_lua_1',
                              'pv_udid3': 'pv_lua_3',
                              'pv_udid4': 'pv_lua_4'}}}

SCSI_MIX_1 = {
    1: {slot_map.IOCLASS.LU: {'lu_udid1': 'lu_lua_1',
                              'lu_udid2': 'lu_lua_2'},
        slot_map.IOCLASS.PV: {'pv_udid1': 'pv_lua_1',
                              'pv_udid2': 'pv_lua_2'}}}

SCSI_ARB_MAP = {
    47: {slot_map.IOCLASS.LU: {'lu_udid1': 'lu_lua_1'}},
    9: {slot_map.IOCLASS.PV: {'pv_udid2': 'pv_lua_2'}},
    23: {slot_map.IOCLASS.LU: {'lu_udid3': 'lu_lua_3'}},
    56: {slot_map.IOCLASS.PV: {'pv_udid4': 'pv_lua_4'}}}

# Two slots per volume (dual-VIOS layout); note some PV entries deliberately
# carry 'lu_udid*' keys.
SCSI_PV_2S_2V_MAP = {
    5: {slot_map.IOCLASS.PV: {'lu_udid1': 'pv_lua_1'}},
    6: {slot_map.IOCLASS.PV: {'pv_udid2': 'pv_lua_2'}},
    7: {slot_map.IOCLASS.PV: {'lu_udid3': 'pv_lua_3'}},
    8: {slot_map.IOCLASS.PV: {'pv_udid4': 'pv_lua_4'}},
    23: {slot_map.IOCLASS.PV: {'lu_udid1': 'pv_lua_1'}},
    24: {slot_map.IOCLASS.PV: {'pv_udid2': 'pv_lua_2'}},
    25: {slot_map.IOCLASS.PV: {'lu_udid3': 'pv_lua_3'}},
    26: {slot_map.IOCLASS.PV: {'pv_udid4': 'pv_lua_4'}}}

SCSI_PV_3 = {
    23: {slot_map.IOCLASS.PV: {'pv_udid1': 'pv_lua_1'}},
    12: {slot_map.IOCLASS.PV: {'pv_udid2': 'pv_lua_2'}},
    4: {slot_map.IOCLASS.PV: {'pv_udid3': 'pv_lua_3'}}}

# Volume-to-VIOS maps: UDID -> list of destination VIOS names hosting it.
# "BAD" because pv_udid3 (present in SCSI_PV_3) has no entry at all.
BAD_VOL_TO_VIO_FOR_PV_3 = {
    'pv_udid1': ['vios1', 'vios2'],
    'pv_udid2': ['vios1', 'vios2']}

VOL_TO_VIO1 = {
    'lu_udid1': ['vios1', 'vios2'],
    'lu_udid2': ['vios1'],
    'lu_udid3': ['vios1', 'vios2'],
    'lu_udid4': ['vios2'],
    'pv_udid1': ['vios1', 'vios2'],
    'pv_udid2': ['vios1'],
    'pv_udid3': ['vios1', 'vios2'],
    'pv_udid4': ['vios2'],
    'vd_udid1': ['vios1', 'vios2'],
    'vd_udid2': ['vios1', 'vios2']}

VOL_TO_VIO2 = {
    'pv_udid1': ['vios1', 'vios2'],
    'pv_udid2': ['vios1'],
    'pv_udid3': ['vios1', 'vios2'],
    'pv_udid4': ['vios2']}

# Every volume only available on one VIOS - not enough for a dual-VIOS topo.
VOL_TO_VIO_1_VIOS_PV1 = {
    'pv_udid1': ['vios1'],
    'pv_udid2': ['vios1'],
    'pv_udid3': ['vios1'],
    'pv_udid4': ['vios1']}

VTV_2V_ARB = {
    'lu_udid1': ['vios1', 'vios2'],
    'pv_udid2': ['vios1', 'vios2'],
    'lu_udid3': ['vios1', 'vios2'],
    'pv_udid4': ['vios1', 'vios2']}
# NOTE(review): tar archive member header follows (this file is a flattened
# tar dump, not a single Python module) - preserved as-is.
pypowervm-1.1.24/pypowervm/tests/tasks/test_power.py0000664000175000017500000005522613571367171022366 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
import testtools

import pypowervm.exceptions as pexc
from pypowervm.tasks import power
import pypowervm.tasks.power_opts as popts
import pypowervm.tests.test_fixtures as fx
import pypowervm.wrappers.base_partition as pvm_bp
import pypowervm.wrappers.logical_partition as pvm_lpar


class TestPower(testtools.TestCase):
    """Tests for pypowervm.tasks.power (PowerOn/PowerOff Job flows)."""

    def setUp(self):
        super(TestPower, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt

        # Make it easier to validate job params: create_job_parameter returns a
        # simple 'name=value' string.
        mock_crt_jparm = self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.job.Job.create_job_parameter')).mock
        mock_crt_jparm.side_effect = (
            lambda name, value, cdata=False: '%s=%s' % (name, value))

        # Patch Job.wrap to return a mocked Job wrapper
        mock_job = mock.Mock()
        self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.job.Job.wrap')).mock.return_value = mock_job
        # Every power operation ultimately calls this mock.
        self.run_job = mock_job.run_job

    def validate_run(self, part, ex_suff="PowerOff", ex_parms=None,
                     ex_timeout=1800, ex_synch=True, result='', nxt=None):
        """Return side effect method to validate Adapter.read and Job.run_job.

        :param part: (Mock) partition wrapper.
        :param ex_suff: Expected Job suffix - "PowerOn" or "PowerOff"
        :param ex_parms: Set of expected JobParameter 'name=value' strings.
        :param ex_timeout: Expected timeout (int, seconds).
        :param ex_synch: Expected value of the 'synchronous' flag.
        :param result: The desired result of the run_job call.  May be None
                       (the run_job call "succeeded") or an instance of an
                       exception to be raised (either JobRequestTimedOut or
                       JobRequestFailed).
        :param nxt: When chaining side effects, pass the method to be assigned
                    to the run_job side effect after this side effect runs.
                    Typically the return from another validate_run() call.
        :return: A method suitable for assigning to self.run_job.side_effect.
        """
        # NOTE(review): the default for `result` is '' (falsey), not None as
        # the docstring says - both mean "succeeded"; confirm against caller.
        def run_job_seff(uuid, job_parms=None, timeout=None, synchronous=None):
            # We fetched the Job template with the correct bits of the
            # partition wrapper and the correct suffix
            self.adpt.read.assert_called_once_with(
                part.schema_type, part.uuid, suffix_type='do',
                suffix_parm=ex_suff)
            # Reset for subsequent runs
            self.adpt.reset_mock()
            self.assertEqual(part.uuid, uuid)
            # JobParameter order doesn't matter
            self.assertEqual(ex_parms or set(), set(job_parms))
            self.assertEqual(ex_timeout, timeout)
            self.assertEqual(ex_synch, synchronous)
            if nxt:
                self.run_job.side_effect = nxt
            if result:
                raise result
        return run_job_seff

    @staticmethod
    def etimeout():
        """Returns a JobRequestTimedOut exception."""
        return pexc.JobRequestTimedOut(operation_name='foo', seconds=1800)

    @staticmethod
    def efail(error='error'):
        """Returns a JobRequestFailed exception."""
        return pexc.JobRequestFailed(operation_name='foo', error=error)

    def mock_partition(self, env=pvm_bp.LPARType.AIXLINUX,
                       rmc_state=pvm_bp.RMCState.ACTIVE, mgmt=False):
        """Returns a mocked partition with the specified properties."""
        return mock.Mock(adapter=self.adpt, env=env, rmc_state=rmc_state,
                         is_mgmt_partition=mgmt)

    def test_pwrop_start(self):
        """Test PowerOp.start."""
        part = self.mock_partition()

        # Default params, success
        self.run_job.side_effect = self.validate_run(part, ex_suff="PowerOn")
        power.PowerOp.start(part)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Additional params, timeout
        self.run_job.side_effect = self.validate_run(
            part, ex_suff="PowerOn", ex_parms={'foo=bar', 'one=two'},
            result=self.etimeout())
        self.assertRaises(
            pexc.VMPowerOnTimeout, power.PowerOp.start, part,
            opts=popts.PowerOnOpts(legacy_add_parms={'foo': 'bar',
                                                     'one': 'two'}))
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Asynchronous, failure
        self.run_job.side_effect = self.validate_run(
            part, ex_suff="PowerOn", ex_synch=False, result=self.efail())
        self.assertRaises(pexc.VMPowerOnFailure, power.PowerOp.start, part,
                          synchronous=False)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Specified timeout, already on
        # (HSCL3681 is treated as "already powered on" => not an error)
        self.run_job.side_effect = self.validate_run(
            part, ex_suff="PowerOn", ex_timeout=10,
            result=self.efail('HSCL3681'))
        power.PowerOp.start(part, timeout=10)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

    def test_pwrop_stop(self):
        """Test PowerOp.stop."""
        # If RMC is down, VSP normal - make sure the 'immediate' flag goes away
        part = self.mock_partition(rmc_state=pvm_bp.RMCState.INACTIVE)
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=shutdown'})
        power.PowerOp.stop(
            part, opts=popts.PowerOffOpts().immediate().soft_detect(part))
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Default parameters - the method figures out whether to do OS shutdown
        part = self.mock_partition()
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=osshutdown', 'immediate=true'})
        power.PowerOp.stop(
            part, opts=popts.PowerOffOpts().immediate().soft_detect(part))
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Non-default optional params ignored, timeout
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=osshutdown', 'immediate=true',
                            'restart=true'},
            ex_timeout=100, ex_synch=False, result=self.etimeout())
        self.assertRaises(
            pexc.VMPowerOffTimeout, power.PowerOp.stop, part,
            opts=popts.PowerOffOpts(legacy_add_parms={
                'one': 1, 'foo': 'bar'}).os_immediate().restart(),
            timeout=100, synchronous=False)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # VSP normal, fail
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=shutdown'}, result=self.efail())
        self.assertRaises(
            pexc.VMPowerOffFailure, power.PowerOp.stop, part,
            opts=popts.PowerOffOpts().vsp_normal())
        self.assertEqual(1, self.run_job.call_count)

    def test_pwrop_stop_no_rmc(self):
        """Test PowerOp.stop with bad RMC state."""
        part = self.mock_partition(rmc_state=pvm_bp.RMCState.INACTIVE)
        self.assertRaises(pexc.OSShutdownNoRMC, power.PowerOp.stop, part,
                          opts=popts.PowerOffOpts().os_normal())
        self.run_job.assert_not_called()

    def test_pwron(self):
        """Test the power_on method."""
        lpar = self.mock_partition()
        self.run_job.side_effect = self.validate_run(lpar, "PowerOn")
        power.power_on(lpar, None)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Try optional parameters
        self.run_job.side_effect = self.validate_run(
            lpar, "PowerOn", ex_parms={
                'bootmode=sms', 'iIPLsource=a', 'remove_optical_name=testVopt',
                'remove_optical_time=30'},
            ex_synch=False)
        power.power_on(
            lpar, None, add_parms={
                power.BootMode.KEY: power.BootMode.SMS,
                pvm_lpar.IPLSrc.KEY: pvm_lpar.IPLSrc.A,
                power.RemoveOptical.KEY_TIME: 30,
                power.RemoveOptical.KEY_NAME: 'testVopt'},
            synchronous=False)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Job timeout, IBMi, implicit remove_optical_time
        ibmi = self.mock_partition(env=pvm_bp.LPARType.OS400)
        self.run_job.side_effect = self.validate_run(
            ibmi, "PowerOn", ex_parms={'remove_optical_name=test',
                                       'remove_optical_time=0'},
            result=self.etimeout())
        self.assertRaises(pexc.VMPowerOnTimeout, power.power_on, ibmi, None,
                          add_parms=power.RemoveOptical.bld_map(name="test"))
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Job failure, VIOS partition, explicit remove_optical_time
        vios = self.mock_partition(env=pvm_bp.LPARType.VIOS)
        self.run_job.side_effect = self.validate_run(
            vios, "PowerOn", ex_parms={'remove_optical_name=test2',
                                       'remove_optical_time=25'},
            result=self.efail())
        self.assertRaises(
            pexc.VMPowerOnFailure, power.power_on, vios, None,
            add_parms=power.RemoveOptical.bld_map(name="test2", time=25))
        self.assertEqual(1, self.run_job.call_count)

    def test_pwron_already_on(self):
        """PowerOn when the system is already powered on."""
        part = self.mock_partition()
        for prefix in power._ALREADY_POWERED_ON_ERRS:
            self.run_job.side_effect = self.validate_run(
                part, ex_suff="PowerOn", result=self.efail(
                    error="Something %s Something else" % prefix))
            power.power_on(part, None)
            self.assertEqual(1, self.run_job.call_count)
            self.run_job.reset_mock()

    def test_pwroff_force_immed(self):
        """Test power_off with force_immediate=Force.TRUE."""
        # PowerOff with force-immediate works the same regardless of partition
        # type, RMC state, or management partition status.
        for env in (pvm_bp.LPARType.OS400, pvm_bp.LPARType.AIXLINUX,
                    pvm_bp.LPARType.VIOS):
            for rmc in (pvm_bp.RMCState.ACTIVE, pvm_bp.RMCState.BUSY,
                        pvm_bp.RMCState.INACTIVE):
                for mgmt in (True, False):
                    part = self.mock_partition(env=env, rmc_state=rmc,
                                               mgmt=mgmt)
                    self.run_job.side_effect = self.validate_run(
                        part,
                        ex_parms={'operation=shutdown', 'immediate=true'})
                    power.power_off(part, None,
                                    force_immediate=power.Force.TRUE)
                    self.assertEqual(1, self.run_job.call_count)
                    self.run_job.reset_mock()

        # Restart, timeout, additional params ignored
        part = self.mock_partition()
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=shutdown', 'immediate=true',
                            'restart=true'},
            ex_timeout=10, result=self.etimeout())
        self.assertRaises(pexc.VMPowerOffTimeout, power.power_off, part, None,
                          force_immediate=power.Force.TRUE, restart=True,
                          timeout=10, add_parms=dict(one=1))
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # Failure
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=shutdown', 'immediate=true'},
            result=self.efail())
        self.assertRaises(pexc.VMPowerOffFailure, power.power_off, part, None,
                          force_immediate=power.Force.TRUE)
        self.assertEqual(1, self.run_job.call_count)

    def test_pwroff_soft_ibmi_norm(self):
        """Soft PowerOff flow, IBMi, normal (no immediate)."""
        part = self.mock_partition(env=pvm_bp.LPARType.OS400)
        # This works the same whether intervening Job exceptions are Timeout or
        # Failure.
        for exc in (self.etimeout(), self.efail()):
            # Each validate_run chains to the next via `nxt`, modeling the
            # four-step escalation of the progressive power-off.
            self.run_job.side_effect = (
                # OS normal
                self.validate_run(
                    part, ex_parms={'operation=osshutdown'}, ex_timeout=100,
                    result=exc,
                    # OS immediate (timeout is defaulted from this point)
                    nxt=self.validate_run(
                        part,
                        ex_parms={'operation=osshutdown', 'immediate=true'},
                        result=exc,
                        # VSP normal
                        nxt=self.validate_run(
                            part, ex_parms={'operation=shutdown'}, result=exc,
                            # VSP hard (default timeout)
                            nxt=self.validate_run(
                                part, ex_parms={
                                    'operation=shutdown', 'immediate=true'}))))
            )
            # Run it
            power.power_off(part, None, timeout=100)
            self.assertEqual(4, self.run_job.call_count)
            self.run_job.reset_mock()

        # If one of the interim calls succeeds, the operation succeeds.
        self.run_job.side_effect = (
            # OS normal
            self.validate_run(
                part, ex_parms={'operation=osshutdown'}, result=self.efail(),
                # OS immediate (timeout is defaulted from this point)
                nxt=self.validate_run(
                    part, ex_parms={'operation=osshutdown', 'immediate=true'},
                    result=self.etimeout(),
                    # VSP normal - succeeds
                    nxt=self.validate_run(
                        part, ex_parms={'operation=shutdown'},
                        # Not reached
                        nxt=self.fail))))
        power.power_off(part, None)
        self.assertEqual(3, self.run_job.call_count)

    def test_pwroff_soft_standard_timeout(self):
        """Soft PowerOff flow, non-IBMi, with timeout."""
        # When OS shutdown times out, go straight to VSP hard.
        part = self.mock_partition()
        self.run_job.side_effect = (
            # OS normal. Non-IBMi always adds immediate.
            self.validate_run(
                part, ex_parms={'operation=osshutdown', 'immediate=true'},
                ex_timeout=100, result=self.etimeout(),
                # VSP hard
                nxt=self.validate_run(
                    part, ex_parms={'operation=shutdown', 'immediate=true'}))
        )
        # Run it
        power.power_off(part, None, timeout=100)
        self.assertEqual(2, self.run_job.call_count)
        self.run_job.reset_mock()

        # Same if invoked with immediate. But since we're running again, add
        # restart and another param; make sure restart comes through but the
        # bogus one is ignored.
        self.run_job.side_effect = (
            # OS immediate (non-IBMi always adds immediate).
            self.validate_run(
                part, ex_parms={'operation=osshutdown', 'immediate=true',
                                'restart=true'},
                ex_timeout=200, result=self.etimeout(),
                # VSP hard
                nxt=self.validate_run(
                    part, ex_parms={'operation=shutdown', 'immediate=true',
                                    'restart=true'}))
        )
        # Run it
        power.power_off(part, None, timeout=200, restart=True,
                        add_parms={'foo': 'bar'})
        self.assertEqual(2, self.run_job.call_count)

    def test_pwroff_soft_no_retry(self):
        """Soft PowerOff, no retry."""
        # When OS shutdown fails with NO_RETRY, fail (no soft flow)
        # IBMi
        part = self.mock_partition(env=pvm_bp.LPARType.OS400)
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=osshutdown'}, result=self.efail())
        self.assertRaises(pexc.VMPowerOffFailure, power.power_off, part, None,
                          force_immediate=power.Force.NO_RETRY)
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # non-IBMi
        part = self.mock_partition()
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=osshutdown', 'immediate=true'},
            result=self.efail())
        self.assertRaises(pexc.VMPowerOffFailure, power.power_off, part, None,
                          force_immediate=power.Force.NO_RETRY)
        self.assertEqual(1, self.run_job.call_count)

    def test_pwroff_soft_standard_fail(self):
        """Soft PowerOff flow, non-IBMi, with Job failure."""
        # When OS shutdown fails (non-timeout), we try VSP normal first.
        part = self.mock_partition()
        self.run_job.side_effect = (
            # OS immediate (non-IBMi always adds immediate).
            # Make sure restart percolates through, bogus params ignored.
            self.validate_run(
                part, ex_parms={'operation=osshutdown', 'immediate=true',
                                'restart=true'},
                ex_timeout=300, result=self.efail(),
                # VSP normal, timeout reset to default
                nxt=self.validate_run(
                    part, ex_parms={'operation=shutdown', 'restart=true'},
                    result=self.efail(),
                    # VSP hard
                    nxt=self.validate_run(
                        part, ex_parms={'operation=shutdown', 'immediate=true',
                                        'restart=true'})))
        )
        power.power_off(part, None, timeout=300, restart=True,
                        add_parms={'foo': 'bar'})
        self.assertEqual(3, self.run_job.call_count)

    def test_pwroff_soft_standard_no_rmc_no_retry(self):
        """Non-IBMi soft PowerOff does VSP normal if RMC is down; no retry."""
        # Behavior is the same for INACTIVE or BUSY
        for rmc in (pvm_bp.RMCState.INACTIVE, pvm_bp.RMCState.BUSY):
            part = self.mock_partition(rmc_state=rmc)
            self.run_job.side_effect = self.validate_run(
                part, ex_parms={'operation=shutdown'}, result=self.efail())
            self.assertRaises(
                pexc.VMPowerOffFailure, power.power_off, part, None,
                force_immediate=power.Force.NO_RETRY)
            self.assertEqual(1, self.run_job.call_count)
            self.run_job.reset_mock()

            # Job timeout & failure do the same (except for final exception).
            self.run_job.side_effect = self.validate_run(
                part, ex_parms={'operation=shutdown'}, result=self.etimeout())
            self.assertRaises(
                pexc.VMPowerOffTimeout, power.power_off, part, None,
                force_immediate=power.Force.NO_RETRY)
            self.assertEqual(1, self.run_job.call_count)
            self.run_job.reset_mock()

    def test_pwroff_already_off(self):
        """PowerOff when the system is already powered off."""
        part = self.mock_partition()
        for prefix in power._ALREADY_POWERED_OFF_ERRS:
            self.run_job.side_effect = self.validate_run(
                part, ex_parms={'operation=osshutdown', 'immediate=true'},
                result=self.efail(error="Foo %s bar" % prefix))
            power.power_off(part, None)
            self.assertEqual(1, self.run_job.call_count)
            self.run_job.reset_mock()

            # If restart was specified, this is a failure. (Force, to KISS)
            self.run_job.side_effect = self.validate_run(
                part, ex_parms={'operation=shutdown', 'immediate=true',
                                'restart=true'},
                result=self.efail(error="Foo %s bar" % prefix))
            self.assertRaises(pexc.VMPowerOffFailure, power.power_off, part,
                              None, restart=True,
                              force_immediate=power.Force.TRUE)
            self.assertEqual(1, self.run_job.call_count)
            self.run_job.reset_mock()

    def test_pwroff_new_opts(self):
        """Test power_off where add_parms is PowerOffOpts (not legacy)."""
        part = self.mock_partition()
        # VSP hard
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=shutdown', 'immediate=true'})
        power.power_off(part, None, add_parms=popts.PowerOffOpts().vsp_hard())
        self.assertEqual(1, self.run_job.call_count)
        self.run_job.reset_mock()

        # VSP normal
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=shutdown'})
        power.power_off(part, None,
                        add_parms=popts.PowerOffOpts().vsp_normal())
        self.run_job.reset_mock()

        # OS immediate
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=osshutdown', 'immediate=true'})
        power.power_off(part, None,
                        add_parms=popts.PowerOffOpts().os_immediate())
        self.run_job.reset_mock()

        # OS normal
        self.run_job.side_effect = self.validate_run(
            part, ex_parms={'operation=osshutdown'})
        power.power_off(part, None,
                        add_parms=popts.PowerOffOpts().os_normal())

    @mock.patch('pypowervm.tasks.power._power_off_progressive')
    def test_pwroff_progressive(self, mock_prog_internal):
        # The internal _power_off_progressive is exercised via the existing
        # tests for power_off. This test just ensures the public
        # power_off_progressive calls it appropriately.
        # Default kwargs
        power.power_off_progressive('part')
        mock_prog_internal.assert_called_once_with(
            'part', 1800, False, ibmi_immed=False)
        mock_prog_internal.reset_mock()

        # Non-default kwargs
        power.power_off_progressive('part', restart=True, ibmi_immed=True,
                                    timeout=10)
        mock_prog_internal.assert_called_once_with(
            'part', 10, True, ibmi_immed=True)
# NOTE(review): tar archive member header follows - preserved as-is.
pypowervm-1.1.24/pypowervm/tests/tasks/test_cluster_ssp.py0000664000175000017500000004543513571367171023575 0ustar neoneo00000000000000# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
import unittest
import uuid

import pypowervm.entities as ent
import pypowervm.tasks.cluster_ssp as cs
import pypowervm.tasks.storage as tsk_st
import pypowervm.tests.tasks.util as tju
from pypowervm.tests.test_utils import test_wrapper_abc as twrap
import pypowervm.util as u
import pypowervm.wrappers.cluster as clust
import pypowervm.wrappers.job as jwrap
import pypowervm.wrappers.storage as stor

# Canned GET response file: the Cluster "Create" Job template.
CREATE_CLUSTER = 'cluster_create_job_template.txt'


class TestClusterSSP(unittest.TestCase):
    """Tests for cluster_ssp.crt_cluster_ssp."""

    @mock.patch('pypowervm.wrappers.job.Job.delete_job')
    @mock.patch('pypowervm.wrappers.job.Job._monitor_job')
    @mock.patch('pypowervm.wrappers.job.Job.job_status')
    @mock.patch('pypowervm.adapter.Adapter')
    def test_crt_cluster_ssp(self, mock_adp, mock_status, mock_monitor_job,
                             mock_del_job):
        # Load up GET Cluster/do/Create (job template)
        mock_adp.read.return_value = tju.load_file(CREATE_CLUSTER, mock_adp)
        # We'll pretend the job ran and completed successfully
        mock_monitor_job.return_value = False
        mock_status.__get__ = mock.Mock(
            return_value=jwrap.JobStatus.COMPLETED_OK)

        # Mock Job.create_job to check job parameter values
        def create_job(job_el, entry_type, *args, **kwargs):
            self.assertEqual(entry_type, clust.Cluster.schema_type)
            job = jwrap.Job.wrap(ent.Entry({}, job_el, None))
            param_vals = job._get_vals(u.xpath(
                'JobParameters', 'JobParameter', 'ParameterValue'))
            # NOTE(review): the expected values below look like XML payloads
            # whose tags were stripped when this copy of the file was
            # produced - they are preserved verbatim here, but should be
            # verified against the upstream pypowervm source.
            self.assertEqual(
                param_vals[0],
                'clust_namerepos_pv_namevios15XXXXYYYZZZZZZZ')
            self.assertEqual(
                param_vals[1],
                '<'
                'uom:Metadata>hdisk1'
                'hdisk2hdisk3ssp'
                '_name')
            return mock.MagicMock()
        mock_adp.create_job.side_effect = create_job

        node = clust.Node.bld(
            mock_adp, hostname='vios1', lpar_id=5, mtms='XXXX-YYY*ZZZZZZZ',
            vios_uri='https://a.example.com:12443/rest/api/uom/VirtualIOServe'
            'r/12345678-1234-1234-1234-123456789012')
        repos = stor.PV.bld(mock_adp, name='repos_pv_name')
        data = [stor.PV.bld(mock_adp, name=n) for n in (
            'hdisk1', 'hdisk2', 'hdisk3')]
        cs.crt_cluster_ssp('clust_name', 'ssp_name', repos, node, data)
        # run_job() should run delete_job() at the end
        self.assertEqual(mock_del_job.call_count, 1)


class TestGetOrUploadImageLU(twrap.TestWrapper):
    """Tests for cluster_ssp.get_or_upload_image_lu's marker-LU protocol."""

    file = 'lufeed.txt'
    wrapper_class_to_test = stor.LUEnt

    def setUp(self):
        super(TestGetOrUploadImageLU, self).setUp()
        self.tier = mock.Mock(spec=stor.Tier)
        self.mock_luent_srch = self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.storage.LUEnt.search')).mock
        self.mock_luent_srch.side_effect = self.luent_search
        # Pin uuid4 so "my" marker LU name is deterministic.
        self.useFixture(fixtures.MockPatch(
            'uuid.uuid4')).mock.return_value = uuid.UUID('1234abcd-1234-1234-1'
                                                         '234-abcdbcdecdef')
        self.mock_crt_lu = self.useFixture(fixtures.MockPatch(
            'pypowervm.tasks.storage.crt_lu')).mock
        self.mock_upload_lu = self.useFixture(fixtures.MockPatch(
            'pypowervm.tasks.storage.upload_lu')).mock
        self.mock_upload_lu.side_effect = self.upload_lu
        self.mock_sleep = self.useFixture(fixtures.MockPatch(
            'time.sleep')).mock
        self.mock_sleep.side_effect = self.sleep_conflict_finishes
        self.vios_uuid = 'vios_uuid'
        self.mock_stream_func = mock.Mock()
        self.gb_size = 123
        self.b_size = self.gb_size * 1024 * 1024 * 1024
        # The image LU with the "real" content
        luname = 'lu_name'
        self.img_lu = self.bld_lu(luname, self.gb_size)
        # The marker LU used by *this* thread
        mkrname = 'part1234abcd' + luname
        self.mkr_lu = self.bld_lu(mkrname, cs.MKRSZ)
        # Marker LU used by a conflicting thread. This one will lose the bid.
        confl_luname_lose = 'part5678cdef' + luname
        self.confl_mkr_lu_lose = self.bld_lu(confl_luname_lose, cs.MKRSZ)
        # Marker LU used by a conflicting thread. This one will win the bid.
        confl_luname_win = 'part0123abcd' + luname
        self.confl_mkr_lu_win = self.bld_lu(confl_luname_win, cs.MKRSZ)
        # Always expect to finish with exactly one more LU than we started
        # with
        self.exp_num_lus = len(self.entries) + 1

    def bld_lu(self, luname, gb_size):
        # Build an image-type LUEnt whose delete() removes it from the feed.
        lu = stor.LUEnt.bld(None, luname, gb_size, typ=cs.IMGTYP)
        lu._udid('udid_' + luname)
        lu.delete = mock.Mock()
        lu.delete.side_effect = lambda: self.entries.remove(lu)
        return lu

    def luent_search(self, adapter, parent=None, lu_type=None):
        """Mock side effect for LUEnt.search, validating arguments.

        :return: self.entries (the LUEnt feed)
        """
        self.assertEqual(self.tier.adapter, adapter)
        self.assertEqual(self.tier, parent)
        self.assertEqual(cs.IMGTYP, lu_type)
        return self.entries

    def setup_crt_lu_mock(self, crt_img_lu_se, conflicting_mkr_lu=None):
        """Set up the mock side effect for crt_lu calls.

        The marker LU side always creates "my" marker LU.  If a
        conflicting_mkr_lu is specified, also creates that marker LU (to
        simulate simultaneous attempts from separate hosts).

        The image LU side behaves as indicated by the crt_img_lu_se parameter.

        :param crt_img_lu_se: Side effect for crt_lu of the image LU.
        :param conflicting_mkr_lu: If specified, the resulting mock pretends
                                   that some other host created the specified
                                   marker LU at the same time we're creating
                                   ours.
        :return: A callable suitable for assigning to
                 self.mock_crt_lu.side_effect.
        """
        # NOTE(review): despite the :return: above, this method returns None;
        # it installs the side effect itself.  Docstring kept as found.
        def crt_mkr_lu(tier, luname, lu_gb, typ=None):
            self.assertEqual(self.tier, tier)
            self.assertEqual(self.mkr_lu.name, luname)
            self.assertEqual(self.mkr_lu.capacity, lu_gb)
            self.assertEqual(cs.IMGTYP, typ)
            self.entries.append(self.mkr_lu)
            if conflicting_mkr_lu is not None:
                self.entries.append(conflicting_mkr_lu)
            # Second time through, creation of the image LU
            self.mock_crt_lu.side_effect = crt_img_lu_se
            return tier, self.mkr_lu
        # First time through, creation of the marker LU
        self.mock_crt_lu.side_effect = crt_mkr_lu

    def crt_img_lu(self, tier, luname, lu_gb, typ=None):
        """Mock side effect for crt_lu of the image LU."""
        self.assertEqual(self.tier, tier)
        self.assertEqual(self.img_lu.name, luname)
        self.assertEqual(self.img_lu.capacity, lu_gb)
        self.assertEqual(cs.IMGTYP, typ)
        self.entries.append(self.img_lu)
        return tier, self.img_lu

    def upload_lu(self, vios_uuid, new_lu, stream, b_size, upload_type=None):
        # Mock side effect for upload_lu, validating arguments.
        self.assertEqual(self.vios_uuid, vios_uuid)
        self.assertEqual(self.img_lu, new_lu)
        self.assertEqual(self.mock_stream_func, stream)
        self.assertEqual(self.b_size, b_size)
        self.assertEqual(tsk_st.UploadType.IO_STREAM_BUILDER, upload_type)

    def sleep_conflict_finishes(self, sec):
        """Pretend the conflicting LU finishes while we sleep."""
        self.assertTrue(cs.SLEEP_U_MIN <= sec <= cs.SLEEP_U_MAX)
        # We may have used either conflict marker LU
        if self.confl_mkr_lu_lose in self.entries:
            self.entries.remove(self.confl_mkr_lu_lose)
        if self.confl_mkr_lu_win in self.entries:
            self.entries.remove(self.confl_mkr_lu_win)
        if self.img_lu not in self.entries:
            self.entries.append(self.img_lu)

    def test_already_exists(self):
        """The image LU is already there."""
        self.entries.append(self.img_lu)
        self.assertEqual(self.img_lu, cs.get_or_upload_image_lu(
            self.tier, self.img_lu.name, self.vios_uuid,
            self.mock_stream_func, self.b_size))
        # We only searched once
        self.assertEqual(1, self.mock_luent_srch.call_count)
        # We didn't create anything
        self.mock_crt_lu.assert_not_called()
        # We didn't upload anything
        self.mock_upload_lu.assert_not_called()
        # We didn't delete anything
        self.mkr_lu.delete.assert_not_called()
        self.img_lu.delete.assert_not_called()
        # We didn't sleep
        self.mock_sleep.assert_not_called()
        # Stream func not invoked
        self.mock_stream_func.assert_not_called()
        # Right number of LUs
        self.assertEqual(self.exp_num_lus, len(self.entries))

    def test_upload_no_conflict(self):
        """Upload a new LU - no conflict."""
        self.setup_crt_lu_mock(self.crt_img_lu)
        self.assertEqual(self.img_lu, cs.get_or_upload_image_lu(
            self.tier, self.img_lu.name, self.vios_uuid,
            self.mock_stream_func, self.b_size))
        # Uploaded content
        self.assertEqual(1, self.mock_upload_lu.call_count)
        # Removed marker LU
        self.mkr_lu.delete.assert_called_once_with()
        # Did not delete image LU
        self.img_lu.delete.assert_not_called()
        # I pulled the feed the first time through, and for _upload_conflict
        self.assertEqual(2, self.mock_luent_srch.call_count)
        # Right number of LUs
        self.assertEqual(self.exp_num_lus, len(self.entries))

    def test_conflict_not_started(self):
        """Another upload is about to start when we get there."""
        # Note that the conflicting process wins, even though its marker LU
        # name would lose to ours - because we don't get around to creating
        # ours.
        self.entries.append(self.confl_mkr_lu_lose)
        self.assertEqual(self.img_lu, cs.get_or_upload_image_lu(
            self.tier, self.img_lu.name, self.vios_uuid,
            self.mock_stream_func, self.b_size))
        # I "waited" for the other guy to complete
        self.assertEqual(1, self.mock_sleep.call_count)
        # I did not create, upload, or remove anything
        self.mock_crt_lu.assert_not_called()
        self.mock_upload_lu.assert_not_called()
        self.mkr_lu.delete.assert_not_called()
        self.img_lu.delete.assert_not_called()
        # I pulled the feed the first time through, and once after the sleep
        self.assertEqual(2, self.mock_luent_srch.call_count)
        # Right number of LUs
        self.assertEqual(self.exp_num_lus, len(self.entries))

    def test_conflict_started(self):
        """Another upload is in progress when we get there."""
        self.entries.append(self.confl_mkr_lu_lose)
        self.entries.append(self.img_lu)
        self.assertEqual(self.img_lu, cs.get_or_upload_image_lu(
            self.tier, self.img_lu.name, self.vios_uuid,
            self.mock_stream_func, self.b_size))
        # I "waited" for the other guy to complete
        self.assertEqual(1, self.mock_sleep.call_count)
        # I did not create, upload, or remove anything
        self.mock_crt_lu.assert_not_called()
        self.mock_upload_lu.assert_not_called()
        self.mkr_lu.delete.assert_not_called()
        self.img_lu.delete.assert_not_called()
        # I searched the first time through, and once after the sleep
        self.assertEqual(2, self.mock_luent_srch.call_count)
        # Right number of LUs
        self.assertEqual(self.exp_num_lus, len(self.entries))

    def test_conflict_I_lose(self):
        """We both bid at the same time; and I lose."""
        self.setup_crt_lu_mock(self.fail,
                               conflicting_mkr_lu=self.confl_mkr_lu_win)
        self.assertEqual(self.img_lu, cs.get_or_upload_image_lu(
            self.tier, self.img_lu.name, self.vios_uuid,
            self.mock_stream_func, self.b_size))
        # I tried creating mine because his wasn't there at the start
        self.assertEqual(1, self.mock_crt_lu.call_count)
        # I "slept", waiting for the other guy to finish
        self.assertEqual(1, self.mock_sleep.call_count)
        # I didn't upload
        self.mock_upload_lu.assert_not_called()
        # I did remove my marker from the SSP
        self.mkr_lu.delete.assert_called_once_with()
        # I didn't remove the image LU (because I didn't create it)
        self.img_lu.delete.assert_not_called()
        # I searched the first time through, once in _upload_conflict, and once
        # after the sleep
        self.assertEqual(3, self.mock_luent_srch.call_count)
        # Right number of LUs
        self.assertEqual(self.exp_num_lus, len(self.entries))

    def test_conflict_I_win(self):
        """We both bid at the same time; and I win."""
        self.setup_crt_lu_mock(self.crt_img_lu,
                               conflicting_mkr_lu=self.confl_mkr_lu_lose)
        self.assertEqual(self.img_lu, cs.get_or_upload_image_lu(
            self.tier, self.img_lu.name, self.vios_uuid,
            self.mock_stream_func, self.b_size))
        # I tried creating mine because his wasn't there at the start; and I
        # also created the image LU.
        self.assertEqual(2, self.mock_crt_lu.call_count)
        # Since I won, I did the upload
        self.assertEqual(1, self.mock_upload_lu.call_count)
        # I did remove my marker from the SSP
        self.mkr_lu.delete.assert_called_once_with()
        # I didn't remove the image LU (because I won)
        self.img_lu.delete.assert_not_called()
        # I never slept
        self.mock_sleep.assert_not_called()
        # I searched the first time through, and in _upload_conflict
        self.assertEqual(2, self.mock_luent_srch.call_count)
        # IRL, the other guy will have removed his marker LU at some point.
        # Here, we can expect it to remain, so there's one "extra".
        self.assertEqual(self.exp_num_lus + 1, len(self.entries))

    def test_crt_img_lu_raises(self):
        """Exception during crt_lu of the image LU."""
        self.setup_crt_lu_mock(IOError('crt_lu raises on the image LU'),
                               conflicting_mkr_lu=self.confl_mkr_lu_lose)
        self.assertRaises(IOError, cs.get_or_upload_image_lu, self.tier,
                          self.img_lu.name, self.vios_uuid,
                          self.mock_stream_func, self.b_size)
        # I didn't get to the upload
        self.mock_upload_lu.assert_not_called()
        # I never slept
        self.mock_sleep.assert_not_called()
        # I removed my marker
        self.mkr_lu.delete.assert_called_once_with()
        # I didn't remove the image LU (because I failed to create it)
        self.img_lu.delete.assert_not_called()
        # I searched the first time through, and in _upload_conflict
        self.assertEqual(2, self.mock_luent_srch.call_count)
        # We left the SSP as it was (plus the other guy's extra, which would
        # actually be removed normally).
        self.assertEqual(self.exp_num_lus, len(self.entries))

    def test_upload_raises(self):
        """I win; upload_lu raises after crt_lu of the image LU."""
        self.setup_crt_lu_mock(self.crt_img_lu,
                               conflicting_mkr_lu=self.confl_mkr_lu_lose)
        self.mock_upload_lu.side_effect = IOError('upload_lu raises.')
        self.assertRaises(IOError, cs.get_or_upload_image_lu, self.tier,
                          self.img_lu.name, self.vios_uuid,
                          self.mock_stream_func, self.b_size)
        # I created my marker and the image LU
        self.assertEqual(2, self.mock_crt_lu.call_count)
        # Since I won, I tried the upload
        self.assertEqual(1, self.mock_upload_lu.call_count)
        # I never slept
        self.mock_sleep.assert_not_called()
        # I removed both the real LU and my marker
        self.mkr_lu.delete.assert_called_once_with()
        self.img_lu.delete.assert_called_once_with()
        # I searched the first time through, and in _upload_conflict
        self.assertEqual(2, self.mock_luent_srch.call_count)
        # We left the SSP as it was (plus the other guy's extra, which would
        # actually be removed normally.
# NOTE(review): this view of the file is truncated here - the final assertion
# of test_upload_raises continues past the visible source.
        self.assertEqual(self.exp_num_lus, len(self.entries))
pypowervm-1.1.24/pypowervm/tests/tasks/__init__.py0000664000175000017500000000000013571367171021706 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/tasks/test_migration.py0000664000175000017500000001614213571367171023215 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import testtools

import pypowervm.entities as ent
from pypowervm.tasks import migration as mig
import pypowervm.tests.tasks.util as u
import pypowervm.tests.test_fixtures as fx


class TestMigration(testtools.TestCase):
    """Unit Tests for Migration."""

    def setUp(self):
        super(TestMigration, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        # Canned Response whose entry the migration Job template read returns.
        mock_resp = mock.MagicMock()
        mock_resp.entry = ent.Entry(
            {}, ent.Element('Dummy', self.adpt), self.adpt)
        self.adpt.read.return_value = mock_resp
        # Fake LPAR wrapper to migrate.
        self.lpar_w = mock.MagicMock()
        self.lpar_w.adapter = self.adpt
        self.lpar_w.uuid = '1234'

    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    def test_migration(self, mock_run_job):
        # Test simple call.  get_parm_checker validates the JobParameters
        # that run_job receives.
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', [(mig.TGT_MGD_SYS, 'abc')], exp_timeout=1800 * 4)
        mig.migrate_lpar(self.lpar_w, 'abc')
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='Migrate',
                                               suffix_type='do')

        # Test all parms
        self.adpt.read.reset_mock()
        parm_list = [(mig.TGT_MGD_SYS, 'abc'), (mig.TGT_RMT_HMC, 'host'),
                     (mig.TGT_RMT_HMC_USR, 'usr'),
                     (mig.DEST_MSP, '1.1.1.1,2.2.2.2'),
                     (mig.SRC_MSP, '3.3.3.3,4.4.4.4'), (mig.SPP_ID, '5'),
                     (mig.OVS_OVERRIDE, '2'), (mig.VLAN_BRIDGE_OVERRIDE, '2')]
        mapping_list = [(mig.VFC_MAPPINGS, ['1/1/1', '3/3/3//3']),
                        (mig.VSCSI_MAPPINGS, ['2/2/2']),
                        (mig.VLAN_MAPPINGS, ['001122334455/4',
                                             '001122334466/5/6 7'])]
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', parm_list, exp_job_mappings=mapping_list,
            exp_timeout=1800 * 4)
        mig.migrate_lpar(self.lpar_w, 'abc', tgt_mgmt_svr='host',
                         tgt_mgmt_usr='usr',
                         virtual_fc_mappings=['1/1/1', '3/3/3//3'],
                         virtual_scsi_mappings=['2/2/2'],
                         vlan_mappings=['001122334455/4',
                                        '001122334466/5/6 7'],
                         dest_msp_name='1.1.1.1,2.2.2.2',
                         source_msp_name='3.3.3.3,4.4.4.4', spp_id='5',
                         sdn_override=True, vlan_check_override=True)
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='Migrate',
                                               suffix_type='do')

        # Test migrate with affinity flag
        self.adpt.read.reset_mock()
        parm_list = [(mig.TGT_MGD_SYS, 'abc'), (mig.TGT_RMT_HMC, 'host'),
                     (mig.TGT_RMT_HMC_USR, 'usr'),
                     (mig.DEST_MSP, '1.1.1.1,2.2.2.2'),
                     (mig.SRC_MSP, '3.3.3.3,4.4.4.4'), (mig.SPP_ID, '5'),
                     (mig.OVS_OVERRIDE, '2'), (mig.VLAN_BRIDGE_OVERRIDE, '2'),
                     (mig.AFFINITY, 'true')]
        mapping_list = [(mig.VFC_MAPPINGS, ['1/1/1', '3/3/3//3']),
                        (mig.VSCSI_MAPPINGS, ['2/2/2']),
                        (mig.VLAN_MAPPINGS, ['001122334455/4',
                                             '001122334466/5/6 7'])]
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', parm_list, exp_job_mappings=mapping_list,
            exp_timeout=1800 * 4)
        mig.migrate_lpar(self.lpar_w, 'abc', tgt_mgmt_svr='host',
                         tgt_mgmt_usr='usr',
                         virtual_fc_mappings=['1/1/1', '3/3/3//3'],
                         virtual_scsi_mappings=['2/2/2'],
                         vlan_mappings=['001122334455/4',
                                        '001122334466/5/6 7'],
                         dest_msp_name='1.1.1.1,2.2.2.2',
                         source_msp_name='3.3.3.3,4.4.4.4', spp_id='5',
                         sdn_override=True, vlan_check_override=True,
                         check_affinity_score=True)
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='Migrate',
                                               suffix_type='do')

        # Test simple validation call
        self.adpt.read.reset_mock()
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', [(mig.TGT_MGD_SYS, 'abc')], exp_timeout=1800 * 4)
        mock_run_job.reset_mock()
        mig.migrate_lpar(self.lpar_w, 'abc', validate_only=True)
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='MigrateValidate',
                                               suffix_type='do')

    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    def test_migration_recover(self, mock_run_job):
        # Test simple call
        mig.migrate_recover(self.lpar_w)
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='MigrateRecover',
                                               suffix_type='do')
        mock_run_job.assert_called_once_with(
            '1234', job_parms=[], timeout=1800)

        # Test simple call with force
        self.adpt.read.reset_mock()
        mock_run_job.reset_mock()
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', [('Force', 'true')], exp_timeout=1800)
        mig.migrate_recover(self.lpar_w, force=True)
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='MigrateRecover',
                                               suffix_type='do')

    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    def test_migration_abort(self, mock_run_job):
        # Test simple call
        mig.migrate_abort(self.lpar_w)
        self.adpt.read.assert_called_once_with('LogicalPartition', '1234',
                                               suffix_parm='MigrateAbort',
                                               suffix_type='do')
        mock_run_job.assert_called_once_with(
            '1234', job_parms=None, timeout=1800)
pypowervm-1.1.24/pypowervm/tests/tasks/test_mgmtconsole.py0000664000175000017500000000644513571367171023550 0ustar neoneo00000000000000# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools from pypowervm import const from pypowervm import exceptions as exc from pypowervm.tasks import management_console as mc_task class TestMgmtCon(testtools.TestCase): """Unit Tests for ManagementConsole tasks.""" def setUp(self): super(TestMgmtCon, self).setUp() self.mc_p = mock.patch('pypowervm.wrappers.management_console.' 'ManagementConsole') self.mc = self.mc_p.start() # Make it easy to address the mock console wrapper self.mc_ws = self.mc.wrap.return_value self.cons_w = self.mc_ws.__getitem__.return_value self.addCleanup(self.mc_p.stop) def test_get_public_key(self): self.cons_w.ssh_public_key = '1234554321' key = mc_task.get_public_key(mock.Mock()) self.assertEqual('1234554321', key) def test_add_auth_key(self): # Test adding a key ('4') to an existing list ('1', '2', '3') self.cons_w.ssh_authorized_keys = ('1', '2', '3') mc_task.add_authorized_key(mock.Mock(), '4') self.assertEqual(['1', '2', '3', '4'], self.cons_w.ssh_authorized_keys) self.cons_w.update.assert_called_once_with() # Test we don't call update when not needed. 
self.cons_w.reset_mock() mc_task.add_authorized_key(mock.Mock(), '2') self.assertEqual(0, self.cons_w.update.called) # Test the transaction retry self.cons_w.reset_mock() resp = mock.Mock(status=const.HTTPStatus.ETAG_MISMATCH) self.cons_w.update.side_effect = exc.HttpError(resp) # When the transaction decorator refreshes the mgmt console wrapper # then we know it's retrying so just raise an exception and bail self.cons_w.refresh.side_effect = ValueError() self.assertRaises(ValueError, mc_task.add_authorized_key, mock.Mock(), '5') # Ensure it really was refresh that caused the exception self.assertEqual(1, self.cons_w.refresh.call_count) # And that our update was called self.assertEqual(1, self.cons_w.update.call_count) def test_get_auth_keys(self): # Test adding a key ('4') to an existing list ('1', '2', '3') self.cons_w.ssh_authorized_keys = ('1', '2', '3') self.assertEqual(self.cons_w.ssh_authorized_keys, mc_task.get_authorized_keys(mock.Mock())) mc_task.add_authorized_key(mock.Mock(), '4') self.assertEqual(self.cons_w.ssh_authorized_keys, mc_task.get_authorized_keys(mock.Mock())) self.assertEqual(mc_task.get_authorized_keys(mock.Mock()), ['1', '2', '3', '4']) pypowervm-1.1.24/pypowervm/tests/tasks/test_storage.py0000664000175000017500000017504113571367171022674 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from six.moves import builtins import fixtures import mock import testtools import pypowervm.adapter as adp import pypowervm.exceptions as exc import pypowervm.helpers.vios_busy as vb import pypowervm.tasks.storage as ts import pypowervm.tests.tasks.util as tju import pypowervm.tests.test_fixtures as fx import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.utils.transaction as tx import pypowervm.wrappers.entry_wrapper as ewrap import pypowervm.wrappers.logical_partition as lpar import pypowervm.wrappers.storage as stor import pypowervm.wrappers.vios_file as vf import pypowervm.wrappers.virtual_io_server as vios CLUSTER = "cluster.txt" LU_LINKED_CLONE_JOB = 'cluster_LULinkedClone_job_template.txt' UPLOAD_VOL_GRP_ORIG = 'upload_volgrp.txt' UPLOAD_VOL_GRP_NEW_VDISK = 'upload_volgrp2.txt' VG_FEED = 'fake_volume_group2.txt' UPLOADED_FILE = 'upload_file.txt' VIOS_FEED = 'fake_vios_feed.txt' VIOS_FEED2 = 'fake_vios_hosting_vios_feed.txt' VIOS_ENTRY = 'fake_vios_ssp_npiv.txt' VIOS_ENTRY2 = 'fake_vios_mappings.txt' LPAR_FEED = 'lpar.txt' LU_FEED = 'lufeed.txt' def _mock_update_by_path(ssp, etag, path, timeout=-1): # Spoof adding UDID and defaulting thinness for lu in ssp.logical_units: if not lu.udid: lu._udid('udid_' + lu.name) if lu.is_thin is None: lu._is_thin(True) if lu.lu_type is None: lu._lu_type(stor.LUType.DISK) resp = adp.Response('meth', 'path', 200, 'reason', {'etag': 'after'}) resp.entry = ssp.entry return resp class TestUploadLV(testtools.TestCase): """Unit Tests for Instance uploads.""" def setUp(self): super(TestUploadLV, self).setUp() self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemotePVMTraits)) self.adpt = self.adptfx.adpt self.v_uuid = '14B854F7-42CE-4FF0-BD57-1D117054E701' self.vg_uuid = 'b6bdbf1f-eddf-3c81-8801-9859eb6fedcb' @mock.patch('tempfile.mkdtemp') @mock.patch('pypowervm.tasks.storage.os') @mock.patch('pypowervm.util.retry_io_command') @mock.patch('pypowervm.tasks.storage.open') def test_rest_api_pipe(self, 
mock_open, mock_retry, mock_os, mock_mkdtemp): mock_writer = mock.Mock() with ts._rest_api_pipe(mock_writer) as read_stream: self.assertEqual(mock_retry.return_value, read_stream) mock_mkdtemp.assert_called_once_with() mock_os.path.join.assert_called_once_with(mock_mkdtemp.return_value, 'REST_API_Pipe') mock_os.mkfifo.assert_called_once_with(mock_os.path.join.return_value) mock_writer.assert_called_once_with(mock_os.path.join.return_value) mock_os.remove.assert_called_once_with(mock_os.path.join.return_value) mock_os.rmdir.assert_called_once_with(mock_mkdtemp.return_value) # _eintr_retry_call was invoked once with open and once with close mock_retry.assert_has_calls( [mock.call(mock_open, mock_os.path.join.return_value, 'r')], [mock.call(mock_retry.return_value.close)]) @mock.patch('pypowervm.tasks.storage._rest_api_pipe') def test_upload_stream_api_func(self, mock_rap): """With FUNC, _upload_stream_api uses _rest_api_pipe properly.""" vio_file = mock.Mock() vio_file.adapter.helpers = [vb.vios_busy_retry_helper] ts._upload_stream_api(vio_file, 'io_handle', ts.UploadType.FUNC) mock_rap.assert_called_once_with('io_handle') vio_file.adapter.upload_file.assert_called_once_with( vio_file.element, mock_rap.return_value.__enter__.return_value) self.assertEqual(vio_file.adapter.helpers, [vb.vios_busy_retry_helper]) @mock.patch('pypowervm.tasks.storage._create_file') def test_upload_new_vopt(self, mock_create_file): """Tests the uploads of the virtual disks.""" fake_file = self._fake_meta() fake_file.adapter.helpers = [vb.vios_busy_retry_helper] mock_create_file.return_value = fake_file v_opt, f_wrap = ts.upload_vopt(self.adpt, self.v_uuid, None, 'test2', f_size=50) mock_create_file.assert_called_once_with( self.adpt, 'test2', vf.FileType.MEDIA_ISO, self.v_uuid, None, 50) # Test that vopt was 'uploaded' self.adpt.upload_file.assert_called_with(mock.ANY, None, helpers=[]) self.assertIsNone(f_wrap) self.assertIsNotNone(v_opt) self.assertIsInstance(v_opt, stor.VOptMedia) 
self.assertEqual('test2', v_opt.media_name) # Ensure cleanup was called self.adpt.delete.assert_called_once_with( 'File', service='web', root_id='6233b070-31cc-4b57-99bd-37f80e845de9') # Test cleanup failure self.adpt.reset_mock() self.adpt.delete.side_effect = exc.Error('Something bad') vopt, f_wrap = ts.upload_vopt(self.adpt, self.v_uuid, None, 'test2', f_size=50) self.adpt.delete.assert_called_once_with( 'File', service='web', root_id='6233b070-31cc-4b57-99bd-37f80e845de9') self.assertIsNotNone(f_wrap) self.assertIsNotNone(vopt) self.assertIsInstance(vopt, stor.VOptMedia) self.assertEqual('test2', v_opt.media_name) @mock.patch.object(ts.LOG, 'warning') @mock.patch('pypowervm.tasks.storage._create_file') def test_upload_vopt_by_filepath(self, mock_create_file, mock_log_warn): """Tests the uploads of the virtual disks with an upload retry.""" fake_file = self._fake_meta() fake_file.adapter.helpers = [vb.vios_busy_retry_helper] mock_create_file.return_value = fake_file self.adpt.upload_file.side_effect = [exc.Error("error"), object()] m = mock.mock_open() with mock.patch.object(builtins, 'open', m): v_opt, f_wrap = ts.upload_vopt( self.adpt, self.v_uuid, 'fake-path', 'test2', f_size=50) # Test that vopt was 'uploaded' self.adpt.upload_file.assert_called_with(mock.ANY, m(), helpers=[]) self.assertIsNone(f_wrap) self.assertIsNotNone(v_opt) self.assertIsInstance(v_opt, stor.VOptMedia) self.assertEqual('test2', v_opt.media_name) # Validate that there was a warning log call and multiple executions # of the upload mock_log_warn.assert_called_once() self.assertEqual(2, self.adpt.upload_file.call_count) # Ensure cleanup was called twice since the first uploads fails. 
self.adpt.delete.assert_has_calls([mock.call( 'File', service='web', root_id='6233b070-31cc-4b57-99bd-37f80e845de9')]*2) @mock.patch('pypowervm.tasks.storage._create_file') def test_upload_new_vopt_w_fail(self, mock_create_file): """Tests the uploads of the virtual disks with an upload fail.""" mock_create_file.return_value = self._fake_meta() self.adpt.upload_file.side_effect = exc.Error("error") self.assertRaises(exc.Error, ts.upload_vopt, self.adpt, self.v_uuid, None, 'test2', f_size=50) @mock.patch('pypowervm.tasks.storage.rm_vg_storage') @mock.patch('pypowervm.wrappers.storage.VG.get') @mock.patch('pypowervm.tasks.storage._upload_stream') @mock.patch('pypowervm.tasks.storage._create_file') @mock.patch('pypowervm.tasks.storage.crt_vdisk') def test_upload_new_vdisk_failed( self, mock_create_vdisk, mock_create_file, mock_upload_stream, mock_vg_get, mock_rm): """Tests the uploads of the virtual disks.""" # First need to load in the various test responses. mock_vdisk = mock.Mock() mock_create_vdisk.return_value = mock_vdisk mock_create_file.return_value = self._fake_meta() fake_vg = mock.Mock() mock_vg_get.return_value = fake_vg mock_upload_stream.side_effect = exc.ConnectionError('fake error') self.assertRaises( exc.ConnectionError, ts.upload_new_vdisk, self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50, d_size=25, sha_chksum='abc123') self.adpt.delete.assert_called_once() mock_rm.assert_called_once_with(fake_vg, vdisks=[mock_vdisk]) @mock.patch('pypowervm.tasks.storage._create_file') def test_upload_new_vdisk(self, mock_create_file): """Tests the uploads of the virtual disks.""" # traits are already set to use the REST API upload # First need to load in the various test responses. 
vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt) vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt) self.adpt.read.return_value = vg_orig self.adpt.update_by_path.return_value = vg_post_crt mock_create_file.return_value = self._fake_meta() n_vdisk, f_wrap = ts.upload_new_vdisk( self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50, d_size=25, sha_chksum='abc123') # Ensure the create file was called mock_create_file.assert_called_once_with( self.adpt, 'test2', vf.FileType.DISK_IMAGE, self.v_uuid, f_size=50, tdev_udid='0300f8d6de00004b000000014a54555cd9.3', sha_chksum='abc123') # Ensure cleanup was called after the upload self.adpt.delete.assert_called_once_with( 'File', service='web', root_id='6233b070-31cc-4b57-99bd-37f80e845de9') self.assertIsNone(f_wrap) self.assertIsNotNone(n_vdisk) self.assertIsInstance(n_vdisk, stor.VDisk) @mock.patch('pypowervm.tasks.storage.crt_vdisk') def test_crt_copy_vdisk(self, mock_crt_vdisk): """Tests the uploads of the virtual disks.""" # traits are already set to use the REST API upload # First need to load in the various test responses. 
vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt) vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt) self.adpt.read.return_value = vg_orig self.adpt.update_by_path.return_value = vg_post_crt n_vdisk = ts.crt_copy_vdisk( self.adpt, self.v_uuid, self.vg_uuid, 'src', 1073741824, 'test2', d_size=2147483648, file_format=stor.FileFormatType.RAW) self.assertIsNotNone(n_vdisk) mock_crt_vdisk.assert_called_once_with( self.adpt, self.v_uuid, self.vg_uuid, 'test2', 2, base_image='src', file_format=stor.FileFormatType.RAW) @mock.patch('pypowervm.tasks.storage.crt_vdisk') @mock.patch('pypowervm.tasks.storage._create_file') @mock.patch('pypowervm.tasks.storage._upload_stream_api') def test_upload_new_vdisk_func_remote(self, mock_usa, mock_crt_file, mock_crt_vdisk): """With FUNC and non-local, upload_new_vdisk uses REST API upload.""" mock_crt_file.return_value = mock.Mock(schema_type='File') n_vdisk, maybe_file = ts.upload_new_vdisk( self.adpt, 'v_uuid', 'vg_uuid', 'io_handle', 'd_name', 10, upload_type=ts.UploadType.FUNC, file_format=stor.FileFormatType.RAW) mock_crt_vdisk.assert_called_once_with( self.adpt, 'v_uuid', 'vg_uuid', 'd_name', 1.0, file_format=stor.FileFormatType.RAW) mock_crt_file.assert_called_once_with( self.adpt, 'd_name', vf.FileType.DISK_IMAGE, 'v_uuid', f_size=10, tdev_udid=mock_crt_vdisk.return_value.udid, sha_chksum=None) mock_usa.assert_called_once_with( mock_crt_file.return_value, 'io_handle', ts.UploadType.FUNC) mock_crt_file.return_value.adapter.delete.assert_called_once_with( vf.File.schema_type, root_id=mock_crt_file.return_value.uuid, service='web') self.assertEqual(mock_crt_vdisk.return_value, n_vdisk) self.assertIsNone(maybe_file) @mock.patch('pypowervm.tasks.storage._upload_stream_api') @mock.patch('pypowervm.tasks.storage._create_file') def test_upload_stream_via_stream_bld(self, mock_create_file, mock_upload_st): """Tests the uploads of a vDisk - via UploadType.IO_STREAM_BUILDER.""" mock_file = self._fake_meta() # Prove that 
COORDINATED is gone (uses API upload now) mock_file._enum_type(vf.FileType.DISK_IMAGE_COORDINATED) mock_create_file.return_value = mock_file mock_io_stream = mock.MagicMock() mock_io_handle = mock.MagicMock() mock_io_handle.return_value = mock_io_stream # Run the code ts._upload_stream(mock_file, mock_io_handle, ts.UploadType.IO_STREAM_BUILDER) # Make sure the function was called. mock_io_handle.assert_called_once_with() mock_upload_st.assert_called_once_with( mock_file, mock_io_stream, ts.UploadType.IO_STREAM) @mock.patch('pypowervm.tasks.storage._create_file') def test_upload_new_vdisk_failure(self, mock_create_file): """Tests the failure path for uploading of the virtual disks.""" # First need to load in the various test responses. vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt) vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt) self.adpt.read.return_value = vg_orig self.adpt.update_by_path.return_value = vg_post_crt mock_create_file.return_value = self._fake_meta() self.assertRaises(exc.Error, ts.upload_new_vdisk, self.adpt, self.v_uuid, self.vg_uuid, None, 'test3', 50) # Test cleanup failure self.adpt.delete.side_effect = exc.Error('Something bad') f_wrap = ts.upload_new_vdisk(self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50, sha_chksum='abc123') self.adpt.delete.assert_called_once_with( 'File', service='web', root_id='6233b070-31cc-4b57-99bd-37f80e845de9') self.assertIsNotNone(f_wrap) @mock.patch('pypowervm.tasks.storage._create_file') @mock.patch('pypowervm.tasks.storage.crt_lu') def test_upload_new_lu(self, mock_crt_lu, mock_create_file): """Tests create/upload of SSP LU.""" # traits are already set to use the REST API upload ssp = mock.Mock(adapter=mock.Mock(traits=mock.Mock(local_api=True))) interim_lu = mock.Mock(adapter=self.adpt) mock_create_file.return_value = self._fake_meta() mock_crt_lu.return_value = ssp, interim_lu size_b = 1224067890 new_lu, f_wrap = ts.upload_new_lu( self.v_uuid, ssp, None, 'lu1', size_b, d_size=25, 
sha_chksum='abc123') # The LU created by crt_lu was returned self.assertEqual(interim_lu, new_lu) # crt_lu was called properly # 1224067890 / 1GB = 1.140002059; round up to 2dp mock_crt_lu.assert_called_with(ssp, 'lu1', 1.15, typ=stor.LUType.IMAGE) # Ensure the create file was called mock_create_file.assert_called_once_with( self.adpt, interim_lu.name, vf.FileType.DISK_IMAGE, self.v_uuid, f_size=size_b, tdev_udid=interim_lu.udid, sha_chksum='abc123') # Ensure cleanup was called after the upload self.adpt.delete.assert_called_once_with( 'File', service='web', root_id='6233b070-31cc-4b57-99bd-37f80e845de9') self.assertIsNone(f_wrap) @mock.patch('pypowervm.util.convert_bytes_to_gb') @mock.patch('pypowervm.tasks.storage.crt_lu') @mock.patch('pypowervm.tasks.storage.upload_lu') def test_upload_new_lu_calls(self, mock_upl, mock_crt, mock_b2g): """Various permutations of how to call upload_new_lu.""" mock_crt.return_value = 'ssp_out', 'new_lu' f_size = 10 # No optionals self.assertEqual(('new_lu', mock_upl.return_value), ts.upload_new_lu( 'v_uuid', 'ssp_in', 'd_stream', 'lu_name', f_size)) mock_b2g.assert_called_with(f_size, dp=2) mock_crt.assert_called_with('ssp_in', 'lu_name', mock_b2g.return_value, typ=stor.LUType.IMAGE) mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size, sha_chksum=None, upload_type=ts.UploadType.IO_STREAM) mock_b2g.reset_mock() mock_crt.reset_mock() mock_upl.reset_mock() # d_size < f_size; sha_chksum specified self.assertEqual(('new_lu', mock_upl.return_value), ts.upload_new_lu( 'v_uuid', 'ssp_in', 'd_stream', 'lu_name', f_size, d_size=1, sha_chksum='sha_chksum')) mock_b2g.assert_called_with(10, dp=2) mock_crt.assert_called_with('ssp_in', 'lu_name', mock_b2g.return_value, typ=stor.LUType.IMAGE) mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size, sha_chksum='sha_chksum', upload_type=ts.UploadType.IO_STREAM) mock_b2g.reset_mock() mock_crt.reset_mock() mock_upl.reset_mock() # d_size > f_size; return_ssp specified 
self.assertEqual(('ssp_out', 'new_lu', mock_upl.return_value), ts.upload_new_lu( 'v_uuid', 'ssp_in', 'd_stream', 'lu_name', f_size, d_size=100, return_ssp=True)) mock_b2g.assert_called_with(100, dp=2) mock_crt.assert_called_with('ssp_in', 'lu_name', mock_b2g.return_value, typ=stor.LUType.IMAGE) mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size, sha_chksum=None, upload_type=ts.UploadType.IO_STREAM) @mock.patch('pypowervm.tasks.storage._create_file') @mock.patch('pypowervm.tasks.storage._upload_stream_api') def test_upload_lu_func_remote(self, mock_usa, mock_crt_file): """With FUNC and non-local, upload_lu uses REST API upload.""" lu = mock.Mock(adapter=self.adpt) self.assertIsNone(ts.upload_lu('v_uuid', lu, 'io_handle', 'f_size', upload_type=ts.UploadType.FUNC)) mock_crt_file.assert_called_once_with( lu.adapter, lu.name, vf.FileType.DISK_IMAGE, 'v_uuid', f_size='f_size', tdev_udid=lu.udid, sha_chksum=None) mock_usa.assert_called_once_with(mock_crt_file.return_value, 'io_handle', ts.UploadType.FUNC) @mock.patch('pypowervm.util.convert_bytes_to_gb') @mock.patch('pypowervm.tasks.storage.crt_lu') @mock.patch('pypowervm.tasks.storage.upload_lu') def test_upload_new_lu_calls_via_func(self, mock_upl, mock_crt, mock_b2g): """Various permutations of how to call upload_new_lu.""" mock_crt.return_value = 'ssp_out', 'new_lu' f_size = 10 # Successful call ssp_in = mock.Mock(adapter=mock.Mock(traits=mock.Mock(local_api=True))) self.assertEqual(('new_lu', mock_upl.return_value), ts.upload_new_lu( 'v_uuid', ssp_in, 'd_stream', 'lu_name', f_size, upload_type=ts.UploadType.FUNC)) mock_b2g.assert_called_with(f_size, dp=2) mock_crt.assert_called_with(ssp_in, 'lu_name', mock_b2g.return_value, typ=stor.LUType.IMAGE) mock_upl.assert_called_with('v_uuid', 'new_lu', 'd_stream', f_size, sha_chksum=None, upload_type=ts.UploadType.FUNC) def test_create_file(self): """Validates that the _create_file builds the Element properly.""" def validate_in(*args, **kwargs): # Validate 
that the element is built properly
            wrap = args[0]
            self.assertEqual('chk', wrap._get_val_str(vf._FILE_CHKSUM))
            self.assertEqual(50, wrap.expected_file_size)
            self.assertEqual('f_name', wrap.file_name)
            self.assertEqual('application/octet-stream',
                             wrap.internet_media_type)
            self.assertEqual('f_type', wrap.enum_type)
            self.assertEqual('v_uuid', wrap.vios_uuid)
            self.assertEqual('tdev_uuid', wrap.tdev_udid)

            ret = adp.Response('reqmethod', 'reqpath', 'status', 'reason', {})
            ret.entry = ewrap.EntryWrapper._bld(self.adpt, tag='File').entry
            return ret

        self.adpt.create.side_effect = validate_in

        ts._create_file(self.adpt, 'f_name', 'f_type', 'v_uuid', 'chk', 50,
                        'tdev_uuid')
        self.assertTrue(self.adpt.create.called)

    def _fake_meta(self):
        """Returns a fake meta class for the _create_file mock."""
        resp = tju.load_file(UPLOADED_FILE, self.adpt)
        return vf.File.wrap(resp)


class TestVG(twrap.TestWrapper):
    """Tests for pypowervm.tasks.storage.find_vg."""

    file = VG_FEED
    wrapper_class_to_test = stor.VG

    def setUp(self):
        super(TestVG, self).setUp()
        # TestWrapper sets up the VG feed.
        self.mock_vg_get = self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.storage.VG.get')).mock
        self.mock_vg_get.return_value = self.entries
        # Need a VIOS feed too.
        self.vios_feed = vios.VIOS.wrap(tju.load_file(VIOS_FEED))
        self.mock_vio_get = self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.virtual_io_server.VIOS.get')).mock
        self.mock_vio_get.return_value = self.vios_feed
        self.mock_vio_search = self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.virtual_io_server.VIOS.search')).mock

    def test_find_vg_all_vioses(self):
        # No vios_name: all VIOSes are fetched and scanned for the VG.
        ret_vio, ret_vg = ts.find_vg('adap', 'image_pool')
        self.assertEqual(self.vios_feed[0], ret_vio)
        self.assertEqual(self.entries[1], ret_vg)
        self.mock_vio_get.assert_called_once_with('adap')
        self.mock_vio_search.assert_not_called()
        self.mock_vg_get.assert_called_once_with(
            'adap', parent=self.vios_feed[0])

    def test_find_vg_specified_vios(self):
        # With vios_name: only the named VIOS is searched.
        self.mock_vio_search.return_value = self.vios_feed[1:]
        ret_vio, ret_vg = ts.find_vg(
            'adap', 'image_pool', vios_name='nimbus-ch03-p2-vios1')
        self.assertEqual(self.vios_feed[1], ret_vio)
        self.assertEqual(self.entries[1], ret_vg)
        self.mock_vio_get.assert_not_called()
        self.mock_vio_search.assert_called_once_with(
            'adap', name='nimbus-ch03-p2-vios1')
        self.mock_vg_get.assert_called_once_with(
            'adap', parent=self.vios_feed[1])

    def test_find_vg_no_vios(self):
        # Named VIOS doesn't exist => VIOSNotFound before any VG lookup.
        self.mock_vio_search.return_value = []
        self.assertRaises(exc.VIOSNotFound, ts.find_vg, 'adap', 'n/a',
                          vios_name='no_such_vios')
        self.mock_vio_get.assert_not_called()
        self.mock_vio_search.assert_called_once_with(
            'adap', name='no_such_vios')
        self.mock_vg_get.assert_not_called()

    def test_find_vg_not_found(self):
        # VG not on any VIOS => VGNotFound after scanning every VIOS.
        self.assertRaises(exc.VGNotFound, ts.find_vg, 'adap', 'n/a')
        self.mock_vio_get.assert_called_once_with('adap')
        self.mock_vio_search.assert_not_called()
        self.mock_vg_get.assert_has_calls([
            mock.call('adap', parent=self.vios_feed[0]),
            mock.call('adap', parent=self.vios_feed[1])])


class TestVDisk(testtools.TestCase):
    def setUp(self):
        super(TestVDisk, self).setUp()
        self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemotePVMTraits))
        self.adpt = self.adptfx.adpt
        self.v_uuid = '14B854F7-42CE-4FF0-BD57-1D117054E701'
    @mock.patch('pypowervm.adapter.Adapter.update_by_path')
    @mock.patch('pypowervm.adapter.Adapter.read')
    def test_crt_vdisk(self, mock_read, mock_update):
        """crt_vdisk adds a VDisk to the VG and returns the created wrapper."""
        mock_read.return_value = self.vg_resp

        def _mock_update(*a, **kwa):
            # The VG POSTed back should contain the new disk as its last entry
            vg_wrap = a[0]
            new_vdisk = vg_wrap.virtual_disks[-1]
            self.assertEqual('vdisk_name', new_vdisk.name)
            self.assertEqual(10, new_vdisk.capacity)
            return vg_wrap.entry

        mock_update.side_effect = _mock_update
        ret = ts.crt_vdisk(
            self.adpt, self.v_uuid, self.vg_uuid, 'vdisk_name', 10,
            file_format=stor.FileFormatType.RAW)
        self.assertEqual('vdisk_name', ret.name)
        self.assertEqual(10, ret.capacity)
        self.assertEqual(stor.FileFormatType.RAW, ret.file_format)

        def _mock_update_path(*a, **kwa):
            # Simulate the server qualifying the disk name with a path;
            # crt_vdisk must still find and return the right disk.
            vg_wrap = a[0]
            vg_wrap.virtual_disks[-1].name = ('/path/to/' +
                                              vg_wrap.virtual_disks[-1].name)
            new_vdisk = vg_wrap.virtual_disks[-1]
            self.assertEqual('/path/to/vdisk_name2', new_vdisk.name)
            self.assertEqual(10, new_vdisk.capacity)
            return vg_wrap.entry

        mock_update.side_effect = _mock_update_path
        ret = ts.crt_vdisk(
            self.adpt, self.v_uuid, self.vg_uuid, 'vdisk_name2', 10,
            file_format=stor.FileFormatType.RAW)
        self.assertEqual('/path/to/vdisk_name2', ret.name)
        self.assertEqual(10, ret.capacity)
        self.assertEqual(stor.FileFormatType.RAW, ret.file_format)

    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    @mock.patch('pypowervm.adapter.Adapter.read')
    def test_rescan_vstor(self, mock_adpt_read, mock_run_job):
        """rescan_vstor runs the RescanVirtualDisk job with the stor UDID."""
        mock_vio = mock.Mock(adapter=None, uuid='vios_uuid')
        mock_vopt = mock.Mock(adapter=None, udid='stor_udid')
        mock_adpt_read.return_value = self.vg_resp

        def verify_run_job(vios_uuid, job_parms=None):
            self.assertEqual('vios_uuid', vios_uuid)
            self.assertEqual(1, len(job_parms))
            # NOTE(review): the XML markup in this literal appears to have
            # been stripped by the text extraction (angle-bracket tags
            # missing around the JobParameter elements) — restore from the
            # original source before relying on this expected value.
            job_parm = (b''
                        b'VirtualDiskUDID'
                        b'stor_udid'
                        b'')
            self.assertEqual(job_parm, job_parms[0].toxmlstring())
        mock_run_job.side_effect = verify_run_job
        # Ensure that AdapterNotFound exception is raised correctly when
        # neither the VIOS nor the kwarg supplies an adapter.
        self.assertRaises(
            exc.AdapterNotFound, ts.rescan_vstor, mock_vio, mock_vopt)
        self.assertEqual(0, self.adpt.read.call_count)
        self.assertEqual(0, mock_run_job.call_count)
        # Add valid adapter
        mock_vio.adapter = self.adpt
        ts.rescan_vstor(mock_vio, mock_vopt)
        # Validate method invocations
        self.assertEqual(1, self.adpt.read.call_count)
        self.assertEqual(1, mock_run_job.call_count)
        # May also pass plain UUID/UDID strings plus an explicit adapter
        mock_vio = "vios_uuid"
        mock_vopt = "stor_udid"
        ts.rescan_vstor(mock_vio, mock_vopt, adapter=self.adpt)
        self.assertEqual(2, mock_run_job.call_count)


class TestRMStorage(testtools.TestCase):
    """Tests for removal of storage elements from a volume group."""

    def setUp(self):
        super(TestRMStorage, self).setUp()
        self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemotePVMTraits))
        self.adpt = self.adptfx.adpt
        # UUIDs of the VIOS and volume group in the canned response
        self.v_uuid = '14B854F7-42CE-4FF0-BD57-1D117054E701'
        self.vg_uuid = 'b6bdbf1f-eddf-3c81-8801-9859eb6fedcb'
        self.vg_resp = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)

    def test_rm_dev_by_udid(self):
        """_rm_dev_by_udid matches on UDID and mutates the device list."""
        dev1 = mock.Mock(udid=None)
        # dev doesn't have a UDID
        with self.assertLogs(ts.__name__, 'WARNING'):
            self.assertIsNone(ts._rm_dev_by_udid(dev1, None))
        dev1.toxmlstring.assert_called_with(pretty=True)
        # Remove from empty list returns None, and warns (like not-found)
        dev1.udid = 123
        with self.assertLogs(ts.__name__, 'WARNING'):
            self.assertIsNone(ts._rm_dev_by_udid(dev1, []))
        # Works when exact same dev is in the list,
        devlist = [dev1]
        self.assertEqual(dev1, ts._rm_dev_by_udid(dev1, devlist))
        self.assertEqual([], devlist)
        # Works when matching-but-not-same dev is in the list.  Return is the
        # one that was in the list, not the one that was passed in.
        devlist = [dev1]
        dev2 = mock.Mock(udid=123)
        # Two different mocks are not equal
        self.assertNotEqual(dev1, dev2)
        self.assertEqual(dev1, ts._rm_dev_by_udid(dev2, devlist))
        self.assertEqual([], devlist)
        # Error when multiples found
        devlist = [dev1, dev2, dev1]
        self.assertRaises(exc.FoundDevMultipleTimes, ts._rm_dev_by_udid, dev1,
                          devlist)
        # One more good path with a longer list
        dev3 = mock.Mock()
        dev4 = mock.Mock(udid=456)
        devlist = [dev3, dev2, dev4]
        self.assertEqual(dev2, ts._rm_dev_by_udid(dev1, devlist))
        self.assertEqual([dev3, dev4], devlist)
    @mock.patch('pypowervm.adapter.Adapter.update_by_path')
    def test_rm_vopts(self, mock_update):
        """rm_vg_storage removes VOptMedia and only POSTs when changed."""
        mock_update.return_value = self.vg_resp
        vg_wrap = stor.VG.wrap(self.vg_resp)
        repo = vg_wrap.vmedia_repos[0]
        # Remove a valid VOptMedia
        valid_vopt = repo.optical_media[0]
        # Removal should hit.
        vg_wrap = ts.rm_vg_storage(vg_wrap, vopts=[valid_vopt])
        # Update happens, by default
        self.assertEqual(1, mock_update.call_count)
        repo = vg_wrap.vmedia_repos[0]
        self.assertEqual(2, len(repo.optical_media))
        self.assertNotEqual(valid_vopt.udid, repo.optical_media[0].udid)
        self.assertNotEqual(valid_vopt.udid, repo.optical_media[1].udid)
        # Bogus removal doesn't affect vg_wrap, and doesn't update.
        mock_update.reset_mock()
        invalid_vopt = stor.VOptMedia.bld(self.adpt, 'bogus')
        mock_update.reset_mock()
        vg_wrap = ts.rm_vg_storage(vg_wrap, vopts=[invalid_vopt])
        self.assertEqual(0, mock_update.call_count)
        self.assertEqual(2, len(vg_wrap.vmedia_repos[0].optical_media))
        # Valid multiple removal
        mock_update.reset_mock()
        vg_wrap = ts.rm_vg_storage(vg_wrap, vopts=repo.optical_media[:])
        self.assertEqual(1, mock_update.call_count)
        self.assertEqual(0, len(vg_wrap.vmedia_repos[0].optical_media))


class TestTier(testtools.TestCase):
    """Tests for default_tier_for_ssp."""

    @mock.patch('pypowervm.wrappers.storage.Tier.search')
    def test_default_tier_for_ssp(self, mock_srch):
        """Returns the default Tier; raises if the SSP has none."""
        ssp = mock.Mock()
        self.assertEqual(mock_srch.return_value, ts.default_tier_for_ssp(ssp))
        mock_srch.assert_called_with(ssp.adapter, parent=ssp,
                                    is_default=True, one_result=True)
        mock_srch.return_value = None
        self.assertRaises(exc.NoDefaultTierFoundOnSSP,
                          ts.default_tier_for_ssp, ssp)


class TestLUEnt(twrap.TestWrapper):
    """Tests for rm_tier_storage against an LUEnt feed."""

    file = LU_FEED
    wrapper_class_to_test = stor.LUEnt

    def setUp(self):
        super(TestLUEnt, self).setUp()
        self.mock_feed_get = self.useFixture(fixtures.MockPatch(
            'pypowervm.wrappers.storage.LUEnt.get')).mock
        self.mock_feed_get.return_value = self.entries
        self.tier = mock.Mock(spec=stor.Tier, get=mock.Mock(
            return_value=self.entries))
        # Mock out each LUEnt's .delete so I can know I called the right ones.
        for luent in self.entries:
            luent.delete = mock.Mock()
        # This image LU...
        self.img_lu = self.entries[4]
        # ...backs these three linked clones
        self.clone1 = self.entries[9]
        self.clone2 = self.entries[11]
        self.clone3 = self.entries[21]
        self.orig_len = len(self.entries)

    def test_rm_tier_storage_errors(self):
        """Test rm_tier_storage ValueErrors."""
        # Neither tier nor lufeed provided
        self.assertRaises(ValueError, ts.rm_tier_storage, self.entries)
        # Invalid lufeed provided
        self.assertRaises(ValueError, ts.rm_tier_storage, self.entries,
                          lufeed=[1, 2])
        # Same, even if tier provided
        self.assertRaises(ValueError, ts.rm_tier_storage, self.entries,
                          tier=self.tier, lufeed=[1, 2])

    @mock.patch('pypowervm.tasks.storage._rm_lus')
    def test_rm_tier_storage_feed_get(self, mock_rm_lus):
        """Verify rm_tier_storage does a feed GET if lufeed not provided."""
        # Empty return from _rm_lus so the loop doesn't run
        mock_rm_lus.return_value = []
        lus_to_rm = [mock.Mock()]
        ts.rm_tier_storage(lus_to_rm, tier=self.tier)
        self.mock_feed_get.assert_called_once_with(self.tier.adapter,
                                                   parent=self.tier)
        mock_rm_lus.assert_called_once_with(self.entries, lus_to_rm,
                                            del_unused_images=True)
        self.mock_feed_get.reset_mock()
        mock_rm_lus.reset_mock()
        # Now ensure we don't do the feed get if a valid lufeed is provided.
        lufeed = [mock.Mock(spec=stor.LUEnt)]
        # Also test del_unused_images=False
        ts.rm_tier_storage(lus_to_rm, lufeed=lufeed, del_unused_images=False)
        self.mock_feed_get.assert_not_called()
        mock_rm_lus.assert_called_once_with(lufeed, lus_to_rm,
                                            del_unused_images=False)

    def test_rm_tier_storage1(self):
        """Verify rm_tier_storage removes what it oughtta."""
        # Should be able to use either LUEnt or LU
        clone1 = stor.LU.bld(None, self.clone1.name, 1)
        clone1._udid(self.clone1.udid)
        # HttpError doesn't prevent everyone from deleting.
        # NOTE(review): clone1 is a real LU wrapper, not a Mock, so this
        # .side_effect assignment is inert — presumably it was meant to be
        # self.clone1.delete.side_effect (the mock installed in setUp).
        # Confirm intent before changing; the assertions below pass as-is.
        clone1.side_effect = exc.HttpError(mock.Mock())
        ts.rm_tier_storage([clone1, self.clone2], lufeed=self.entries)
        self.clone1.delete.assert_called_once_with()
        self.clone2.delete.assert_called_once_with()
        # Backing image should not be removed because clone3 still linked.  So
        # final result should be just the two removed.
        self.assertEqual(self.orig_len - 2, len(self.entries))
        # Now if we remove the last clone, the image LU should go too.
        ts.rm_tier_storage([self.clone3], lufeed=self.entries)
        self.clone3.delete.assert_called_once_with()
        self.img_lu.delete.assert_called_once_with()
        self.assertEqual(self.orig_len - 4, len(self.entries))
    def test_rm_lu_by_lu(self):
        """rm_ssp_storage drops the LU and returns the refreshed SSP."""
        lu = self.ssp.logical_units[2]
        ssp = ts.rm_ssp_storage(self.ssp, [lu])
        self.assertNotIn(lu, ssp.logical_units)
        # The mocked update_by_path flips the etag to 'after'
        self.assertEqual(ssp.etag, 'after')
        self.assertEqual(len(ssp.logical_units), 4)


class TestLULinkedClone(testtools.TestCase):
    """Tests for image LU / linked-clone relationships on an SSP."""

    def setUp(self):
        super(TestLULinkedClone, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        self.adpt.update_by_path = _mock_update_by_path
        self.adpt.extend_path = lambda x, xag: x
        self.ssp = stor.SSP.bld(self.adpt, 'ssp1', [])
        # img_lu1 not cloned
        self.img_lu1 = self._mk_img_lu(1)
        self.ssp.logical_units.append(self.img_lu1)
        # img_lu2 has two clones
        self.img_lu2 = self._mk_img_lu(2)
        self.ssp.logical_units.append(self.img_lu2)
        self.dsk_lu3 = self._mk_dsk_lu(3, 2)
        self.ssp.logical_units.append(self.dsk_lu3)
        self.dsk_lu4 = self._mk_dsk_lu(4, 2)
        self.ssp.logical_units.append(self.dsk_lu4)
        # img_lu5 has one clone
        self.img_lu5 = self._mk_img_lu(5)
        self.ssp.logical_units.append(self.img_lu5)
        self.dsk_lu6 = self._mk_dsk_lu(6, 5)
        self.ssp.logical_units.append(self.dsk_lu6)
        self.dsk_lu_orphan = self._mk_dsk_lu(7, None)
        self.ssp.logical_units.append(self.dsk_lu_orphan)
        self.ssp.entry.properties = {
            'links': {'SELF': ['/rest/api/uom/SharedStoragePool/123']}}
        self.ssp._etag = 'before'

    def _mk_img_lu(self, idx):
        # Build an IMAGE-type LU with a predictable UDID
        lu = stor.LU.bld(self.adpt, 'img_lu%d' % idx, 123,
                         typ=stor.LUType.IMAGE)
        lu._udid('xxabc123%d' % idx)
        return lu

    def _mk_dsk_lu(self, idx, cloned_from_idx):
        # Build a DISK-type LU, optionally linked back to an image LU
        lu = stor.LU.bld(self.adpt, 'dsk_lu%d' % idx, 123,
                         typ=stor.LUType.DISK)
        lu._udid('xxDisk-LU-UDID-%d' % idx)
        # Allow for "orphan" clones
        if cloned_from_idx is not None:
            lu._cloned_from_udid('yyabc123%d' % cloned_from_idx)
        return lu

    @mock.patch('warnings.warn')
    @mock.patch('pypowervm.tasks.storage.crt_lu')
    def test_crt_lu_linked_clone(self, mock_crt_lu, mock_warn):
        """Deprecated crt_lu_linked_clone delegates to crt_lu and warns."""
        src_lu = self.ssp.logical_units[0]
        mock_crt_lu.return_value = ('ssp', 'dst_lu')
        self.assertEqual(('ssp', 'dst_lu'), ts.crt_lu_linked_clone(
            self.ssp, 'clust1', src_lu, 'linked_lu'))
        mock_crt_lu.assert_called_once_with(
            self.ssp, 'linked_lu', 0, thin=True, typ=stor.LUType.DISK,
            clone=src_lu)
        mock_warn.assert_called_once_with(mock.ANY, DeprecationWarning)

    def test_image_lu_in_use(self):
        """_image_lu_in_use detects whether any clone backs the image."""
        # The orphan will trigger a warning as we cycle through all the LUs
        # without finding any backed by this image.
        with self.assertLogs(ts.__name__, 'WARNING'):
            self.assertFalse(ts._image_lu_in_use(self.ssp.logical_units,
                                                 self.img_lu1))
        self.assertTrue(ts._image_lu_in_use(self.ssp.logical_units,
                                            self.img_lu2))

    def test_image_lu_for_clone(self):
        """_image_lu_for_clone maps a clone back to its image LU (or None)."""
        self.assertEqual(self.img_lu2,
                         ts._image_lu_for_clone(self.ssp.logical_units,
                                                self.dsk_lu3))
        self.dsk_lu3._cloned_from_udid(None)
        self.assertIsNone(ts._image_lu_for_clone(self.ssp.logical_units,
                                                 self.dsk_lu3))

    def test_rm_ssp_storage(self):
        """rm_ssp_storage honors del_unused_images and skips no-op POSTs."""
        lu_names = set(lu.name for lu in self.ssp.logical_units)
        # This one should remove the disk LU but *not* the image LU
        ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu3],
                                del_unused_images=False)
        lu_names.remove(self.dsk_lu3.name)
        self.assertEqual(lu_names, set(lu.name for lu in ssp.logical_units))
        # This one should remove *both* the disk LU and the image LU
        ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu4])
        lu_names.remove(self.dsk_lu4.name)
        lu_names.remove(self.img_lu2.name)
        self.assertEqual(lu_names, set(lu.name for lu in ssp.logical_units))
        # This one should remove the disk LU but *not* the image LU, even
        # though it's now unused.
        self.assertTrue(ts._image_lu_in_use(self.ssp.logical_units,
                                            self.img_lu5))
        ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu6],
                                del_unused_images=False)
        lu_names.remove(self.dsk_lu6.name)
        self.assertEqual(lu_names, set(lu.name for lu in ssp.logical_units))
        self.assertFalse(ts._image_lu_in_use(self.ssp.logical_units,
                                             self.img_lu5))
        # No update if no change
        self.adpt.update_by_path = lambda *a, **k: self.fail()
        ssp = ts.rm_ssp_storage(self.ssp, [self.dsk_lu4])


class TestScrub(testtools.TestCase):
    """Two VIOSes in feed; no VFC mappings; no storage in VSCSI mappings."""

    def setUp(self):
        super(TestScrub, self).setUp()
        adpt = self.useFixture(fx.AdapterFx()).adpt
        self.vio_feed = vios.VIOS.wrap(tju.load_file(VIOS_FEED, adpt))
        self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
        self.logfx = self.useFixture(fx.LoggingFx())
        self.ftsk = tx.FeedTask('scrub', self.vio_feed)

    @mock.patch('pypowervm.tasks.storage._RemoveStorage.execute')
    def test_no_matches(self, mock_rm_stg):
        """When removals have no hits, log debug messages, but no warnings."""
        # Our data set has no VFC mappings and no VSCSI mappings with LPAR ID 1
        ts.add_lpar_storage_scrub_tasks([1], self.ftsk, lpars_exist=True)
        self.ftsk.execute()
        self.assertEqual(0, self.logfx.patchers['warning'].mock.call_count)
        for vname in (vwrap.name for vwrap in self.vio_feed):
            self.logfx.patchers['debug'].mock.assert_any_call(
                mock.ANY, dict(stg_type='VSCSI', lpar_id=1, vios_name=vname))
            self.logfx.patchers['debug'].mock.assert_any_call(
                mock.ANY, dict(stg_type='VFC', lpar_id=1, vios_name=vname))
        # Nothing changed, so no POST; storage removal task still runs once
        self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
        self.assertEqual(1, mock_rm_stg.call_count)
    @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap')
    def test_multiple_removals(self, mock_wrap):
        """Scrub several LPAR IDs at once; live LPARs are skipped."""
        # Pretend LPAR feed is "empty" so we don't skip any removals.
        mock_wrap.return_value = []
        v1 = self.vio_feed[0]
        v2 = self.vio_feed[1]
        v1_map_count = len(v1.scsi_mappings)
        v2_map_count = len(v2.scsi_mappings)
        # Zero removals works
        ts.add_lpar_storage_scrub_tasks([], self.ftsk)
        self.ftsk.execute()
        self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
        # Removals for which no mappings exist
        ts.add_lpar_storage_scrub_tasks([71, 72, 76, 77], self.ftsk)
        self.ftsk.execute()
        self.assertEqual(0, self.txfx.patchers['update'].mock.call_count)
        # Remove some from each VIOS
        self.assertEqual(v1_map_count, len(v1.scsi_mappings))
        self.assertEqual(v2_map_count, len(v2.scsi_mappings))
        ts.add_lpar_storage_scrub_tasks([3, 37, 80, 7, 27, 85], self.ftsk)
        self.ftsk.execute()
        self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
        self.assertEqual(v1_map_count - 3, len(v1.scsi_mappings))
        self.assertEqual(v2_map_count - 3, len(v2.scsi_mappings))
        # Now make the LPAR feed hit some of the removals.  They should be
        # skipped.
        self.txfx.patchers['update'].mock.reset_mock()
        v1_map_count = len(v1.scsi_mappings)
        v2_map_count = len(v2.scsi_mappings)
        mock_wrap.return_value = [mock.Mock(id=i) for i in (4, 5, 8, 11)]
        ts.add_lpar_storage_scrub_tasks([4, 5, 6, 8, 11, 12], self.ftsk)
        self.ftsk.execute()
        self.assertEqual(2, self.txfx.patchers['update'].mock.call_count)
        self.assertEqual(v1_map_count - 1, len(v1.scsi_mappings))
        self.assertEqual(v2_map_count - 1, len(v2.scsi_mappings))
        # Make sure the right ones were ignored
        v1_map_lids = [sm.server_adapter.lpar_id for sm in v1.scsi_mappings]
        v2_map_lids = [sm.server_adapter.lpar_id for sm in v2.scsi_mappings]
        self.assertIn(4, v1_map_lids)
        self.assertIn(5, v1_map_lids)
        self.assertIn(8, v2_map_lids)
        self.assertIn(11, v2_map_lids)
        # ...and the right ones were removed
        self.assertNotIn(6, v1_map_lids)
        self.assertNotIn(12, v2_map_lids)


class TestScrub2(testtools.TestCase):
    """One VIOS in feed; VFC mappings; interesting VSCSI mappings."""

    def setUp(self):
        super(TestScrub2, self).setUp()
        self.adpt = self.useFixture(
            fx.AdapterFx(traits=fx.RemotePVMTraits)).adpt
        self.vio_feed = [vios.VIOS.wrap(tju.load_file(VIOS_ENTRY, self.adpt))]
        self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
        self.logfx = self.useFixture(fx.LoggingFx())
        self.ftsk = tx.FeedTask('scrub', self.vio_feed)

    @mock.patch('pypowervm.tasks.storage._rm_vdisks')
    @mock.patch('pypowervm.tasks.storage._rm_vopts')
    @mock.patch('pypowervm.tasks.storage._rm_lus')
    def test_lu_vopt_vdisk(self, mock_rm_lu, mock_rm_vopt, mock_rm_vd):
        """Scrub removes vopt/vdisk backing storage but ignores LUs."""
        def verify_rm_stg_call(exp_list):
            # Build a side-effect that asserts the expected storage elements
            # (by UDID) were handed to the removal helper.
            def _rm_stg(wrapper, stglist, *a, **k):
                self.assertEqual(len(exp_list), len(stglist))
                for exp, act in zip(exp_list, stglist):
                    self.assertEqual(exp.udid, act.udid)
            return _rm_stg
        warns = [mock.call(
            mock.ANY,
            {'stg_type': 'VSCSI', 'lpar_id': 3, 'num_maps': 3,
             'vios_name': self.vio_feed[0].name})]
        # We should ignore the LUs...
        mock_rm_lu.side_effect = self.fail
        # ...but should emit a warning about ignoring them
        warns.append(mock.call(
            mock.ANY,
            {'stg_name': 'volume-boot-8246L1C_0604CAA-salsman66-00000004',
             'stg_type': 'LogicalUnit'}))
        vorm = self.vio_feed[0].scsi_mappings[5].backing_storage
        mock_rm_vopt.side_effect = verify_rm_stg_call([vorm])
        warns.append(mock.call(
            mock.ANY, {'vocount': 1, 'vios': self.vio_feed[0].name, 'volist'
                       '': ["%s (%s)" % (vorm.name, vorm.udid)]}))
        vdrm = self.vio_feed[0].scsi_mappings[8].backing_storage
        mock_rm_vd.side_effect = verify_rm_stg_call([vdrm])
        warns.append(mock.call(
            mock.ANY, {'vdcount': 1, 'vios': self.vio_feed[0].name, 'vdlist'
                       '': ["%s (%s)" % (vdrm.name, vdrm.udid)]}))
        ts.add_lpar_storage_scrub_tasks([3], self.ftsk, lpars_exist=True)
        # LPAR ID 45 is not represented in the mappings.  Test a) that it is
        # ignored, b) that we can have two separate LPAR storage scrub tasks
        # in the same FeedTask (no duplicate 'provides' names).
        ts.add_lpar_storage_scrub_tasks([45], self.ftsk, lpars_exist=True)
        self.ftsk.execute()
        self.assertEqual(2, mock_rm_vopt.call_count)
        self.assertEqual(2, mock_rm_vd.call_count)
        self.logfx.patchers['warning'].mock.assert_has_calls(
            warns, any_order=True)

    @mock.patch('pypowervm.tasks.storage._rm_vdisks')
    @mock.patch('pypowervm.tasks.storage._rm_vopts')
    @mock.patch('pypowervm.tasks.storage._rm_lus')
    def test_no_remove_storage(self, mock_rm_lu, mock_rm_vopt, mock_rm_vd):
        """remove_storage=False scrubs mappings without touching storage."""
        ts.add_lpar_storage_scrub_tasks([3], self.ftsk, lpars_exist=True,
                                        remove_storage=False)
        self.ftsk.execute()
        mock_rm_lu.assert_not_called()
        mock_rm_vopt.assert_not_called()
        mock_rm_vd.assert_not_called()
class TestScrub3(testtools.TestCase):
    """One VIOS; lots of orphan VSCSI and VFC mappings."""

    def setUp(self):
        super(TestScrub3, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        self.vio_feed = [vios.VIOS.wrap(tju.load_file(VIOS_ENTRY2, self.adpt))]
        self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
        self.logfx = self.useFixture(fx.LoggingFx())
        self.ftsk = tx.FeedTask('scrub', self.vio_feed)

    @mock.patch('pypowervm.tasks.storage._rm_vopts')
    def test_orphan(self, mock_rm_vopts):
        """Scrub orphan VSCSI and VFC mappings."""
        def validate_rm_vopts(vgwrap, vopts, **kwargs):
            # Two of the VSCSI mappings have storage; both are vopts
            self.assertEqual(2, len(vopts))
        mock_rm_vopts.side_effect = validate_rm_vopts
        vwrap = self.vio_feed[0]
        # Save the "before" sizes of the mapping lists
        vscsi_len = len(vwrap.scsi_mappings)
        vfc_len = len(vwrap.vfc_mappings)
        ts.add_orphan_storage_scrub_tasks(self.ftsk)
        ret = self.ftsk.execute()
        # One for vscsi maps, one for vfc maps, one for vopt storage
        self.assertEqual(3, self.logfx.patchers['warning'].mock.call_count)
        # Pull out the WrapperTask returns from the (one) VIOS
        wtr = ret['wrapper_task_rets'].popitem()[1]
        vscsi_removals = wtr['vscsi_removals_orphans']
        self.assertEqual(18, len(vscsi_removals))
        # Removals are really orphans
        for srm in vscsi_removals:
            self.assertIsNone(srm.client_adapter)
        # The right number of maps remain.
        self.assertEqual(vscsi_len - 18, len(vwrap.scsi_mappings))
        # Assert the "any" adapter still exists in the mappings.
        self.assertIn(stor.ANY_SLOT, [smp.server_adapter.lpar_slot_num
                                      for smp in vwrap.scsi_mappings])
        # Remaining maps are not orphans.
        for smp in vwrap.scsi_mappings:
            if smp.server_adapter.lpar_slot_num != stor.ANY_SLOT:
                self.assertIsNotNone(smp.client_adapter)
        # _RemoveOrphanVfcMaps doesn't "provide", so the following are limited.
        # The right number of maps remain.
        self.assertEqual(vfc_len - 19, len(vwrap.vfc_mappings))
        # Remaining maps are not orphans.
        for fmp in vwrap.vfc_mappings:
            self.assertIsNotNone(fmp.client_adapter)
        # POST was warranted.
        self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
        # _RemoveStorage invoked _rm_vopts
        self.assertEqual(1, mock_rm_vopts.call_count)

    @mock.patch('pypowervm.tasks.storage._rm_vdisks')
    @mock.patch('pypowervm.tasks.storage._rm_vopts')
    @mock.patch('pypowervm.tasks.storage.find_stale_lpars')
    @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.wrap')
    def test_comprehensive_scrub(self, mock_wrap, mock_stale_lids,
                                 mock_rm_vopts, mock_rm_vdisks):
        """ComprehensiveScrub removes stale-LPAR and orphan mappings."""
        # Don't confuse the 'update' call count with the VG POST
        mock_rm_vopts.return_value = None
        mock_rm_vdisks.return_value = None
        # Three "stale" LPARs in addition to the orphans.  These LPAR IDs are
        # represented in both VSCSI and VFC mappings.
        mock_stale_lids.return_value = [15, 18, 22]
        # Make sure all our "stale" lpars hit.
        mock_wrap.return_value = []
        vwrap = self.vio_feed[0]
        # Save the "before" sizes of the mapping lists
        vscsi_len = len(vwrap.scsi_mappings)
        vfc_len = len(vwrap.vfc_mappings)
        ts.ComprehensiveScrub(self.adpt).execute()
        # The right number of maps remain.
        self.assertEqual(vscsi_len - 21, len(vwrap.scsi_mappings))
        self.assertEqual(vfc_len - 22, len(vwrap.vfc_mappings))
        self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
        self.assertEqual(1, mock_rm_vopts.call_count)
        self.assertEqual(1, mock_rm_vdisks.call_count)

    @staticmethod
    def count_maps_for_lpar(mappings, lpar_id):
        """Count the mappings whose client side is the specified LPAR ID.

        :param mappings: List of VFC or VSCSI mappings to search.
        :param lpar_id: The client LPAR ID to search for.
        :return: Integer - the number of mappings whose
                 server_adapter.lpar_id matches the specified lpar_id.
        """
        return len([1 for amap in mappings
                    if amap.server_adapter.lpar_id == lpar_id])

    def test_remove_portless_vfc_maps1(self):
        """Test _remove_portless_vfc_maps with no LPAR ID."""
        vwrap = self.vio_feed[0]
        # Save the "before" size of the VFC mapping list
        vfc_len = len(vwrap.vfc_mappings)
        # Count our target LPARs' mappings before
        lpar24maps = self.count_maps_for_lpar(vwrap.vfc_mappings, 24)
        lpar124maps = self.count_maps_for_lpar(vwrap.vfc_mappings, 124)
        ts.ScrubPortlessVFCMaps(self.adpt).execute()
        # Overall two fewer maps
        self.assertEqual(vfc_len - 2, len(vwrap.vfc_mappings))
        # ...and they were the right ones
        self.assertEqual(lpar24maps - 1,
                         self.count_maps_for_lpar(vwrap.vfc_mappings, 24))
        self.assertEqual(lpar124maps - 1,
                         self.count_maps_for_lpar(vwrap.vfc_mappings, 124))
        self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)

    def test_remove_portless_vfc_maps2(self):
        """Test _remove_portless_vfc_maps specifying an LPAR ID."""
        vwrap = self.vio_feed[0]
        # Save the "before" size of the VFC mapping list
        vfc_len = len(vwrap.vfc_mappings)
        # Count our target LPAR's mappings before
        lpar24maps = self.count_maps_for_lpar(vwrap.vfc_mappings, 24)
        ts.ScrubPortlessVFCMaps(self.adpt, lpar_id=24).execute()
        # Overall one map was scrubbed
        self.assertEqual(vfc_len - 1, len(vwrap.vfc_mappings))
        # ...and it was the right one
        self.assertEqual(lpar24maps - 1,
                         self.count_maps_for_lpar(vwrap.vfc_mappings, 24))
        self.assertEqual(1, self.txfx.patchers['update'].mock.call_count)
class TestScrub4(testtools.TestCase):
    """Novalink partition hosting storage for another VIOS partition"""

    def setUp(self):
        super(TestScrub4, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        self.vio_feed = vios.VIOS.wrap(tju.load_file(VIOS_FEED2, self.adpt))
        self.txfx = self.useFixture(fx.FeedTaskFx(self.vio_feed))
        self.logfx = self.useFixture(fx.LoggingFx())
        self.ftsk = tx.FeedTask('scrub', [self.vio_feed[0]])
        self.mock_lpar = self.useFixture(
            fixtures.MockPatch('pypowervm.tasks.storage.lpar.LPAR.get')).mock
        self.mock_vios = self.useFixture(
            fixtures.MockPatch('pypowervm.tasks.storage.vios.VIOS.get')).mock
        # Set default mock return values, these may be overridden per test
        self.mock_lpar.return_value = lpar.LPAR.wrap(
            tju.load_file(LPAR_FEED), self.adpt)
        self.mock_vios.return_value = self.vio_feed

    def test_find_stale_lpars_vios_only(self):
        """With an empty LPAR feed, only VIOS partitions count as live."""
        self.mock_lpar.return_value = []
        self.assertEqual({16, 102},
                         set(ts.find_stale_lpars(self.vio_feed[0])))

    def test_find_stale_lpars_combined(self):
        """LPAR and VIOS feeds combine; only truly-absent IDs are stale."""
        self.assertEqual([102], ts.find_stale_lpars(self.vio_feed[0]))

    @mock.patch('pypowervm.tasks.storage._remove_lpar_maps')
    def test_orphan_scrub(self, mock_rm_lpar):
        """ComprehensiveScrub handles stale LPARs and orphan mappings."""
        def client_adapter_data(mappings):
            # Snapshot (lpar_id, slot) pairs so removals can be diffed
            return {(smap.server_adapter.lpar_id,
                     smap.server_adapter.lpar_slot_num) for smap in mappings}
        scsi_maps = client_adapter_data(self.vio_feed[0].scsi_mappings)
        vfc_maps = client_adapter_data(self.vio_feed[0].vfc_mappings)
        ts.ComprehensiveScrub(self.adpt).execute()
        # Assert that stale lpar detection works correctly
        # (LPAR 102 does not exist)
        mock_rm_lpar.assert_has_calls([
            mock.call(self.vio_feed[0], [102], mock.ANY),
            mock.call(self.vio_feed[1], [], mock.ANY),
            mock.call(self.vio_feed[2], [], mock.ANY)
        ], any_order=True)
        # Assert that orphan detection removed the correct SCSI mapping
        # (VSCSI Mapping for VIOS 101, slot 17 has no client adapter)
        scsi_maps -= client_adapter_data(self.vio_feed[0].scsi_mappings)
        self.assertEqual({(101, 17)}, scsi_maps)
        # Assert that orphan detection removed the correct VFC mapping
        # (VFC Mapping for LP 100 slot 50 has no client adapter)
        vfc_maps -= client_adapter_data(self.vio_feed[0].vfc_mappings)
        self.assertEqual({(100, 50)}, vfc_maps)

    @mock.patch('pypowervm.tasks.storage._remove_lpar_maps')
    def test_add_lpar_storage_scrub_tasks(self, mock_rm_lpar):
        """Only IDs absent from both LPAR and VIOS feeds are scrubbed."""
        # Some of the IDs in "lpar_list" appear in the LPAR feed,
        # and others appear in the VIOS feed.
        # IDs in "stale_lpars" do not exist in either the LPAR or VIOS feed.
        lpar_list = [100, 101, 102, 55, 21, 4, 2, 16]
        stale_lpars = {102, 55, 21}
        ts.add_lpar_storage_scrub_tasks(lpar_list, self.ftsk,
                                        remove_storage=False)
        self.ftsk.execute()
        # One removal call per mapping type (VSCSI + VFC)
        self.assertEqual(2, mock_rm_lpar.call_count)
        mock_rm_lpar.assert_has_calls([
            mock.call(self.vio_feed[0], stale_lpars, 'VSCSI'),
            mock.call(self.vio_feed[0], stale_lpars, 'VFC')
        ], any_order=True)
import mock import testtools from pypowervm import adapter as adpt import pypowervm.entities as ent from pypowervm import exceptions as pvm_exc from pypowervm.tasks import network_bridger as net_br import pypowervm.tests.test_fixtures as fx from pypowervm.tests.test_utils import pvmhttp from pypowervm.wrappers import managed_system as pvm_ms from pypowervm.wrappers import network as pvm_net MGR_NET_BR_FAILOVER_FILE = 'nbbr_network_bridge_failover.txt' MGR_NET_BR_FILE = 'nbbr_network_bridge.txt' MGR_NET_BR_PEER_FILE = 'nbbr_network_bridge_peer.txt' MGR_VNET_FILE = 'nbbr_virtual_network.txt' MGR_VSW_FILE = 'nbbr_virtual_switch.txt' ORPHAN_VIOS_FEED = 'fake_vios_feed.txt' ORPHAN_CNA_FEED = 'cna_feed.txt' class TestNetworkBridger(testtools.TestCase): """General tests for the Network Bridger superclass. Subclasses of Network Bridgers should extend this class. """ def setUp(self): super(TestNetworkBridger, self).setUp() self.adptfx = self.useFixture(fx.AdapterFx( traits=fx.LocalPVMTraits)) self.adpt = self.adptfx.adpt def resp(file_name): return pvmhttp.load_pvm_resp( file_name, adapter=self.adpt).get_response() self.mgr_nbr_resp = resp(MGR_NET_BR_FILE) self.mgr_nbr_fo_resp = resp(MGR_NET_BR_FAILOVER_FILE) self.mgr_nbr_peer_resp = resp(MGR_NET_BR_PEER_FILE) self.mgr_vnet_resp = resp(MGR_VNET_FILE) self.mgr_vsw_resp = resp(MGR_VSW_FILE) self.orphan_vio_resp = resp(ORPHAN_VIOS_FEED) self.orphan_cna_feed = resp(ORPHAN_CNA_FEED) self.host_uuid = 'c5d782c7-44e4-3086-ad15-b16fb039d63b' self.nb_uuid = 'b6a027a8-5c0b-3ac0-8547-b516f5ba6151' self.nb_uuid_peer = '9af89d52-5892-11e5-885d-feff819cdc9f' def test_ensure_vlan_on_nb(self): """This does a happy path test. Assumes VLAN on NB already. No subclass invocation. 
""" self.adpt.read.return_value = self.mgr_nbr_resp net_br.ensure_vlan_on_nb(self.adpt, self.host_uuid, self.nb_uuid, 2227) self.assertEqual(1, self.adpt.read.call_count) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger' '._validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger' '._remove_vlan_from_nb_synch') def test_ensure_vlan_on_nb_wrong_peer(self, mock_remove, mock_orphan): """Test moving vlan from one peer to another. No subclass invocation. """ self.adpt.read.side_effect = [ self.mgr_nbr_peer_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb net_br.ensure_vlan_on_nb(self.adpt, self.host_uuid, self.nb_uuid, 1001) mock_remove.assert_called_once_with( self.nb_uuid_peer, 1001, fail_if_pvid=True, existing_nbs=mock.ANY) def test_is_arbitrary_vid(self): nbs = pvm_net.NetBridge.wrap(self.mgr_nbr_resp) bridger = net_br.NetworkBridger(self.adpt, self.host_uuid) self.assertTrue(bridger._is_arbitrary_vid(4094, nbs)) self.assertFalse(bridger._is_arbitrary_vid(2227, nbs)) def test_find_new_arbitrary_vid(self): nbs = pvm_net.NetBridge.wrap(self.mgr_nbr_resp) bridger = net_br.NetworkBridger(self.adpt, self.host_uuid) self.assertEqual(4093, bridger._find_new_arbitrary_vid(nbs)) self.assertEqual(4092, bridger._find_new_arbitrary_vid(nbs, others=[4093])) def test_remove_vlan_from_nb_bad_vid(self): """Attempt to remove a VID that can't be taken off NB.""" # Mock Data self.adpt.read.return_value = self.mgr_nbr_resp # Should fail if fail_if_pvid set to True self.assertRaises(pvm_exc.PvidOfNetworkBridgeError, net_br.remove_vlan_from_nb, self.adpt, self.host_uuid, self.nb_uuid, 2227, True) # Should not fail if fail_if_pvid set to False, but shouldn't call # update either. 
net_br.remove_vlan_from_nb(self.adpt, self.host_uuid, self.nb_uuid, '2227') self.assertEqual(0, self.adpt.update.call_count) def _setup_reassign_arbitrary_vid(self): vsw_p = mock.patch('pypowervm.wrappers.network.VSwitch.search') self.mock_vsw = vsw_p.start() self.addCleanup(vsw_p.stop) vnet = pvm_net.VNet._bld(self.adpt).entry resp1 = adpt.Response('reqmethod', 'reqpath', 'status', 'reason', {}) resp1.feed = ent.Feed({}, [vnet]) self.adpt.read.return_value = resp1 self.adpt.read_by_href.return_value = vnet nb = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0] resp2 = adpt.Response('reqmethod', 'reqpath', 'status', 'reason', {}) resp2.entry = nb.entry self.adpt.update.return_value = resp2 vsw = pvm_net.VSwitch.wrap(self.mgr_vsw_resp)[0] self.mock_vsw.return_value = vsw return nb def test_build_orphan_map(self): self.adpt.read.side_effect = [self.orphan_vio_resp] bridger = net_br.NetworkBridger(self.adpt, self.host_uuid) orphan_map = bridger._build_orphan_map() expected_map = { 0: {'nimbus-ch03-p2-vios2': {'ent11': [4092, 2018, 2019]}}, 1: {'nimbus-ch03-p2-vios2': {'ent12': [2800, 2801]}} } self.assertEqual(expected_map, orphan_map) def test_validate_orphan_on_ensure(self): """Tests the _validate_orphan_on_ensure method.""" self.adpt.read.side_effect = [self.orphan_vio_resp] bridger = net_br.NetworkBridger(self.adpt, self.host_uuid) # Test the Trunk Path - PVID and then an additional self.assertRaises( pvm_exc.OrphanVLANFoundOnProvision, bridger._validate_orphan_on_ensure, 4092, 0) self.assertRaises( pvm_exc.OrphanVLANFoundOnProvision, bridger._validate_orphan_on_ensure, 2018, 0) # Different vSwitch self.assertRaises( pvm_exc.OrphanVLANFoundOnProvision, bridger._validate_orphan_on_ensure, 2800, 1) # Shouldn't fail on a good vlan bridger._validate_orphan_on_ensure(2, 0) bridger._validate_orphan_on_ensure(1, 0) bridger._validate_orphan_on_ensure(4094, 1) def test_get_orphan_vlans(self): """Tests the _get_orphan_vlans method.""" self.adpt.read.side_effect = 
[self.orphan_vio_resp] bridger = net_br.NetworkBridger(self.adpt, self.host_uuid) self.assertListEqual([], bridger._get_orphan_vlans(2)) self.assertListEqual([2018, 2019, 4092], bridger._get_orphan_vlans(0)) self.assertListEqual([2800, 2801], bridger._get_orphan_vlans(1)) class TestNetworkBridgerVNet(TestNetworkBridger): """General tests for the network bridge super class and the VNet impl.""" def setUp(self): super(TestNetworkBridgerVNet, self).setUp() self.adptfx.set_traits(fx.RemoteHMCTraits) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_get_orphan_vlans') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' '_reassign_arbitrary_vid') @mock.patch('pypowervm.wrappers.network.NetBridge.supports_vlan') def test_ensure_vlan_on_nb_reassign( self, mock_support_vlan, mock_reassign, mock_orphan_validate, mock_orphans, mock_lock): """Validates that after update, we support the VLAN.""" # Have the response self.adpt.read.return_value = self.mgr_nbr_resp # First call, say that we don't support the VLAN (which is true). # Second call, fake out that we now do. # Works in pairs, as there are two VLANs we're working through. mock_support_vlan.side_effect = [False, False, True, True] mock_orphans.return_value = [] # Invoke net_br.ensure_vlans_on_nb(self.adpt, self.host_uuid, self.nb_uuid, [4093, 4094]) self.assertEqual(2, self.adpt.read.call_count) self.assertEqual(1, mock_reassign.call_count) self.assertEqual(1, mock_lock.call_count) # Should be called re-assigning 4094 (old) to 4092. Shouldn't be # 4093 as that is also an additional VLAN. mock_reassign.assert_called_once_with(4094, 4092, mock.ANY) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' 
'_find_or_create_vnet') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' '_is_arbitrary_vid') def test_ensure_vlan_on_nb_new_vlan(self, mock_arb_vid, mock_find_vnet, mock_orphan_validate, mock_lock): """Validates new VLAN on existing Load Group.""" # Build the responses self.adpt.read.side_effect = [self.mgr_nbr_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] mock_arb_vid.return_value = False mock_vnet = mock.MagicMock() mock_vnet.related_href = 'fake_href' mock_find_vnet.return_value = mock_vnet def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertIsNotNone(nb) self.assertEqual(1, len(nb.load_grps[0].vnet_uri_list)) self.assertEqual(2, len(nb.load_grps[1].vnet_uri_list)) self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb # Invoke net_br.ensure_vlan_on_nb(self.adpt, self.host_uuid, self.nb_uuid, 2000) # Validate the calls self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, mock_lock.call_count) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' '_find_or_create_vnet') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' 
'_is_arbitrary_vid') def test_ensure_vlans_on_nb_new_vlan(self, mock_arb_vid, mock_find_vnet, mock_orphan_validate, mock_lock): """Validates new VLAN on existing Load Group.""" # Build the responses self.adpt.read.side_effect = [self.mgr_nbr_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] mock_arb_vid.return_value = False mock_vnet = mock.MagicMock() mock_vnet.related_href = 'fake_href' mock_find_vnet.return_value = mock_vnet def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertEqual(1, len(nb.load_grps[0].vnet_uri_list)) self.assertEqual(2, len(nb.load_grps[1].vnet_uri_list)) self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb # Invoke. VLAN 2227 should be on there already. net_br.ensure_vlans_on_nb(self.adpt, self.host_uuid, self.nb_uuid, [2227, 2000]) # Validate the calls self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, mock_lock.call_count) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_get_orphan_vlans') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' '_find_or_create_vnet') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' '_find_available_ld_grp') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' 
'_is_arbitrary_vid') def test_ensure_vlan_on_nb_new_lg( self, mock_arb_vid, mock_avail_lg, mock_find_vnet, mock_orphan_validate, mock_orphan_vlans, mock_lock): """Validates new VLAN on new Load Group.""" # Build the responses self.adpt.read.side_effect = [self.mgr_nbr_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] mock_arb_vid.return_value = False mock_avail_lg.return_value = None mock_orphan_vlans.return_value = [] # Make the fake virtual networks (the new, then the arb vid) mock_vnet = mock.MagicMock() mock_vnet.related_href = 'fake_href' mock_vnet_avid = mock.MagicMock() mock_vnet_avid.related_href = 'fake_avid_href' mock_find_vnet.side_effect = [mock_vnet, mock_vnet_avid] def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertIsNotNone(nb) self.assertEqual(1, len(nb.load_grps[0].vnet_uri_list)) self.assertEqual(2, len(nb.load_grps[2].vnet_uri_list)) self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb # Invoke net_br.ensure_vlan_on_nb(self.adpt, self.host_uuid, self.nb_uuid, 2000) # Validate the calls self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, mock_lock.call_count) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' '_find_or_create_vnet') def test_reassign_arbitrary_vid(self, mock_find_vnet): nb = self._setup_reassign_arbitrary_vid() mock_find_vnet.return_value = mock.MagicMock() mock_find_vnet.return_value.related_href = 'other' # Make this function return itself. 
def return_self(*kargs, **kwargs): return kargs[0].entry self.adpt.update_by_path.side_effect = return_self bridger = net_br.NetworkBridgerVNET(self.adpt, self.host_uuid) bridger._reassign_arbitrary_vid(4094, 4093, nb) # Make sure the mocks were called self.mock_vsw.assert_called_with(self.adpt, parent_type=pvm_ms.System, parent_uuid=self.host_uuid, one_result=True, switch_id=0) self.assertEqual(1, mock_find_vnet.call_count) self.assertEqual(2, self.adpt.update_by_path.call_count) @mock.patch('oslo_concurrency.lockutils.lock') def test_remove_vlan_from_nb(self, mock_lock): """Happy path testing of the remove VLAN from NB.""" # Mock Data self.adpt.read.return_value = self.mgr_nbr_resp def validate_update(*kargs, **kwargs): # Make sure the load groups are down to just 1 now. nb = kargs[0] self.assertEqual(1, len(nb.load_grps)) return nb.entry self.adpt.update_by_path.side_effect = validate_update net_br.remove_vlan_from_nb(self.adpt, self.host_uuid, self.nb_uuid, 1000) self.assertEqual(1, self.adpt.update_by_path.call_count) self.assertEqual(1, mock_lock.call_count) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerVNET.' 
'_find_vnet_uri_from_lg') def test_remove_vlan_from_nb_lb(self, mock_find_vnet): """Validates a load balance leaves two total LoadGroups.""" # Mock Data mock_find_vnet.return_value = ( 'https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' 'c5d782c7-44e4-3086-ad15-b16fb039d63b/VirtualNetwork/' 'e6c0be9f-b974-35f4-855e-2b7192034fae') net_bridge = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0] net_bridge.load_balance = True # Run the remove bridger = net_br.NetworkBridgerVNET(self.adpt, self.host_uuid) bridger._remove_vlan_from_nb(net_bridge, 1000) # Validate that we still have two load groups self.assertEqual(2, len(net_bridge.load_grps)) self.assertEqual(0, len(net_bridge.load_grps[1].vnet_uri_list)) def test_find_or_create_vnet(self): """Validates that a vnet is created (and deleted) as part of find.""" # Load the data vnets = pvm_net.VNet.wrap(self.mgr_vnet_resp) vsw = pvm_net.VSwitch.wrap(self.mgr_vsw_resp)[0] # Set up the mock create resp = pvm_net.VNet.bld(self.adpt, 'FakeName', 4094, vsw.href, True) mock_resp = adpt.Response('rm', 'rp', 200, 'reason', {}) mock_resp.entry = resp.entry self.adpt.create.return_value = mock_resp # Run the code bridger = net_br.NetworkBridgerVNET(self.adpt, self.host_uuid) ret_val = bridger._find_or_create_vnet(vnets, 4094, vsw) # Equality check self.assertEqual(4094, ret_val.vlan) self.assertTrue(ret_val.tagged) # Make sure the delete was called self.assertEqual(1, self.adpt.delete_by_href.call_count) def test_find_available_lg(self): nb = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0] bridger = net_br.NetworkBridgerVNET(self.adpt, self.host_uuid) lg = bridger._find_available_ld_grp(nb) self.assertIsNotNone(lg) def test_find_available_lg_load_balance(self): """Tests finding the Load Group with load balancing enabled.""" # Set load balancing to True nb = pvm_net.NetBridge.wrap(self.mgr_nbr_fo_resp)[0] nb.load_balance = True bridger = net_br.NetworkBridgerVNET(self.adpt, self.host_uuid) # Even though there is a free VEA, it should come 
back as None. This # is because there is only one free VEA, but we need to balance across # two. lg = bridger._find_available_ld_grp(nb) self.assertIsNone(lg) def test_find_available_min_lg(self): nb = mock.MagicMock() lg_main = mock.MagicMock() lg_first_addl = mock.MagicMock() lg_first_addl.vnet_uri_list = ['a', 'b', 'c'] lg_second_addl = mock.MagicMock() lg_second_addl.vnet_uri_list = ['e', 'f'] nb.load_grps = [lg_main, lg_first_addl, lg_second_addl] bridger = net_br.NetworkBridgerVNET(self.adpt, self.host_uuid) self.assertEqual(lg_second_addl, bridger._find_available_ld_grp(nb)) class TestNetworkBridgerTA(TestNetworkBridger): """General tests for the network bridge super class and the VNet impl.""" def setUp(self): super(TestNetworkBridgerTA, self).setUp() self.adptfx.set_traits(fx.LocalPVMTraits) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_get_orphan_vlans') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA.' '_reassign_arbitrary_vid') @mock.patch('pypowervm.wrappers.network.NetBridge.supports_vlan') def test_ensure_vlan_on_nb_reassign( self, mock_support_vlan, mock_reassign, mock_orphan_validate, mock_orphan_vlans): """Validates that after update, we support the VLAN.""" # Have the response self.adpt.read.return_value = self.mgr_nbr_resp # First call, say that we don't support the VLAN (which is true). # Second call, fake out that we now do. # Need pairs, as there are two VLANs we are passing in. mock_support_vlan.side_effect = [False, False, True, True] mock_orphan_vlans.return_value = [] # Invoke net_br.ensure_vlans_on_nb(self.adpt, self.host_uuid, self.nb_uuid, ['4093', 4094]) self.assertEqual(2, self.adpt.read.call_count) self.assertEqual(1, mock_reassign.call_count) # Should be called re-assigning 4094 (old) to 4092. Shouldn't be # 4093 as that is also an additional VLAN. 
mock_reassign.assert_called_once_with(4094, 4092, mock.ANY) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA.' '_is_arbitrary_vid') def test_ensure_vlan_on_nb_new_vlan(self, mock_arb_vid, mock_orphan_validate): """Validates new VLAN on existing Trunk Adapter.""" # Build the responses self.adpt.read.side_effect = [self.mgr_nbr_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] mock_arb_vid.return_value = False def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertIsNotNone(nb) self.assertEqual(0, len(nb.seas[0].primary_adpt.tagged_vlans)) self.assertEqual(2, len(nb.seas[0].addl_adpts[0].tagged_vlans)) self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb # Invoke net_br.ensure_vlan_on_nb(self.adpt, self.host_uuid, self.nb_uuid, 2000) # Validate the calls self.assertEqual(1, self.adpt.update_by_path.call_count) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA.' '_is_arbitrary_vid') def test_ensure_vlans_on_nb_new_vlan(self, mock_arb_vid, mock_orphan_validate): """Validates new VLAN on existing Load Group.""" # Build the responses self.adpt.read.side_effect = [self.mgr_nbr_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] mock_arb_vid.return_value = False def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertEqual(0, len(nb.seas[0].primary_adpt.tagged_vlans)) self.assertEqual(2, len(nb.seas[0].addl_adpts[0].tagged_vlans)) self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb # Invoke. VLAN 2227 should be on there already. 
net_br.ensure_vlans_on_nb(self.adpt, self.host_uuid, self.nb_uuid, [2227, 2000]) # Validate the calls self.assertEqual(1, self.adpt.update_by_path.call_count) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_get_orphan_vlans') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridger.' '_validate_orphan_on_ensure') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA.' '_find_available_trunks') @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA.' '_is_arbitrary_vid') def test_ensure_vlan_on_nb_new_trunk( self, mock_arb_vid, mock_avail_trunks, mock_orphan_validate, mock_orphan_vlans): """Validates new VLAN on new Load Group.""" # Build the responses self.adpt.read.side_effect = [self.mgr_nbr_resp, self.mgr_vsw_resp, self.mgr_vnet_resp] mock_arb_vid.return_value = False mock_avail_trunks.return_value = None mock_orphan_vlans.return_value = [] def validate_of_update_nb(*kargs, **kwargs): # Validate args nb = kargs[0] self.assertIsNotNone(nb) self.assertEqual(0, len(nb.seas[0].primary_adpt.tagged_vlans)) self.assertEqual(2, len(nb.seas[0].addl_adpts)) self.assertEqual(self.nb_uuid, nb.uuid) return nb.entry self.adpt.update_by_path.side_effect = validate_of_update_nb # Invoke net_br.ensure_vlan_on_nb(self.adpt, self.host_uuid, self.nb_uuid, 2000) # Validate the calls self.assertEqual(1, self.adpt.update_by_path.call_count) def test_reassign_arbitrary_vid(self): nb = self._setup_reassign_arbitrary_vid() # Make this function return itself. def return_self(*kargs, **kwargs): nb_wrap = pvm_net.NetBridge.wrap(kargs[0].entry) self.assertEqual(4093, nb_wrap.seas[0].addl_adpts[0].pvid) return kargs[0].entry self.adpt.update_by_path.side_effect = return_self bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid) bridger._reassign_arbitrary_vid(4094, 4093, nb) # Make sure the mocks were called. Only one update needed. 
self.assertEqual(1, self.adpt.update_by_path.call_count) def test_remove_vlan_from_nb(self): """Happy path testing of the remove VLAN from NB.""" # Mock Data self.adpt.read.return_value = self.mgr_nbr_resp def validate_update(*kargs, **kwargs): # Make sure the load groups are down to just 1 now. nb = kargs[0] self.assertEqual(0, len(nb.seas[0].addl_adpts)) return nb.entry self.adpt.update_by_path.side_effect = validate_update net_br.remove_vlan_from_nb(self.adpt, self.host_uuid, self.nb_uuid, 1000) self.assertEqual(1, self.adpt.update_by_path.call_count) def test_find_available_trunks(self): nb = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0] bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid) trunks = bridger._find_available_trunks(nb) self.assertIsNotNone(trunks) def test_find_available_trunks_load_balance(self): """Tests finding the trunk with load balancing enabled.""" # Set load balancing to True nb = pvm_net.NetBridge.wrap(self.mgr_nbr_fo_resp)[0] nb.load_balance = True bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid) # Even though there is a free VEA, it should come back as None. This # is because there is only one free VEA, but we need to balance across # two. 
trunks = bridger._find_available_trunks(nb) self.assertIsNone(trunks) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA._trunk_list') def test_find_available_min_trunk(self, mock_trunk_list): nb = mock.MagicMock() trunk_addl = mock.MagicMock() trunk_addl.tagged_vlans = ['a', 'b', 'c'] trunk_addl2 = mock.MagicMock() trunk_addl2.tagged_vlans = ['e', 'f'] trunk_addl3 = mock.MagicMock() trunk_addl3.tagged_vlans = ['g', 'h', 'i'] sea = mock.MagicMock() sea.addl_adpts = [trunk_addl, trunk_addl2, trunk_addl3] nb.seas = [sea] bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid) bridger._find_available_trunks(nb) # Validate the trunk list is called with the second additional adapter mock_trunk_list.assert_called_with(nb, trunk_addl2) def test_find_peer_trunk(self): bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid) # No failover, shouldn't have a peer nbs = pvm_net.NetBridge.wrap(self.mgr_nbr_resp) resp = bridger._find_peer_trunk(nbs[0], nbs[0].seas[0].primary_adpt) self.assertIsNone(resp) # Failover, should have a peer nbs = pvm_net.NetBridge.wrap(self.mgr_nbr_fo_resp) resp = bridger._find_peer_trunk(nbs[0], nbs[0].seas[0].primary_adpt) self.assertEqual(nbs[0].seas[1].primary_adpt, resp) @mock.patch('pypowervm.tasks.network_bridger.NetworkBridgerTA.' '_reassign_arbitrary_vid') def test_remove_vlan_from_nb_arb_vid(self, mock_reassign): """Attempt to remove an arbitrary VID off the network bridge.""" # Mock Data self.adpt.read.return_value = self.mgr_nbr_fo_resp # Run the remove of the VLAN. Make sure it is invoked with a new # valid arbitrary PVID. 
net_br.remove_vlan_from_nb(self.adpt, self.host_uuid, self.nb_uuid, '4094') self.assertEqual(1, mock_reassign.call_count) mock_reassign.assert_called_with(4094, 4093, mock.ANY) def test_remove_vlan_from_nb_lb(self): """Validates a load balance remove leaves an additional adpt.""" # Mock Data net_bridge = pvm_net.NetBridge.wrap(self.mgr_nbr_resp)[0] net_bridge.load_balance = True # Run the remove bridger = net_br.NetworkBridgerTA(self.adpt, self.host_uuid) bridger._remove_vlan_from_nb(net_bridge, 1000) # Validate that we left the trunk but no new additional VLANs self.assertEqual(1, len(net_bridge.seas[0].addl_adpts)) self.assertEqual(0, len(net_bridge.seas[0].addl_adpts[0].tagged_vlans)) pypowervm-1.1.24/pypowervm/tests/tasks/test_client_storage.py0000664000175000017500000000476413571367171024235 0ustar neoneo00000000000000# Copyright 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from pypowervm.tasks import client_storage as clstor
from pypowervm.tests.test_utils import test_wrapper_abc as twrap
from pypowervm.wrappers import virtual_io_server as vios


class TestClientStorage(twrap.TestWrapper):
    """Exercise the client_storage lookup helpers against a canned VIOS.

    TestWrapper loads 'fake_vios_mappings.txt' and exposes the first wrapped
    VIOS as self.dwrap; the tests below index into its SCSI/VFC mappings, so
    the assertions are tied to the ordering of mappings in that fixture file.
    """

    # Canned REST payload consumed by the TestWrapper base fixture.
    file = 'fake_vios_mappings.txt'
    # Wrapper class TestWrapper should use to wrap the payload.
    wrapper_class_to_test = vios.VIOS

    def test_udid2scsi(self):
        """Test udid_to_scsi_mapping.

        Covers the ignore_orphan default/override, a mismatched LPAR ID,
        and a mapping with no backing storage.
        """
        maps = self.dwrap.scsi_mappings
        # 2nd mapping has no client adapter (an "orphan" in the fixture)
        lpar_id = maps[1].server_adapter.lpar_id
        # Default: ignore orphan
        self.assertIsNone(clstor.udid_to_scsi_mapping(
            self.dwrap, maps[1].backing_storage.udid, lpar_id))
        # Don't ignore orphan
        self.assertEqual(maps[1], clstor.udid_to_scsi_mapping(
            self.dwrap, maps[1].backing_storage.udid, lpar_id,
            ignore_orphan=False))
        # Doesn't work if the LPAR ID is wrong
        self.assertIsNone(clstor.udid_to_scsi_mapping(
            self.dwrap, maps[1].backing_storage.udid, 123,
            ignore_orphan=False))
        # 4th mapping has client adapter but no backing storage
        self.assertIsNone(clstor.udid_to_scsi_mapping(self.dwrap, 'bogus', 22))

    def test_c_wwpn_to_vfc(self):
        """Test c_wwpn_to_vfc_mapping.

        Verifies orphan VFC mappings are skipped, WWPN normalization
        (case/colons) works, and a miss returns None.
        """
        # Since the first two VFC mappings have no client adapter, this test
        # proves we skip those properly.
        self.assertEqual(self.dwrap.vfc_mappings[5],
                         clstor.c_wwpn_to_vfc_mapping(
                             self.dwrap, 'C05076065A7C02E3'))
        # This works with (limited) craziness in the WWPN format
        self.assertEqual(self.dwrap.vfc_mappings[5],
                         clstor.c_wwpn_to_vfc_mapping(
                             self.dwrap, 'c0:50:76:06:5a:7c:02:e3'))
        # Not found
        self.assertIsNone(clstor.c_wwpn_to_vfc_mapping(
            self.dwrap, 'ab:cd:ef:01:23:45:67:89'))
pypowervm-1.1.24/pypowervm/tests/tasks/test_vopt.py0000664000175000017500000001744513571367171022213 0ustar neoneo00000000000000# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import testtools

from pypowervm import exceptions as pvm_ex
from pypowervm.tasks import vopt
from pypowervm.tests import test_fixtures as pvm_fx


class TestVOpt(testtools.TestCase):
    """Tests the vopt file.

    Exercises vopt.validate_vopt_repo_exists, which caches its discovery
    results in the module-level statics vopt._cur_vios_uuid and
    vopt._cur_vg_uuid.  The tests drive it entirely through mocks of the
    VIOS/VG getters, so the ordering of mock side_effect lists below is
    significant: each entry corresponds to one expected call.
    """

    def setUp(self):
        super(TestVOpt, self).setUp()
        # Mocked Adapter for all REST interactions.
        self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt

        # Wipe out the static variables, so that the re-validate is called
        # (otherwise state cached by a previous test would short-circuit).
        vopt._cur_vios_uuid = None
        vopt._cur_vg_uuid = None

    @mock.patch('pypowervm.wrappers.storage.VG.get')
    @mock.patch('pypowervm.tasks.partition.get_active_vioses')
    def test_validate_vopt_vg1(self, mock_vios_get, mock_vg_get):
        """One VIOS, rootvg found; locals are set."""
        # Init objects to test with.
        # NOTE: 'name' must go through configure_mock; passing name= to
        # Mock() would set the mock's own name instead of an attribute.
        mock_vg = mock.Mock()
        mock_vg.configure_mock(name='rootvg',
                               uuid='1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f',
                               vmedia_repos=['repo'])
        mock_vg_get.return_value = [mock_vg]
        mock_vios = mock.Mock()
        mock_vios.configure_mock(name='the_vios', uuid='vios_uuid',
                                 rmc_state='active')
        mock_vios_get.return_value = [mock_vios]

        # Run
        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(self.apt)
        self.assertEqual('vios_uuid', vio_ret_uuid)
        self.assertEqual('1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f', vg_ret_uuid)

        # Validate the module-level cache was populated.
        self.assertEqual('1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f',
                         vopt._cur_vg_uuid)
        self.assertEqual('vios_uuid', vopt._cur_vios_uuid)

    @mock.patch('pypowervm.tasks.partition.get_active_vioses')
    @mock.patch('pypowervm.wrappers.storage.VG.get')
    @mock.patch('pypowervm.wrappers.storage.VMediaRepos.bld')
    def test_validate_vopt_vg2(self, mock_vmr_bld, mock_vg_get, mock_vios_get):
        """Dual VIOS, multiple VGs, repos on non-rootvg.

        Seven sequential scenarios, each re-seeding mock_vg_get.side_effect
        with the exact sequence of VG.get results expected for that pass.
        """
        # Two active, non-management VIOSes.
        vwrap1 = mock.Mock()
        vwrap1.configure_mock(name='vio1', rmc_state='active', uuid='vio_id1',
                              is_mgmt_partition=False)
        vwrap2 = mock.Mock()
        vwrap2.configure_mock(name='vio2', rmc_state='active', uuid='vio_id2',
                              is_mgmt_partition=False)
        mock_vios_get.return_value = [vwrap1, vwrap2]
        # Two VGs per VIOS; only vg4 (non-rootvg on VIOS 2) has a repos.
        vg1 = mock.Mock()
        vg1.configure_mock(name='rootvg', vmedia_repos=[], uuid='vg1')
        vg2 = mock.Mock()
        vg2.configure_mock(name='other1vg', vmedia_repos=[], uuid='vg2')
        vg3 = mock.Mock()
        vg3.configure_mock(name='rootvg', vmedia_repos=[], uuid='vg3')
        vg4 = mock.Mock()
        vg4.configure_mock(name='other2vg', vmedia_repos=[1], uuid='vg4')

        # 1: Find the media repos on non-rootvg on the second VIOS
        mock_vg_get.side_effect = [[vg1, vg2], [vg3, vg4]]

        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(self.apt)
        self.assertEqual('vio_id2', vio_ret_uuid)
        self.assertEqual('vg4', vg_ret_uuid)

        mock_vios_get.reset_mock()
        mock_vg_get.reset_mock()

        # 2: At this point, the statics are set. If we validate again, and the
        # VG.get returns the right one, we should bail out early.
        mock_vg_get.side_effect = None
        mock_vg_get.return_value = vg4

        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(self.apt)
        self.assertEqual('vio_id2', vio_ret_uuid)
        self.assertEqual('vg4', vg_ret_uuid)

        # Statics unchanged
        self.assertEqual('vg4', vopt._cur_vg_uuid)
        self.assertEqual('vio_id2', vopt._cur_vios_uuid)
        # We didn't have to query the VIOS
        mock_vios_get.assert_not_called()
        # We only did VG.get once
        self.assertEqual(1, mock_vg_get.call_count)

        mock_vg_get.reset_mock()

        # 3: Same again, but this time the repos is somewhere else. We should
        # find it.
        vg4.vmedia_repos = []
        vg2.vmedia_repos = [1]
        # The first VG.get is looking for the already-set repos. The second
        # will be the feed from the first VIOS. There should be no third call,
        # since we should find the repos on VIOS 2.
        mock_vg_get.side_effect = [vg4, [vg1, vg2]]

        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(self.apt)
        self.assertEqual('vio_id1', vio_ret_uuid)
        self.assertEqual('vg2', vg_ret_uuid)

        # And the static values
        self.assertEqual('vg2', vopt._cur_vg_uuid)
        self.assertEqual('vio_id1', vopt._cur_vios_uuid)

        mock_vg_get.reset_mock()
        mock_vios_get.reset_mock()

        # 4: No repository anywhere - need to create one. The default VG name
        # (rootvg) exists in multiple places. Ensure we create in the first
        # one, for efficiency.
        vg2.vmedia_repos = []
        mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
        vg1.update.return_value = vg1

        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(self.apt)
        self.assertEqual('vio_id1', vio_ret_uuid)
        self.assertEqual('vg1', vg_ret_uuid)
        self.assertEqual('vg1', vopt._cur_vg_uuid)
        self.assertEqual('vio_id1', vopt._cur_vios_uuid)
        # The built VMediaRepos was attached to the chosen VG.
        self.assertEqual([mock_vmr_bld.return_value], vg1.vmedia_repos)

        mock_vg_get.reset_mock()
        mock_vios_get.reset_mock()
        vg1 = mock.MagicMock()

        # 5: No repos, need to create one. But not on the mgmt partition.
        vwrap1.configure_mock(name='vio1', rmc_state='active', uuid='vio_id1',
                              is_mgmt_partition=True)
        vg3.vmedia_repos = []
        mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
        vg3.update.return_value = vg3

        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(self.apt)
        self.assertEqual('vio_id2', vio_ret_uuid)
        self.assertEqual('vg3', vg_ret_uuid)
        self.assertEqual('vg3', vopt._cur_vg_uuid)
        self.assertEqual('vio_id2', vopt._cur_vios_uuid)
        self.assertEqual([mock_vmr_bld.return_value], vg3.vmedia_repos)

        mock_vg_get.reset_mock()
        mock_vios_get.reset_mock()
        vg3 = mock.MagicMock()

        # 6: No repos, and a configured VG name that doesn't exist
        vwrap1.configure_mock(name='vio1', rmc_state='active', uuid='vio_id1',
                              is_mgmt_partition=False)
        vg4.vmedia_repos = []
        mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
        self.assertRaises(pvm_ex.NoMediaRepoVolumeGroupFound,
                          vopt.validate_vopt_repo_exists, self.apt,
                          vopt_media_volume_group='mythicalvg')

        # 7: No repos - need to create. Make sure conf setting is honored.
        vg1.vmedia_repos = []
        mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
        vg4.update.return_value = vg4

        vio_ret_uuid, vg_ret_uuid = vopt.validate_vopt_repo_exists(
            self.apt, vopt_media_volume_group='other2vg')
        self.assertEqual('vio_id2', vio_ret_uuid)
        self.assertEqual('vg4', vg_ret_uuid)
        self.assertEqual('vg4', vopt._cur_vg_uuid)
        self.assertEqual('vio_id2', vopt._cur_vios_uuid)
        self.assertEqual([mock_vmr_bld.return_value], vg4.vmedia_repos)
        # rootvg (vg1) was not touched when an explicit VG name was given.
        vg1.update.assert_not_called()
pypowervm-1.1.24/pypowervm/tests/tasks/test_power_opts.py0000664000175000017500000003367713571367171023431 0ustar neoneo00000000000000# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools import pypowervm.exceptions as exc import pypowervm.tasks.power_opts as popts import pypowervm.wrappers.base_partition as bp class TestPowerOpts(testtools.TestCase): def _test_enum(self, enum): """Validate that an enum class has a KEY and proper ALL_VALUES. :param enum: Enumeration class. """ # Get the public symbols in the enum syms = {sym for sym in dir(enum) if not sym.startswith('_')} # Must have a KEY self.assertIn('KEY', syms) self.assertIsNotNone(getattr(enum, 'KEY')) syms.remove('KEY') # Must have ALL_VALUES self.assertIn('ALL_VALUES', syms) syms.remove('ALL_VALUES') # ALL_VALUES must include all the values that aren't KEY or ALL_VALUES self.assertEqual({getattr(enum, sym) for sym in syms}, set(enum.ALL_VALUES)) def test_enums(self): self._test_enum(popts.IPLSrc) self._test_enum(popts.BootMode) self._test_enum(popts.KeylockPos) self._test_enum(popts.IBMiOperationType) self._test_enum(popts.PowerOffOperation) def test_remove_optical(self): knm = popts.RemoveOptical.KEY_NAME ktm = popts.RemoveOptical.KEY_TIME # Default time self.assertEqual({knm: 'name', ktm: 0}, popts.RemoveOptical.bld_map('name')) # Explicit time self.assertEqual({knm: 'name', ktm: 10}, popts.RemoveOptical.bld_map('name', time=10)) def test_power_on_opts(self): # Default init poo = popts.PowerOnOpts() self.assertEqual('PowerOn()', str(poo)) self.assertEqual('PowerOn', poo.JOB_SUFFIX) # Legacy add_parms init poo = popts.PowerOnOpts(legacy_add_parms=dict(foo=1, bar=2)) self.assertEqual('PowerOn(bar=2, foo=1)', str(poo)) # Carry those additional params forward to 
make sure they don't vanish # Enum validation for meth in ('bootmode', 'keylock_pos', 'ibmi_ipl_source', 'ibmi_op_type'): self.assertRaises(exc.InvalidEnumValue, getattr(poo, meth), 'foo') # Set specific (valid) values # Setter method returns the instance self.assertIs(poo, poo.bootmode(popts.BootMode.NORM)) self.assertEqual('PowerOn(bar=2, bootmode=norm, foo=1)', str(poo)) self.assertIs(poo, poo.keylock_pos(popts.KeylockPos.MANUAL)) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, foo=1, keylock=manual)', str(poo)) self.assertIs(poo, poo.bootstring('canvas cord with aglet')) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=canvas cord with aglet, ' 'foo=1, keylock=manual)', str(poo)) # Make sure overwrite works self.assertIs(poo, poo.bootstring('sturdy shoelace')) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, ' 'keylock=manual)', str(poo)) self.assertIs(poo, poo.force()) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, ' 'force=true, keylock=manual)', str(poo)) # Turning off force gets rid of the key self.assertIs(poo, poo.force(value=False)) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, ' 'keylock=manual)', str(poo)) # Remove optical with default time self.assertIs(poo, poo.remove_optical('vopt')) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, ' 'keylock=manual, remove_optical_name=vopt, remove_optical_time=0)', str(poo)) # Remove optical with explicit time. Values are overwritten. 
self.assertIs(poo, poo.remove_optical('VOPT', time=5)) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, ' 'keylock=manual, remove_optical_name=VOPT, remove_optical_time=5)', str(poo)) self.assertIs(poo, poo.ibmi_ipl_source(popts.IPLSrc.A)) self.assertEqual( 'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, ' 'iIPLsource=a, keylock=manual, remove_optical_name=VOPT, ' 'remove_optical_time=5)', str(poo)) self.assertIs(poo, poo.ibmi_op_type(popts.IBMiOperationType.NETBOOT)) self.assertEqual( 'PowerOn(OperationType=netboot, bar=2, bootmode=norm, ' 'bootstring=sturdy shoelace, foo=1, iIPLsource=a, keylock=manual, ' 'remove_optical_name=VOPT, remove_optical_time=5)', str(poo)) # Netboot params. poo = popts.PowerOnOpts().ibmi_netboot_params( 'ip', 'serverip', 'gateway', 'serverdir') self.assertEqual( 'PowerOn(Gateway=gateway, IBMiImageServerDirectory=serverdir, ' 'IPAddress=ip, ServerIPAddress=serverip)', str(poo)) # Optional netboot params, and overwrites self.assertIs(poo, poo.ibmi_netboot_params( 'IP', 'ServerIP', 'Gateway', 'ServerDir', vlanid=2, mtu='mtu', duplex='duplex', connspeed=100, subnet='subnet')) self.assertEqual( 'PowerOn(ConnectionSpeed=100, DuplexMode=duplex, Gateway=Gateway, ' 'IBMiImageServerDirectory=ServerDir, IPAddress=IP, ' 'MaximumTransmissionUnit=mtu, ServerIPAddress=ServerIP, ' 'SubnetMask=subnet, VLANID=2)', str(poo)) def test_power_off_opts(self): # Can OS shutdown? 
ltyp = bp.LPARType rmcs = bp.RMCState for env, rmc, exp in ((ltyp.AIXLINUX, rmcs.ACTIVE, True), (ltyp.AIXLINUX, rmcs.BUSY, False), (ltyp.AIXLINUX, rmcs.INACTIVE, False), (ltyp.OS400, rmcs.ACTIVE, True), (ltyp.OS400, rmcs.BUSY, True), (ltyp.OS400, rmcs.INACTIVE, True), (ltyp.VIOS, rmcs.ACTIVE, True), (ltyp.VIOS, rmcs.BUSY, False), (ltyp.VIOS, rmcs.INACTIVE, False)): self.assertEqual(exp, popts.PowerOffOpts.can_os_shutdown( mock.Mock(env=env, rmc_state=rmc))) # Default init poo = popts.PowerOffOpts() self.assertEqual('PowerOff()', str(poo)) self.assertEqual('PowerOff', poo.JOB_SUFFIX) self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY)) # Legacy add_parms init. Unknown keys are ignored. poo = popts.PowerOffOpts( legacy_add_parms=dict(operation='shutdown', foo=1, restart='true', bar=2, immediate='true')) self.assertEqual( 'PowerOff(immediate=true, operation=shutdown, restart=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertFalse(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Now an "empty" one poo = popts.PowerOffOpts(legacy_add_parms=dict(foo=1, bar=2)) self.assertEqual('PowerOff()', str(poo)) self.assertFalse(poo.is_immediate) self.assertFalse(poo.is_restart) self.assertFalse(poo.is_os) self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY)) # Immediate self.assertIs(poo, poo.immediate()) self.assertEqual('PowerOff(immediate=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertFalse(poo.is_restart) self.assertFalse(poo.is_os) self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY)) # Restart self.assertIs(poo, poo.restart()) self.assertEqual( 'PowerOff(immediate=true, restart=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertFalse(poo.is_os) self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY)) # Operation self.assertIs(poo, poo.operation(popts.PowerOffOperation.DUMPRESTART)) self.assertEqual( 
'PowerOff(immediate=true, operation=dumprestart, restart=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertFalse(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # OS shutdown self.assertIs(poo, poo.operation(popts.PowerOffOperation.OS)) self.assertEqual( 'PowerOff(immediate=true, operation=osshutdown, restart=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Booleans can be shut off self.assertIs(poo, poo.immediate(value=False)) self.assertEqual('PowerOff(operation=osshutdown, restart=true)', str(poo)) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_os) self.assertIs(poo, poo.restart(value=False)) self.assertEqual('PowerOff(operation=osshutdown)', str(poo)) self.assertFalse(poo.is_immediate) self.assertFalse(poo.is_restart) self.assertTrue(poo.is_os) # "Smart" methods. 
Make sure restart is preserved every time we change poo.restart() # OS immediate self.assertIs(poo, poo.os_immediate()) self.assertEqual('PowerOff(immediate=true, operation=osshutdown, ' 'restart=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # OS normal (wipes out immediate) self.assertIs(poo, poo.os_normal()) self.assertEqual('PowerOff(operation=osshutdown, restart=true)', str(poo)) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # VSP hard self.assertIs(poo, poo.vsp_hard()) self.assertEqual('PowerOff(immediate=true, operation=shutdown, ' 'restart=true)', str(poo)) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertFalse(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # VSP normal (wipes out immediate) self.assertIs(poo, poo.vsp_normal()) self.assertEqual('PowerOff(operation=shutdown, restart=true)', str(poo)) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertFalse(poo.is_os) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Soft detect part = mock.Mock(env=ltyp.AIXLINUX, rmc_state=rmcs.ACTIVE) self.assertIs(poo, poo.soft_detect(part)) self.assertTrue(poo.is_os) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Explicit normal shutdown self.assertIs(poo, poo.soft_detect(part, immed_if_os=False)) self.assertTrue(poo.is_os) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Explicit immediate OS shutdown self.assertIs(poo, poo.soft_detect(part, immed_if_os=True)) self.assertTrue(poo.is_os) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) 
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Can't OS shutdown part = mock.Mock(env=ltyp.VIOS, rmc_state=rmcs.BUSY) self.assertIs(poo, poo.soft_detect(part)) self.assertFalse(poo.is_os) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # immed_if_os ignored self.assertIs(poo, poo.soft_detect(part, immed_if_os=True)) self.assertFalse(poo.is_os) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertIs(poo, poo.soft_detect(part, immed_if_os=False)) self.assertFalse(poo.is_os) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertEqual('PowerOff(operation=shutdown, restart=true)', str(poo)) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # IBMi defaults to OS normal part = mock.Mock(env=ltyp.OS400, rmc_state=rmcs.INACTIVE) self.assertIs(poo, poo.soft_detect(part)) self.assertTrue(poo.is_os) self.assertFalse(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) # Explicit immediate self.assertIs(poo, poo.soft_detect(part, immed_if_os=True)) self.assertTrue(poo.is_os) self.assertTrue(poo.is_immediate) self.assertTrue(poo.is_restart) self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY)) pypowervm-1.1.24/pypowervm/tests/tasks/test_sriov.py0000775000175000017500000006205513571367171022375 0ustar neoneo00000000000000# Copyright 2016, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for pypowervm.tasks.sriov.""" import mock import testtools import pypowervm.exceptions as ex import pypowervm.tasks.sriov as tsriov import pypowervm.tests.test_fixtures as fx import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.iocard as card import pypowervm.wrappers.managed_system as ms def fake_sriov(mode, state, sriov_adap_id, phys_ports): return mock.Mock(mode=mode, state=state, sriov_adap_id=sriov_adap_id, phys_loc_code='sriov_loc%d' % sriov_adap_id, phys_ports=phys_ports) def fake_pport(sriov_adap_id, port_id, cfg_lps, alloc_cap): return mock.Mock(sriov_adap_id=sriov_adap_id, port_id=port_id, loc_code='pport_loc%d' % port_id, min_granularity=float(port_id) / 1000, cfg_max_lps=20, cfg_lps=cfg_lps, allocated_capacity=alloc_cap, link_status=True) def good_sriov(sriov_adap_id, pports): return fake_sriov(card.SRIOVAdapterMode.SRIOV, card.SRIOVAdapterState.RUNNING, sriov_adap_id, pports) ded_sriov = fake_sriov(card.SRIOVAdapterMode.DEDICATED, None, 86, []) down_sriov = fake_sriov(card.SRIOVAdapterMode.SRIOV, card.SRIOVAdapterState.FAILED, 68, []) def sys_wrapper(sriovs, vnic_capable=True, vnic_failover_capable=True): mock_sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriovs)) def get_cap(cap): capabilities = { 'vnic_capable': vnic_capable, 'vnic_failover_capable': vnic_failover_capable} return capabilities[cap] mock_sys.get_capability.side_effect = get_cap return mock_sys class TestSriov(testtools.TestCase): def setUp(self): super(TestSriov, self).setUp() self.adpt = self.useFixture(fx.AdapterFx()).adpt self.fake_sriovs = [ good_sriov(1, [fake_pport(1, pid, lps, cap) for pid, lps, cap in ( (11, 0, 0.95), (12, 9, 0.0), (13, 5, 0.03), (14, 20, 0.987))]), ded_sriov, good_sriov(2, [fake_pport(2, 21, 19, 0.3)]), down_sriov, good_sriov(3, []), good_sriov(4, [fake_pport(4, pid, 1, cap) for pid, cap in ( (41, 0.02), (42, 
0.01))]), good_sriov(5, [fake_pport(5, pid, lps, cap) for pid, lps, cap in ( (51, 17, 0.49), (52, 3, 0.0), (53, 50, 0.05), (54, 11, 0.0), (55, 6, 0.4), (56, 13, 0.1), (57, 0, 0.15), (58, 7, 1.0))])] # Mark link status down on 5/55. self.fake_sriovs[6].phys_ports[4].link_status = False def test_get_good_sriovs(self): """Test _get_good_sriovs helper.""" sriovs = tsriov._get_good_sriovs(self.fake_sriovs) self.assertEqual(5, len(sriovs)) self.assertEqual(['sriov_loc%d' % x for x in range(1, 6)], [sriov.phys_loc_code for sriov in sriovs]) # Error case: none found. self.assertRaises(ex.NoRunningSharedSriovAdapters, tsriov._get_good_sriovs, [ded_sriov, down_sriov]) def test_get_good_pport_list(self): """Test _get_good_pport_list helper.""" def validate_pports(pports, ids): # List of phys locs self.assertSetEqual({'pport_loc%d' % x for x in ids}, {pport.loc_code for pport in pports}) # We added the appropriate sriov_adap_id for pport in pports: # Set up such that port ID xy always sits on adapter with ID x self.assertEqual(pport.port_id // 10, pport.sriov_adap_id) # Base case: no hits self.assertEqual([], tsriov._get_good_pport_list( self.fake_sriovs, ['nowt', 'to', 'see', 'here'], None, 0, False)) # Validate redundancy - same thing but with nonzero redundancy self.assertRaises( ex.InsufficientSRIOVCapacity, tsriov._get_good_pport_list, self.fake_sriovs, ['nothing', 'to', 'see', 'here'], None, 1, False) # Make sure we can get the ones we specify, that are actually there. pports = tsriov._get_good_pport_list( self.fake_sriovs, ['pport_loc%d' % x for x in { 51, 13, 68, 123, 21, 57, 42}], None, 4, False) validate_pports(pports, {42, 13, 57, 21, 51}) # Make sure we can filter by saturation (capacity/LPs). 14, 53, and 58 # should filter themselves - they're already too full for their # min_granularity and/or configured LPs. 
pports = tsriov._get_good_pport_list( self.fake_sriovs, ['pport_loc%d' % x for x in { 58, 52, 14, 11, 53}], None, 0, False) validate_pports(pports, (52, 11)) # Now specify capacity higher than 11 can handle - it should drop off pports = tsriov._get_good_pport_list( self.fake_sriovs, ['pport_loc%d' % x for x in { 58, 52, 14, 11, 53}], 0.06, 0, False) validate_pports(pports, {52}) # Filter link-down ports. 14, 53, and 58 don't appear because they're # saturated by capacity and/or configured LPs. 55 doesn't appear # because it's link-down. pports = tsriov._get_good_pport_list( self.fake_sriovs, ['pport_loc%d' % x for x in range(60)], None, 0, True) validate_pports(pports, {12, 52, 54, 42, 13, 41, 56, 57, 21, 51, 11}) @mock.patch('pypowervm.wrappers.managed_system.System.get') def test_check_sys_vnic_capabilities(self, mock_sys_get): sys_yes_yes = sys_wrapper(None) sys_yes_no = sys_wrapper(None, vnic_failover_capable=False) sys_no_yes = sys_wrapper(None, vnic_capable=False) sys_no_no = sys_wrapper(None, vnic_capable=False, vnic_failover_capable=False) # With sys param None, does a get; vnic & failover checks pass mock_sys_get.return_value = [sys_yes_yes] self.assertEqual(sys_yes_yes, tsriov._check_sys_vnic_capabilities('adap', None, 2)) mock_sys_get.assert_called_once_with('adap') mock_sys_get.reset_mock() # No get; vnic & !failover ok self.assertEqual( sys_yes_no, tsriov._check_sys_vnic_capabilities('adap', sys_yes_no, 1)) mock_sys_get.assert_not_called() # Same, but 0 is a valid min self.assertEqual( sys_yes_no, tsriov._check_sys_vnic_capabilities('adap', sys_yes_no, 0)) # vnic & !failover !ok self.assertRaises( ex.VNICFailoverNotSupportedSys, tsriov._check_sys_vnic_capabilities, 'adap', sys_yes_no, 2) # !vnic !ok even if failover ok (which would really never happen) self.assertRaises( ex.SystemNotVNICCapable, tsriov._check_sys_vnic_capabilities, 'adap', sys_no_yes, 2) # !vnic !failover !ok self.assertRaises( ex.SystemNotVNICCapable, 
tsriov._check_sys_vnic_capabilities, 'adap', sys_no_no, 1) @mock.patch('pypowervm.tasks.partition.get_active_vioses') def test_check_and_filter_vioses(self, mock_vioget): vios_yes_yes = mock.Mock(vnic_capable=True, vnic_failover_capable=True) vios_yes_no = mock.Mock(vnic_capable=True, vnic_failover_capable=False) vios_no_yes = mock.Mock(vnic_capable=False, vnic_failover_capable=True) vios_no_no = mock.Mock(vnic_capable=False, vnic_failover_capable=False) # No redundancy, no pre-seeded list. violist = [vios_yes_yes, vios_yes_no, vios_no_yes, vios_no_no] mock_vioget.return_value = violist # Because at least one VIOS was failover-capable, the non-capable one # is excluded. self.assertEqual([vios_yes_yes], tsriov._check_and_filter_vioses('adap', None, 1)) mock_vioget.assert_called_once_with('adap', xag=[], vios_wraps=None, find_min=1) mock_vioget.reset_mock() # With redundancy, pre-seeded list self.assertEqual([vios_yes_yes], tsriov._check_and_filter_vioses( 'adap', violist, 2)) mock_vioget.assert_called_once_with('adap', xag=[], vios_wraps=violist, find_min=1) # None capable violist = [vios_no_yes, vios_no_no] mock_vioget.return_value = violist self.assertRaises(ex.NoVNICCapableVIOSes, tsriov._check_and_filter_vioses, 'adap', None, 1) # None redundancy capable violist = [vios_yes_no, vios_no_yes, vios_no_no] mock_vioget.return_value = violist self.assertRaises(ex.VNICFailoverNotSupportedVIOS, tsriov._check_and_filter_vioses, 'adap', None, 2) @mock.patch('pypowervm.tasks.sriov._check_and_filter_vioses') def test_set_vnic_back_devs_max_less_than_capacity(self, mock_vioget): """Test set_vnic_back_devs with max capacity less than min capacity""" mock_sys = sys_wrapper(self.fake_sriovs) mock_vioget.return_value = [mock.Mock(uuid='vios_uuid1')] self.adpt.build_href.side_effect = lambda *a, **k: str(a[1]) vnic = card.VNIC.bld(self.adpt, pvid=5) self.assertEqual(0, len(vnic.back_devs)) # Should raise ValueError 'Maximum capacity cannot be less # than min capacity' 
self.assertRaises( ValueError, tsriov.set_vnic_back_devs, vnic, [], sys_w=mock_sys, max_capacity=0.1, capacity=0.5) @mock.patch('pypowervm.tasks.sriov._check_and_filter_vioses') def test_set_vnic_back_devs_max_greaterthan_100(self, mock_vioget): """Test set_vnic_back_devs with max capacity greater than 1""" mock_sys = sys_wrapper(self.fake_sriovs) mock_vioget.return_value = [mock.Mock(uuid='vios_uuid1')] self.adpt.build_href.side_effect = lambda *a, **k: '%s' % a[1] vnic = card.VNIC.bld(self.adpt, pvid=5) self.assertEqual(0, len(vnic.back_devs)) # Should raise ValueError 'Maximum capacity cannot be greater # than 100 percent' self.assertRaises( ValueError, tsriov.set_vnic_back_devs, vnic, [], max_capacity=1.1, sys_w=mock_sys) @mock.patch('pypowervm.tasks.sriov._check_and_filter_vioses') @mock.patch('random.shuffle') def test_set_vnic_back_devs(self, mock_shuffle, mock_vioget): """Test set_vnic_back_devs.""" mock_sys = sys_wrapper(self.fake_sriovs) mock_vioget.return_value = [mock.Mock(uuid='vios_uuid1'), mock.Mock(uuid='vios_uuid2'), mock.Mock(uuid='vios_uuid3')] self.adpt.build_href.side_effect = lambda *a, **k: '%s' % a[1] vnic = card.VNIC.bld(self.adpt, pvid=5) self.assertEqual(0, len(vnic.back_devs)) # Silly case: redundancy of zero tsriov.set_vnic_back_devs(vnic, [], sys_w=mock_sys, redundancy=0) self.assertEqual(0, len(vnic.back_devs)) mock_vioget.assert_called_once_with(self.adpt, None, 0) cap = 0.019 # Things to note about the following: # - VIOSes rotate. 1, 2, 3, repeat. If we hadn't mocked shuffle, the # base order would be random, but they would still rotate in whatever # that shuffled order was. # - The least-used (emptiest) physical ports come first... # - ...except (e.g. 21) we force distribution across cards, so... # - ...cards alternate until exhausted; hence 5 repeated at the end. # - Capacity set across the board according to the parameter. 
all_back_devs = [('vios_uuid1', 1, 12, cap), ('vios_uuid2', 5, 52, cap), ('vios_uuid3', 4, 42, cap), ('vios_uuid1', 2, 21, cap), ('vios_uuid2', 5, 54, cap), ('vios_uuid3', 4, 41, cap), ('vios_uuid1', 1, 13, cap), ('vios_uuid2', 5, 56, cap), ('vios_uuid3', 1, 11, cap), ('vios_uuid1', 5, 57, cap), ('vios_uuid2', 5, 55, cap), ('vios_uuid3', 5, 51, cap)] # 5/55 is link-down. When it drops off, the last one moves to VIOS 2. live_back_devs = all_back_devs[:10] + [('vios_uuid2', 5, 51, cap)] # Use 'em all tsriov.set_vnic_back_devs(vnic, ['pport_loc%d' % x for x in range(60)], sys_w=mock_sys, capacity=cap, redundancy=12) self.assertEqual(all_back_devs, [(bd.vios_href, bd.sriov_adap_id, bd.pport_id, bd.capacity) for bd in vnic.back_devs]) # Check port status - 55 drops off tsriov.set_vnic_back_devs(vnic, ['pport_loc%d' % x for x in range(60)], sys_w=mock_sys, capacity=cap, redundancy=11, check_port_status=True) self.assertEqual(live_back_devs, [(bd.vios_href, bd.sriov_adap_id, bd.pport_id, bd.capacity) for bd in vnic.back_devs]) # Fail if we can't satisfy redundancy self.assertRaises( ex.InsufficientSRIOVCapacity, tsriov.set_vnic_back_devs, vnic, ['pport_loc%d' % x for x in range(60)], sys_w=mock_sys, capacity=cap, redundancy=13) # The passed-in wrapper isn't modified if the method raises. self.assertEqual(live_back_devs, [(bd.vios_href, bd.sriov_adap_id, bd.pport_id, bd.capacity) for bd in vnic.back_devs]) # Make sure redundancy caps it. # By reusing vnic without resetting its back_devs, we're proving the # documented behavior that the method clears first. 
tsriov.set_vnic_back_devs(vnic, ['pport_loc%d' % x for x in range(60)], sys_w=mock_sys, capacity=cap, redundancy=5) self.assertEqual(all_back_devs[:5], [(bd.vios_href, bd.sriov_adap_id, bd.pport_id, bd.capacity) for bd in vnic.back_devs]) self.assertEqual(5, mock_shuffle.call_count) # When max capacity is not specified during set_vnic_back_devs, # max_capacity should be None self.assertEqual(None, vnic.back_devs[0].max_capacity) @mock.patch('pypowervm.tasks.sriov._check_and_filter_vioses') def test_set_vnic_back_devs_max_capacity_invoked(self, mock_vioget): mock_sys = sys_wrapper(self.fake_sriovs) mock_vioget.return_value = [mock.Mock(uuid='vios_uuid1')] self.adpt.build_href.side_effect = lambda *a, **k: '%s' % a[1] vnic = card.VNIC.bld(self.adpt, pvid=5) self.assertEqual(0, len(vnic.back_devs)) tsriov.set_vnic_back_devs(vnic, ['pport_loc%d' % x for x in range(60)], sys_w=mock_sys, capacity=0.02, redundancy=1, max_capacity=0.75) # Ensure that the max and min capacities attached to back_devices as # expected self.assertEqual(0.02, vnic.back_devs[0].capacity) self.assertEqual(0.75, vnic.back_devs[0].max_capacity) @mock.patch('pypowervm.wrappers.managed_system.System.get') def test_find_pports_for_portlabel(self, mock_sys_get): physnet = 'default' sriov_adaps = [ mock.Mock(phys_ports=[ mock.Mock(loc_code='port1', label='default'), mock.Mock(loc_code='port3', label='data1')]), mock.Mock(phys_ports=[ mock.Mock(loc_code='port4', label='data2'), mock.Mock(loc_code='port2', label='default')])] sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) mock_sys_get.return_value = [sys] pports = tsriov.find_pports_for_portlabel(physnet, sriov_adaps) self.assertEqual({'port1', 'port2'}, {pport.loc_code for pport in pports}) @mock.patch('pypowervm.wrappers.managed_system.System.get') def test_find_pports_for_portlabel_blank(self, mock_sys_get): physnet = 'default' sriov_adaps = [ mock.Mock(phys_ports=[ mock.Mock(loc_code='port1', label=''), mock.Mock(loc_code='port3', 
label='data1')]), mock.Mock(phys_ports=[ mock.Mock(loc_code='port4', label='data2'), mock.Mock(loc_code='port2', label='')])] sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=sriov_adaps)) mock_sys_get.return_value = [sys] pports = tsriov.find_pports_for_portlabel(physnet, sriov_adaps) self.assertEqual({'port1', 'port2'}, {pport.loc_code for pport in pports}) class TestSafeUpdatePPort(testtools.TestCase): @mock.patch('pypowervm.tasks.partition.get_partitions') @mock.patch('pypowervm.wrappers.iocard.VNIC.get') def test_get_lpar_vnics(self, mock_vnics, mock_get_pars): lpars = ['lpar1', 'lpar2', 'lpar3'] mock_get_pars.return_value = lpars mock_vnics.side_effect = ['list1', 'list2', 'list3'] self.assertEqual({'lpar%d' % i: 'list%d' % i for i in (1, 2, 3)}, tsriov.get_lpar_vnics('adap')) mock_get_pars.assert_called_once_with('adap', lpars=True, vioses=False) for lpar in lpars: mock_vnics.assert_any_call('adap', parent=lpar) def test_vnics_using_pport(self): lpar1 = mock.Mock() lpar1.configure_mock(name='lpar1', uuid='lpar_uuid1') lpar2 = mock.Mock() lpar2.configure_mock(name='lpar2', uuid='lpar_uuid2') vnic1 = mock.Mock(uuid='vnic_uuid1', back_devs=[ mock.Mock(sriov_adap_id=1, pport_id=1), mock.Mock(sriov_adap_id=2, pport_id=2)]) vnic2 = mock.Mock(uuid='vnic_uuid2', back_devs=[ mock.Mock(sriov_adap_id=1, pport_id=2), mock.Mock(sriov_adap_id=2, pport_id=1)]) vnic3 = mock.Mock(uuid='vnic_uuid3', back_devs=[ mock.Mock(sriov_adap_id=3, pport_id=1), mock.Mock(sriov_adap_id=4, pport_id=2)]) vnic4 = mock.Mock(uuid='vnic_uuid4', back_devs=[ mock.Mock(sriov_adap_id=1, pport_id=2), mock.Mock(sriov_adap_id=4, pport_id=2)]) lpar2vnics = {lpar1: [vnic1, vnic2], lpar2: [vnic3, vnic4]} # Not in use self.assertEqual([], tsriov._vnics_using_pport(mock.Mock( sriov_adap_id=1, port_id=3, loc_code='not_used'), lpar2vnics)) # Used once ret = tsriov._vnics_using_pport(mock.Mock( sriov_adap_id=1, port_id=1, loc_code='loc1'), lpar2vnics) self.assertEqual(1, len(ret)) # loc1 backs vNIC for 
LPAR lpar1 (lpar_uuid1 / vnic_uuid1) self.assertIn('loc1', ret[0]) self.assertIn('lpar1', ret[0]) self.assertIn('lpar_uuid1', ret[0]) self.assertIn('vnic_uuid1', ret[0]) # Used twice ret = tsriov._vnics_using_pport(mock.Mock( sriov_adap_id=1, port_id=2, loc_code='loc2'), lpar2vnics) self.assertEqual(2, len(ret)) # Order of the return is not deterministic. Reverse if necessary if 'lpar1' not in ret[0]: ret = ret[::-1] # loc2 backs vNIC for LPAR lpar1 (lpar_uuid1 / vnic_uuid2) self.assertIn('loc2', ret[0]) self.assertIn('lpar1', ret[0]) self.assertIn('lpar_uuid1', ret[0]) self.assertIn('vnic_uuid2', ret[0]) # loc2 backs vNIC for LPAR lpar2 (lpar_uuid2 / vnic_uuid4) self.assertIn('loc2', ret[1]) self.assertIn('lpar2', ret[1]) self.assertIn('lpar_uuid2', ret[1]) self.assertIn('vnic_uuid4', ret[1]) @mock.patch('pypowervm.tasks.sriov.get_lpar_vnics') @mock.patch('pypowervm.tasks.sriov._vnics_using_pport') def test_vet_port_usage(self, mock_vup, mock_glv): label_index = {'loc1': 'pre_label1', 'loc2': '', 'loc3': 'pre_label3', 'loc4': 'pre_label4'} # No LPs pport1 = mock.Mock(cfg_lps=0, loc_code='loc1', label='post_label1') # Pre-label empty, but label changed pport2 = mock.Mock(loc_code='loc2', label='post_label2') # Pre-label matches post-label pport3 = mock.Mock(loc_code='loc3', label='pre_label3') # Label changed pport4 = mock.Mock(loc_code='loc4', label='post_label4') pport5 = mock.Mock(loc_code='loc3', label='post_label3') # PPorts that hit the first three criteria (no LPs, label originally # unset, label unchanged) don't trigger expensive get_lpar_vnics or # _vnics_using_pport. sriov1 = mock.Mock(phys_ports=[pport1, pport2]) sriov2 = mock.Mock(phys_ports=[pport3]) ret = tsriov._vet_port_usage( mock.Mock(asio_config=mock.Mock(sriov_adapters=[sriov1, sriov2])), label_index) self.assertEqual([], ret) mock_vup.assert_not_called() mock_glv.assert_not_called() # Multiple pports that pass the easy criteria; get_lpar_vnics only # called once. 
mock_vup.side_effect = [1], [2] sriov3 = mock.Mock(phys_ports=[pport4, pport5]) ret = tsriov._vet_port_usage(mock.Mock( adapter='adap', asio_config=mock.Mock( sriov_adapters=[sriov1, sriov3])), label_index) mock_glv.assert_called_once_with('adap') self.assertEqual([1, 2], ret) mock_vup.assert_has_calls([mock.call(pport4, mock_glv.return_value), mock.call(pport5, mock_glv.return_value)]) @mock.patch('pypowervm.tasks.sriov._vet_port_usage') @mock.patch('pypowervm.tasks.sriov.LOG.warning') @mock.patch('fasteners.lock.ReaderWriterLock.write_lock') @mock.patch('pypowervm.wrappers.managed_system.System.getter') def test_safe_update_pports(self, mock_getter, mock_lock, mock_warn, mock_vpu): mock_sys = mock.Mock(asio_config=mock.Mock(sriov_adapters=[ mock.Mock(phys_ports=[mock.Mock(loc_code='loc1', label='label1'), mock.Mock(loc_code='loc2', label='label2')]), mock.Mock(phys_ports=[ mock.Mock(loc_code='loc3', label='label3')])])) def changes_func(ret_bool): def changes(sys_w): mock_lock.assert_called() mock_lock.reset_mock() return ret_bool return changes # No force, no warnings, update requested mock_vpu.return_value = [] self.assertEqual(mock_sys.update.return_value, tsriov.safe_update_pports( mock_sys, changes_func(True))) mock_warn.assert_not_called() # No force, no in-use, no update, use a getter: runs but doesn't update mock_sys.update.reset_mock() self.assertEqual(mock_getter.return_value, tsriov.safe_update_pports( ms.System.getter('adap'), changes_func(False))) mock_warn.assert_not_called() mock_getter.return_value.update.assert_not_called() # Update requested, some in-use, no force - raises mock_vpu.reset_mock() mock_vpu.return_value = [1] self.assertRaises(ex.CantUpdatePPortsInUse, tsriov.safe_update_pports, mock_sys, changes_func(True), force=False) mock_warn.assert_not_called() mock_sys.update.assert_not_called() # Update requested, some in-use, force - runs & warns mock_vpu.reset_mock() mock_vpu.return_value = ['one', 'two'] 
self.assertEqual(mock_sys.update.return_value, tsriov.safe_update_pports( mock_sys, changes_func(True), force=True)) mock_warn.assert_has_calls([mock.call(mock.ANY), mock.call('one'), mock.call('two')]) class TestMisc(twrap.TestWrapper): file = 'sys_with_sriov.txt' wrapper_class_to_test = ms.System def test_find_pport(self): self.assertIsNone(tsriov.find_pport(self.dwrap, 'bogus')) pport = tsriov.find_pport(self.dwrap, 'U78C7.001.RCH0004-P1-C8-T2') self.assertEqual('U78C7.001.RCH0004-P1-C8', pport.sriov_adap.phys_loc_code) self.assertEqual(1, pport.sriov_adap_id) # It's a converged port... self.assertIsInstance(pport, card.SRIOVConvPPort) # ...which is also an ethernet port self.assertIsInstance(pport, card.SRIOVEthPPort) self.assertEqual('U78C7.001.RCH0004-P1-C8-T2', pport.loc_code) self.assertEqual(1, pport.port_id) pypowervm-1.1.24/pypowervm/tests/tasks/test_memory.py0000664000175000017500000001340713571367171022535 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import testtools import pypowervm.entities as ent from pypowervm.tasks import memory import pypowervm.tests.test_fixtures as fx from pypowervm.wrappers import job class TestMemory(testtools.TestCase): """Unit Tests for Memory tasks.""" def setUp(self): super(TestMemory, self).setUp() entry = ent.Entry({}, ent.Element('Dummy', None), None) self.mock_job = job.Job(entry) self.adpt = self.useFixture(fx.AdapterFx()).adpt @mock.patch('pypowervm.wrappers.job.Job.wrap') @mock.patch('pypowervm.wrappers.job.Job.run_job') @mock.patch('pypowervm.wrappers.job.Job.create_job_parameter') @mock.patch('pypowervm.wrappers.job.Job.get_job_results_as_dict') def test_calculate_memory_overhead_on_host(self, mock_job_dict_res, mock_job_p, mock_run_job, mock_job_w): """Performs a simple set of calculate_memory_overhead_on_host tests.""" def _reset_mocks(): mock_job_w.reset_mock() mock_job_p.reset_mock() mock_run_job.reset_mock() mock_job_dict_res.reset_mock() def raise_exc_se(): raise Exception mock_job_w.return_value = self.mock_job mock_host_uuid = '1234' args = ['ManagedSystem', mock_host_uuid] kwargs = {'suffix_type': 'do', 'suffix_parm': ('QueryReservedMemory' 'RequiredForPartition')} # test empty job results dictionary with defaults mock_job_dict_res.return_value = {'RequiredMemory': None, 'CurrentAvailableSystemMemory': None} overhead, avail = (memory. 
calculate_memory_overhead_on_host(self.adpt, mock_host_uuid)) self.adpt.read.assert_called_once_with(*args, **kwargs) self.assertEqual(1, mock_job_w.call_count) self.assertEqual(6, mock_job_p.call_count) self.assertEqual(1, mock_run_job.call_count) self.assertEqual(1, mock_job_dict_res.call_count) self.assertEqual(512, overhead) self.assertEqual(None, avail) _reset_mocks() # test with desired mem and non empty job results dict mock_job_dict_res.return_value = {'RequiredMemory': 1024, 'CurrentAvailableSystemMemory': 32768} reserved_mem_data = {'desired_mem': 768, 'num_virt_eth_adapters': 2} kwargs2 = {'reserved_mem_data': reserved_mem_data} overhead, avail = (memory. calculate_memory_overhead_on_host(self.adpt, mock_host_uuid, **kwargs2)) self.assertEqual(6, mock_job_p.call_count) self.assertEqual((1024-768), overhead) self.assertEqual(32768, avail) _reset_mocks() # test defaults when run_job fails mock_run_job.side_effect = raise_exc_se overhead, avail = (memory. calculate_memory_overhead_on_host(self.adpt, mock_host_uuid)) mock_job_p.assert_any_call('LogicalPartitionEnvironment', 'AIX/Linux') mock_job_p.assert_any_call('DesiredMemory', '512') mock_job_p.assert_any_call('MaximumMemory', '32768') mock_job_p.assert_any_call('NumberOfVirtualEthernetAdapter', '2') mock_job_p.assert_any_call('NumberOfVirtualSCSIAdapter', '1') mock_job_p.assert_any_call('NumberOfVirtualFibreChannelAdapter', '1') self.assertEqual(512, overhead) self.assertEqual(None, avail) self.assertEqual(0, mock_job_dict_res.call_count) _reset_mocks() # test reserved_mem_data values are created as job params reserved_mem_data = {'desired_mem': 2048, 'max_mem': 65536, 'lpar_env': 'OS400', 'num_virt_eth_adapters': 4, 'num_vscsi_adapters': 5, 'num_vfc_adapters': 6} kwargs3 = {'reserved_mem_data': reserved_mem_data} overhead, avail = (memory. 
calculate_memory_overhead_on_host(self.adpt, mock_host_uuid, **kwargs3)) mock_job_p.assert_any_call('LogicalPartitionEnvironment', 'OS400') mock_job_p.assert_any_call('DesiredMemory', '2048') mock_job_p.assert_any_call('MaximumMemory', '65536') mock_job_p.assert_any_call('NumberOfVirtualEthernetAdapter', '4') mock_job_p.assert_any_call('NumberOfVirtualSCSIAdapter', '5') mock_job_p.assert_any_call('NumberOfVirtualFibreChannelAdapter', '6') self.assertEqual(512, overhead) self.assertEqual(None, avail) pypowervm-1.1.24/pypowervm/tests/tasks/test_cna.py0000664000175000017500000005012013571367171021757 0ustar neoneo00000000000000# Copyright 2015, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from pypowervm import adapter as adp from pypowervm import exceptions as exc from pypowervm.tasks import cna from pypowervm.tests import test_fixtures as fx from pypowervm.tests.test_utils import test_wrapper_abc as twrap from pypowervm.wrappers import entry_wrapper as ewrap from pypowervm.wrappers import logical_partition as pvm_lpar from pypowervm.wrappers import network as pvm_net VSWITCH_FILE = 'fake_vswitch_feed.txt' VNET_FILE = 'fake_virtual_network_feed.txt' class TestCNA(twrap.TestWrapper): """Unit Tests for creating Client Network Adapters.""" mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits} file = VSWITCH_FILE wrapper_class_to_test = pvm_net.VSwitch @mock.patch('pypowervm.tasks.cna._find_or_create_vnet') def test_crt_cna(self, mock_vnet_find): """Tests the creation of Client Network Adapters.""" # Create a side effect that can validate the input into the create # call. def validate_of_create(*kargs, **kwargs): self.assertIsNotNone(kargs[0]) self.assertEqual('LogicalPartition', kargs[1]) self.assertEqual('fake_lpar', kwargs.get('root_id')) self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type')) return pvm_net.CNA.bld(self.adpt, 1, 'href').entry self.adpt.create.side_effect = validate_of_create self.adpt.read.return_value = self.resp n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5) self.assertIsNotNone(n_cna) self.assertIsInstance(n_cna, pvm_net.CNA) self.assertEqual(1, mock_vnet_find.call_count) @mock.patch('pypowervm.tasks.cna._find_or_create_vnet') def test_crt_cna_no_vnet_crt(self, mock_vnet_find): """Tests the creation of Client Network Adapters. The virtual network creation shouldn't be done in this flow. """ # PVMish Traits self.adptfx.set_traits(fx.LocalPVMTraits) self.adpt.read.return_value = self.resp # Create a side effect that can validate the input into the create # call. 
def validate_of_create(*kargs, **kwargs): self.assertIsNotNone(kargs[0]) self.assertEqual('LogicalPartition', kargs[1]) self.assertEqual('fake_lpar', kwargs.get('root_id')) self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type')) return pvm_net.CNA.bld(self.adpt, 1, 'href').entry self.adpt.create.side_effect = validate_of_create n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5, slot_num=1) self.assertIsNotNone(n_cna) self.assertIsInstance(n_cna, pvm_net.CNA) self.assertEqual(0, mock_vnet_find.call_count) def test_find_or_create_vswitch(self): """Validates that a vswitch can be created.""" self.adpt.read.return_value = self.resp # Test that it finds the right vSwitch vswitch_w = cna._find_or_create_vswitch(self.adpt, 'ETHERNET0', True) self.assertIsNotNone(vswitch_w) # Create a side effect that can validate the input into the create call def validate_of_create(*kargs, **kwargs): self.assertIsNotNone(kargs[0]) # Is the vSwitch create self.assertEqual('ManagedSystem', kargs[1]) self.assertEqual('VirtualSwitch', kwargs.get('child_type')) # Return a previously created vSwitch... return self.dwrap.entry self.adpt.create.side_effect = validate_of_create # Test the create vswitch_w = cna._find_or_create_vswitch(self.adpt, 'Temp', True) self.assertIsNotNone(vswitch_w) self.assertTrue(self.adpt.create.called) # Make sure that if the create flag is set to false, an error is thrown # when the vswitch can't be found. 
self.assertRaises(exc.Error, cna._find_or_create_vswitch, self.adpt, 'Temp', False) class TestVNET(twrap.TestWrapper): mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits} file = VNET_FILE wrapper_class_to_test = pvm_net.VNet def test_find_or_create_vnet(self): """Tests that the virtual network can be found/created.""" self.adpt.read.return_value = self.resp fake_vs = mock.Mock() fake_vs.switch_id = 0 fake_vs.name = 'ETHERNET0' fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/' 'ManagedSystem/' '67dca605-3923-34da-bd8f-26a378fc817f/' 'VirtualSwitch/' 'ec8aaa54-9837-3c23-a541-a4e4be3ae489') # This should find a vnet. vnet_resp = cna._find_or_create_vnet(self.adpt, '2227', fake_vs) self.assertIsNotNone(vnet_resp) # Now flip to a CNA that requires a create... resp = adp.Response('reqmethod', 'reqpath', 'status', 'reason', {}) resp.entry = ewrap.EntryWrapper._bld( self.adpt, tag='VirtualNetwork').entry self.adpt.create.return_value = resp vnet_resp = cna._find_or_create_vnet(self.adpt, '2228', fake_vs) self.assertIsNotNone(vnet_resp) self.assertEqual(1, self.adpt.create.call_count) def test_find_free_vlan(self): """Tests that a free VLAN can be found.""" self.adpt.read.return_value = self.resp # Mock data specific to the VNET File fake_vs = mock.Mock() fake_vs.name = 'ETHERNET0' fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/' 'ManagedSystem/' '67dca605-3923-34da-bd8f-26a378fc817f/' 'VirtualSwitch/' 'ec8aaa54-9837-3c23-a541-a4e4be3ae489') self.assertEqual(1, cna._find_free_vlan(self.adpt, fake_vs)) @mock.patch('pypowervm.wrappers.network.VNet.wrap') def test_find_free_vlan_mocked(self, mock_vnet_wrap): """Uses lots of mock data for a find vlan.""" self.adpt.read.return_value = mock.Mock() # Helper function to build the vnets. 
def build_mock_vnets(max_vlan, vswitch_uri): vnets = [] for x in range(1, max_vlan + 1): vnets.append(mock.Mock(vlan=x, associated_switch_uri=vswitch_uri)) return vnets mock_vswitch = mock.Mock(related_href='test_vs') # Test when all the vnet's are on a single switch. mock_vnet_wrap.return_value = build_mock_vnets(3000, 'test_vs') self.assertEqual(3001, cna._find_free_vlan(self.adpt, mock_vswitch)) # Test with multiple switches. The second vswitch with a higher vlan # should not impact the vswitch we're searching for. mock_vnet_wrap.return_value = (build_mock_vnets(2000, 'test_vs') + build_mock_vnets(4000, 'test_vs2')) self.assertEqual(2001, cna._find_free_vlan(self.adpt, mock_vswitch)) # Test when all the VLANs are consumed mock_vnet_wrap.return_value = build_mock_vnets(4094, 'test_vs') self.assertRaises(exc.Error, cna._find_free_vlan, self.adpt, mock_vswitch) @mock.patch('pypowervm.tasks.cna._find_free_vlan') def test_assign_free_vlan(self, mock_find_vlan): mock_find_vlan.return_value = 2016 mocked = mock.MagicMock() mock_cna = mock.MagicMock(pvid=31, enabled=False) mock_cna.update.return_value = mock_cna updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna) self.assertEqual(2016, updated_cna.pvid) self.assertEqual(mock_cna.enabled, updated_cna.enabled) updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna, ensure_enabled=True) self.assertEqual(True, updated_cna.enabled) @mock.patch('pypowervm.wrappers.network.CNA.bld') @mock.patch('pypowervm.tasks.cna._find_free_vlan') @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch') @mock.patch('pypowervm.tasks.partition.get_partitions') def test_crt_p2p_cna( self, mock_get_partitions, mock_find_or_create_vswitch, mock_find_free_vlan, mock_cna_bld): """Tests the crt_p2p_cna.""" # Mock out the data mock_vswitch = mock.Mock(related_href='vswitch_href') mock_find_or_create_vswitch.return_value = mock_vswitch mock_find_free_vlan.return_value = 2050 # Mock the get of the VIOSes mock_vio1 = 
mock.Mock(uuid='src_io_host_uuid') mock_vio2 = mock.Mock(uuid='vios_uuid2') mock_get_partitions.return_value = [mock_vio1, mock_vio2] mock_cna = mock.MagicMock() mock_trunk1, mock_trunk2 = mock.MagicMock(pvid=2050), mock.MagicMock() mock_trunk1.create.return_value = mock_trunk1 mock_cna_bld.side_effect = [mock_trunk1, mock_trunk2, mock_cna] # Invoke the create mock_ext_ids = {'test': 'value', 'test2': 'value2'} client_adpt, trunk_adpts = cna.crt_p2p_cna( self.adpt, None, 'lpar_uuid', ['src_io_host_uuid', 'vios_uuid2'], mock_vswitch, crt_vswitch=True, slot_num=1, mac_addr='aabbccddeeff', ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450) # Make sure the client and trunk were 'built' mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href', slot_num=1, mac_addr='aabbccddeeff') mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name=None, ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450) mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=2, dev_name=None, ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450) # Make sure they were then created self.assertIsNotNone(client_adpt) self.assertEqual(2, len(trunk_adpts)) mock_cna.create.assert_called_once_with( parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid') mock_trunk1.create.assert_called_once_with(parent=mock_vio1) mock_trunk2.create.assert_called_once_with(parent=mock_vio2) @mock.patch('pypowervm.wrappers.network.CNA.bld') @mock.patch('pypowervm.tasks.cna._find_free_vlan') @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch') @mock.patch('pypowervm.tasks.partition.get_partitions') def test_crt_p2p_cna_single( self, mock_get_partitions, mock_find_or_create_vswitch, mock_find_free_vlan, mock_cna_bld): """Tests the crt_p2p_cna with the mgmt lpar and a dev_name.""" # Mock out the data mock_vswitch = mock.Mock(related_href='vswitch_href') mock_find_or_create_vswitch.return_value = mock_vswitch 
mock_find_free_vlan.return_value = 2050 # Mock the get of the VIOSes mock_vio1 = mock.Mock(uuid='mgmt_lpar_uuid') mock_vio2 = mock.Mock(uuid='vios_uuid2') mock_get_partitions.return_value = [mock_vio1, mock_vio2] mock_cna = mock.MagicMock() mock_trunk1 = mock.MagicMock(pvid=2050) mock_trunk1.create.return_value = mock_trunk1 mock_cna_bld.side_effect = [mock_trunk1, mock_cna] # Invoke the create client_adpt, trunk_adpts = cna.crt_p2p_cna( self.adpt, None, 'lpar_uuid', ['mgmt_lpar_uuid'], mock_vswitch, crt_vswitch=True, mac_addr='aabbccddeeff', dev_name='tap-12345') # Make sure the client and trunk were 'built' mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href', mac_addr='aabbccddeeff', slot_num=None) mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345', ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None) # Make sure they were then created self.assertIsNotNone(client_adpt) self.assertEqual(1, len(trunk_adpts)) mock_cna.create.assert_called_once_with( parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid') mock_trunk1.create.assert_called_once_with(parent=mock_vio1) @mock.patch('pypowervm.wrappers.network.CNA.bld') @mock.patch('pypowervm.tasks.cna._find_free_vlan') @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch') @mock.patch('pypowervm.tasks.partition.get_partitions') def test_crt_trunk_with_free_vlan( self, mock_get_partitions, mock_find_or_create_vswitch, mock_find_free_vlan, mock_cna_bld): """Tests the crt_trunk_with_free_vlan on mgmt based VIOS.""" # Mock out the data mock_vswitch = mock.Mock(related_href='vswitch_href') mock_find_or_create_vswitch.return_value = mock_vswitch mock_find_free_vlan.return_value = 2050 # Mock the get of the VIOSes. 
mock_vio1 = mock.Mock(uuid='vios_uuid1') mock_get_partitions.return_value = [mock_vio1] mock_trunk1 = mock.MagicMock(pvid=2050) mock_trunk1.create.return_value = mock_trunk1 mock_cna_bld.return_value = mock_trunk1 # Invoke the create mock_ext_id = {'test1': 'value1', 'test2': 'value2'} trunk_adpts = cna.crt_trunk_with_free_vlan( self.adpt, None, ['vios_uuid1'], mock_vswitch, crt_vswitch=True, dev_name='tap-12345', ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450) # Make sure the client and trunk were 'built' mock_cna_bld.assert_any_call( self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345', ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450) # Make sure that the trunk was created self.assertEqual(1, len(trunk_adpts)) mock_trunk1.create.assert_called_once_with(parent=mock_vio1) @mock.patch('pypowervm.wrappers.network.CNA.get') def test_find_trunk_on_lpar(self, mock_cna_get): parent_wrap = mock.MagicMock() m1 = mock.Mock(is_trunk=True, pvid=2, vswitch_id=2) m2 = mock.Mock(is_trunk=False, pvid=3, vswitch_id=2) m3 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=1) m4 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=2) mock_cna_get.return_value = [m1, m2, m3] self.assertIsNone(cna._find_trunk_on_lpar(self.adpt, parent_wrap, m4)) self.assertTrue(mock_cna_get.called) mock_cna_get.reset_mock() mock_cna_get.return_value = [m1, m2, m3, m4] self.assertEqual(m4, cna._find_trunk_on_lpar(self.adpt, parent_wrap, m4)) self.assertTrue(mock_cna_get.called) @mock.patch('pypowervm.tasks.cna._find_trunk_on_lpar') @mock.patch('pypowervm.tasks.partition.get_mgmt_partition') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') def test_find_trunks(self, mock_vios_get, mock_get_mgmt, mock_find_trunk): # Mocked responses can be simple, since they are just fed into the # _find_trunk_on_lpar mock_vios_get.return_value = [mock.MagicMock(), mock.MagicMock()] mock_get_mgmt.return_value = mock.MagicMock() # The responses back from the find trunk. 
Make it an odd trunk # priority ordering to make sure we sort properly v1 = mock.Mock(trunk_pri=3) c1, c2 = mock.Mock(trunk_pri=1), mock.Mock(trunk_pri=2) mock_find_trunk.side_effect = [v1, c1, c2] # Invoke the method. resp = cna.find_trunks(self.adpt, mock.Mock(pvid=2)) # Make sure four calls to the find trunk self.assertEqual(3, mock_find_trunk.call_count) # Order of the response is important. Should be based off of trunk # priority self.assertEqual([c1, c2, v1], resp) @mock.patch('pypowervm.wrappers.network.CNA.get') def test_find_all_trunks_on_lpar(self, mock_cna_get): parent_wrap = mock.MagicMock() m1 = mock.Mock(is_trunk=True, vswitch_id=2) m2 = mock.Mock(is_trunk=False, vswitch_id=2) m3 = mock.Mock(is_trunk=True, vswitch_id=1) m4 = mock.Mock(is_trunk=True, vswitch_id=2) mock_cna_get.return_value = [m1, m2, m3, m4] returnVal = [m1, m3, m4] self.assertEqual(returnVal, cna._find_all_trunks_on_lpar(self.adpt, parent_wrap)) mock_cna_get.reset_mock() mock_cna_get.return_value = [m1, m2, m3, m4] self.assertEqual([m3], cna._find_all_trunks_on_lpar(self.adpt, parent_wrap=parent_wrap, vswitch_id=1)) @mock.patch('pypowervm.wrappers.network.CNA.get') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get') def test_find_cna_wraps(self, mock_lpar_get, mock_vios_get, mock_cna_get): # Mocked responses are simple since they are only used for # pvm_net.CNA.get mock_lpar_get.return_value = [mock.MagicMock()] mock_vios_get.return_value = [mock.MagicMock()] # Mocked cna_wraps m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2) m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1) m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1) mock_cna_get.side_effect = [[m1, m2], [m3]] mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1) self.assertEqual([m1, m2, m3], cna._find_cna_wraps(mock_trunk)) mock_cna_get.side_effect = [[m1, m2], [m3]] self.assertEqual([m2, m3], cna._find_cna_wraps(mock_trunk, 1)) 
@mock.patch('pypowervm.tasks.cna._find_cna_wraps') def test_find_cnas_on_trunk(self, mock_find_wraps): # Mocked cna_wraps m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2) m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1) m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1) mock_find_wraps.return_value = [m1, m2, m3] mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1) self.assertEqual([m2], cna.find_cnas_on_trunk(mock_trunk)) mock_find_wraps.return_value = [m1, m3] self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk)) mock_trunk = mock.Mock(adapter=self.adpt, uuid=3, pvid=3, vswitch_id=3) self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk)) @mock.patch('pypowervm.tasks.cna._find_cna_wraps') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') @mock.patch('pypowervm.tasks.partition.get_mgmt_partition') @mock.patch('pypowervm.tasks.cna._find_all_trunks_on_lpar') @mock.patch('pypowervm.wrappers.network.VSwitch.search') def test_find_orphaned_trunks(self, mock_vswitch, mock_trunks, mock_get_mgmt, mock_vios_get, mock_wraps): mock_vswitch.return_value = mock.MagicMock(switch_id=1) mock_get_mgmt.return_value = mock.MagicMock() mock_vios_get.return_value = [mock.MagicMock()] # Mocked cna_wraps m1 = mock.Mock(is_trunk=True, uuid=2, pvid=2, vswitch_id=1) m2 = mock.Mock(is_trunk=False, uuid=3, pvid=3, vswitch_id=1) m3 = mock.Mock(is_trunk=True, uuid=1, pvid=1, vswitch_id=1) m4 = mock.Mock(is_trunk=False, uuid=4, pvid=1, vswitch_id=1) mock_wraps.return_value = [m1, m2, m3, m4] mock_trunks.side_effect = [[m1, m3], []] self.assertEqual([m1], cna.find_orphaned_trunks(self.adpt, mock.MagicMock)) pypowervm-1.1.24/pypowervm/tests/tasks/create_cluster.py0000664000175000017500000000463213571367171023172 0ustar neoneo00000000000000# Copyright 2015 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script attempts to create a real cluster on a real system using the # cluster_ssp module. Execute from project root directory as: # PYTHONPATH=. python pypowervm/tests/tasks/create_cluster.py import six import pypowervm.adapter as adp import pypowervm.exceptions as ex import pypowervm.tasks.cluster_ssp as cs import pypowervm.wrappers.cluster as clust import pypowervm.wrappers.storage as stor # >>>Replace the following with real values>>> HOST = '9.1.2.3' USER = 'hscroot' PASS = 'abc123' NODE_HOSTNAME = 'vios1.example.com' NODE_MTMS = '8247-22L*1234D0A' NODE_LPARID = 2 NODE_URI = ('https://9.1.2.3:12443/rest/api/uom/VirtualIOServer/' '58C9EB1D-7213-4956-A011-77D43CC4ACCC') REPOS_UDID = '01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAwMg==' REPOS_NAME = 'hdisk2' DATA1_UDID = '01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAwMw==' DATA1_NAME = 'hdisk3' DATA2_UDID = '01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAwNA==' DATA2_NAME = 'hdisk4' DATA3_UDID = '01M0lCTTIxNDUxMjQ2MDA1MDc2ODAyODI4NjFEODgwMDAwMDAwMDAwMDAwNQ==' DATA3_NAME = 'hdisk5' # <<3' '2.0false' 'OTHER' 'foo1' '60011223344556677' '1111223344556677ee000000000000' '0011223344556677' '1111223344556678ee000000000000' '0011223344556677' '1111223344556679ee000000000000' '0011223344556678' '1111223344556677ee000000000000' '0011223344556678' '1111223344556678ee000000000000' '0011223344556678' '1111223344556679ee000000000000' '') self.assertEqual(lua_xml, fc._lua_recovery_xml(all_itls, None, device_id='foo')) def test_process_lua_result_no_resp(self): result = {} status, dev_name, 
udid = fc._process_lua_result(result) self.assertIsNone(status) self.assertIsNone(dev_name) self.assertIsNone(udid) def test_process_lua_result_terse_resp(self): """Tests where valid XML is returned, but no device.""" xml = ('2.0' '') result = {'StdOut': xml} status, dev_name, udid = fc._process_lua_result(result) self.assertIsNone(status) self.assertIsNone(dev_name) self.assertIsNone(udid) def test_process_lua_result(self): xml = ('2.0' '2189' 'test texthdisk10' 'fake_uidfake_udid' '') result = {'StdOut': xml} status, dev_name, udid = fc._process_lua_result(result) self.assertEqual('8', status) self.assertEqual('hdisk10', dev_name) self.assertEqual('fake_udid', udid) # Repeat with the input as the resultXML result = {'OutputXML': xml} status, dev_name, udid = fc._process_lua_result(result) self.assertEqual('8', status) self.assertEqual('hdisk10', dev_name) self.assertEqual('fake_udid', udid) @mock.patch('pypowervm.tasks.hdisk._fc.LOG') def test_validate_lua_status(self, mock_log): """This tests the branches of validate_lua_status.""" fc._log_lua_status(fc.LUAStatus.DEVICE_AVAILABLE, 'dev_name', 'message') self.assertEqual(1, mock_log.info.call_count) fc._log_lua_status(fc.LUAStatus.FOUND_ITL_ERR, 'dev_name', 'message') self.assertEqual(1, mock_log.warning.call_count) fc._log_lua_status(fc.LUAStatus.DEVICE_IN_USE, 'dev_name', 'message') self.assertEqual(2, mock_log.warning.call_count) fc._log_lua_status(fc.LUAStatus.FOUND_DEVICE_UNKNOWN_UDID, 'dev_name', 'message') self.assertEqual(3, mock_log.warning.call_count) fc._log_lua_status(fc.LUAStatus.INCORRECT_ITL, 'dev_name', 'message') self.assertEqual(4, mock_log.warning.call_count) @mock.patch('pypowervm.tasks.hdisk._fc._process_lua_result') @mock.patch('pypowervm.wrappers.job.Job', new=mock.Mock()) @mock.patch('pypowervm.adapter.Adapter') def test_lua_recovery(self, mock_adapter, mock_lua_result): itls = [fc.ITL('AABBCCDDEEFF0011', '00:11:22:33:44:55:66:EE', 238)] mock_lua_result.return_value = ('OK', 'hdisk1', 
'udid') status, devname, udid = fc.lua_recovery(mock_adapter, 'vios_uuid', itls) # Validate value unpack self.assertEqual('OK', status) self.assertEqual('hdisk1', devname) self.assertEqual('udid', udid) # Validate method invocations self.assertEqual(1, mock_adapter.read.call_count) self.assertEqual(1, mock_lua_result.call_count) @mock.patch('pypowervm.tasks.hdisk._fc._lua_recovery_xml') @mock.patch('pypowervm.tasks.hdisk._fc._process_lua_result') @mock.patch('pypowervm.wrappers.job.Job', new=mock.Mock()) @mock.patch('pypowervm.adapter.Adapter') def test_lua_recovery_dupe_itls(self, mock_adapter, mock_lua_result, mock_lua_xml): itls = [fc.ITL('AABBCCDDEEFF0011', '00:11:22:33:44:55:66:EE', 238), fc.ITL('AABBCCDDEEFF0011', '00:11:22:33:44:55:66:EE', 238)] mock_lua_result.return_value = ('OK', 'hdisk1', 'udid') status, devname, udid = fc.lua_recovery(mock_adapter, 'vios_uuid', itls, device_id='foo') # Validate value unpack self.assertEqual('OK', status) self.assertEqual('hdisk1', devname) self.assertEqual('udid', udid) # Validate method invocations self.assertEqual(1, mock_adapter.read.call_count) self.assertEqual(1, mock_lua_result.call_count) mock_lua_xml.assert_called_with({itls[0]}, mock_adapter, vendor='OTHER', device_id='foo') @mock.patch('pypowervm.tasks.hdisk._fc.lua_recovery') @mock.patch('pypowervm.utils.transaction.FeedTask') @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks') @mock.patch('pypowervm.tasks.storage.find_stale_lpars') @mock.patch('pypowervm.wrappers.entry_wrapper.EntryWrapper.get', new=mock.Mock()) def test_discover_hdisk(self, mock_fsl, mock_alsst, mock_ftsk, mock_luar): def set_luar_side_effect(_stat, _dev): """Set up the lua_recovery mock's side effect. The second return will always be the same - used to verify that we really called twice when appropriate. The first return will be (_stat, _dev, "udid"), per the params. 
""" mock_luar.reset_mock() mock_luar.side_effect = [(_stat, _dev, 'udid'), ('ok_s', 'ok_h', 'ok_u')] stale_lpar_ids = [12, 34] # All of these should cause a scrub-and-retry retry_rets = [(None, None), (fc.LUAStatus.DEVICE_AVAILABLE, None), (fc.LUAStatus.FOUND_DEVICE_UNKNOWN_UDID, 'hdisk456')] # These should *not* cause a scrub-and-retry no_retry_rets = [(fc.LUAStatus.DEVICE_AVAILABLE, 'hdisk456'), (fc.LUAStatus.FOUND_ITL_ERR, 'hdisk456'), (fc.LUAStatus.DEVICE_IN_USE, 'hdisk456')] mock_fsl.return_value = stale_lpar_ids for st, dev in retry_rets: set_luar_side_effect(st, dev) self.assertEqual( ('ok_s', 'ok_h', 'ok_u'), fc.discover_hdisk( 'adp', 'vuuid', ['itls'], device_id='foo')) self.assertEqual(1, mock_fsl.call_count) mock_ftsk.assert_called_with('scrub_vios_vuuid', mock.ANY) self.assertEqual(1, mock_alsst.call_count) mock_luar.assert_has_calls( [mock.call('adp', 'vuuid', ['itls'], vendor=fc.LUAType.OTHER, device_id='foo')] * 2) mock_fsl.reset_mock() mock_alsst.reset_mock() mock_ftsk.reset_mock() for st, dev in no_retry_rets: set_luar_side_effect(st, dev) self.assertEqual( (st, dev, 'udid'), fc.discover_hdisk( 'adp', 'vuuid', ['itls'])) self.assertEqual(0, mock_fsl.call_count) self.assertEqual(0, mock_ftsk.call_count) self.assertEqual(0, mock_alsst.call_count) self.assertEqual(1, mock_luar.call_count) mock_luar.assert_called_with('adp', 'vuuid', ['itls'], vendor=fc.LUAType.OTHER, device_id=None) # If no stale LPARs found, scrub-and-retry should not be triggered with # either set. 
mock_fsl.return_value = [] for st, dev in retry_rets + no_retry_rets: set_luar_side_effect(st, dev) self.assertEqual( (st, dev, 'udid'), fc.discover_hdisk( 'adp', 'vuuid', ['itls'])) # find_stale_lpars will be called for retry_rets, but not for # no_retry_rets self.assertLessEqual(mock_fsl.call_count, 1) self.assertEqual(0, mock_ftsk.call_count) self.assertEqual(0, mock_alsst.call_count) self.assertEqual(1, mock_luar.call_count) mock_luar.assert_called_with('adp', 'vuuid', ['itls'], vendor=fc.LUAType.OTHER, device_id=None) mock_fsl.reset_mock() @mock.patch('pypowervm.wrappers.job.Job.job_status', new=mock.Mock()) @mock.patch('pypowervm.wrappers.job.Job.run_job') @mock.patch('pypowervm.adapter.Adapter') def test_remove_hdisk_classic(self, mock_adapter, mock_run_job): mock_adapter.read.return_value = (tju.load_file(VIOS_FEED) .feed.entries[0]) fc._remove_hdisk_classic(mock_adapter, 'host_name', 'dev_name', 'vios_uuid') # Validate method invocations self.assertEqual(2, mock_adapter.read.call_count) self.assertEqual(1, mock_run_job.call_count) @mock.patch('pypowervm.wrappers.job.Job.run_job') @mock.patch('pypowervm.adapter.Adapter') def test_remove_hdisk_job(self, mock_adapter, mock_run_job): mock_adapter.read.return_value = (tju.load_file(VIOS_FEED) .feed.entries[0]) def verify_run_job(vios_uuid, job_parms=None): self.assertEqual('vios_uuid', vios_uuid) self.assertEqual(1, len(job_parms)) job_parm = (b'devName' b'dev_name' b'') self.assertEqual(job_parm, job_parms[0].toxmlstring()) mock_run_job.side_effect = verify_run_job fc._remove_hdisk_job(mock_adapter, 'dev_name', 'vios_uuid') # Validate method invocations self.assertEqual(1, mock_adapter.read.call_count) self.assertEqual(1, mock_run_job.call_count) def test_normalize_lun(self): lun = fc.normalize_lun(12) self.assertEqual('c000000000000', lun) # Test when lun exceeds len 8 lun = fc.normalize_lun(1074872357) self.assertEqual('4011402500000000', lun) 
import mock
import testtools

import pypowervm.entities as ent
from pypowervm.tasks.hdisk import _rbd as rbd
import pypowervm.tests.test_fixtures as fx
from pypowervm.wrappers import job


class TestRbd(testtools.TestCase):
    """Unit tests for the RBD (Ceph) hdisk task helpers."""

    def setUp(self):
        super(TestRbd, self).setUp()
        # A minimal Job wrapper built from a dummy Entry; individual tests
        # patch Job.wrap to return it.
        entry = ent.Entry({}, ent.Element('Dummy', None), None)
        self.mock_job = job.Job(entry)
        # Mocked REST adapter from the shared fixture.
        self.adpt = self.useFixture(fx.AdapterFx()).adpt

    @mock.patch('pypowervm.wrappers.job.Job.create_job_parameter')
    @mock.patch('pypowervm.wrappers.job.Job.wrap')
    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    @mock.patch('pypowervm.wrappers.job.Job.get_job_results_as_dict')
    def test_rbd_exists(self, mock_job_res, mock_run_job, mock_job_w,
                        mock_job_p):
        """rbd_exists runs the RBDExists job and parses its 'exists' result.

        Verifies the job template read, the run_job invocation (including the
        120s timeout), the 'name' job parameter, and that the string job
        result 'true'/'false' maps to Python True/False.
        """
        mock_job_w.return_value = self.mock_job
        mock_uuid = 'uuid'
        mock_name = 'pool/image'
        mock_parm = mock.MagicMock()
        mock_job_p.return_value = mock_parm
        # Expected arguments for the job-template read.
        args = ['VirtualIOServer', mock_uuid]
        kwargs = {'suffix_type': 'do', 'suffix_parm': ('RBDExists')}
        mock_job_res.return_value = {'exists': 'true'}
        self.assertTrue(rbd.rbd_exists(self.adpt, mock_uuid, mock_name))
        self.adpt.read.assert_called_once_with(*args, **kwargs)
        mock_run_job.assert_called_once_with(mock_uuid, job_parms=[mock_parm],
                                             timeout=120)
        mock_job_p.assert_any_call('name', mock_name)
        self.assertEqual(1, mock_run_job.call_count)
        # A 'false' job result must yield False.
        mock_job_res.return_value = {'exists': 'false'}
        mock_job_p.return_value = mock_parm
        self.assertFalse(rbd.rbd_exists(self.adpt, mock_uuid, mock_name))
"""Test for the monitoring functions.""" import datetime import mock import testtools from pypowervm import entities as pvm_e from pypowervm.tasks.monitor import util as pvm_t_mon from pypowervm.tests.tasks import util as tju from pypowervm.tests import test_fixtures as fx from pypowervm.tests.test_utils import pvmhttp from pypowervm.wrappers import monitor as pvm_mon from pypowervm.wrappers.pcm import lpar as pvm_mon_lpar from pypowervm.wrappers.pcm import phyp as pvm_mon_phyp from pypowervm.wrappers.pcm import vios as pvm_mon_vios PHYP_DATA = 'phyp_pcm_data.txt' VIOS_DATA = 'vios_pcm_data.txt' LPAR_DATA = 'lpar_pcm_data.txt' LTM_FEED = 'ltm_feed2.txt' class TestMonitors(testtools.TestCase): def setUp(self): super(TestMonitors, self).setUp() self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemoteHMCTraits)) self.adpt = self.adptfx.adpt def test_query_ltm_feed(self): self.adpt.read_by_path.return_value = tju.load_file(LTM_FEED) feed = pvm_t_mon.query_ltm_feed(self.adpt, 'host_uuid') # Make sure the feed is correct. Our sample data has 130 elements # in the feed. self.assertEqual(130, len(feed)) # Make sure each element is a LTMMetric for mon in feed: self.assertIsInstance(mon, pvm_mon.LTMMetrics) self.assertEqual(1, self.adpt.read_by_path.call_count) def test_ensure_ltm_monitors(self): """Verifies that the LTM monitors can be turned on.""" resp = tju.load_file('pcm_pref.txt') self.adpt.read_by_href.return_value = resp # Create a side effect that can validate the input to the update def validate_of_update(*kargs, **kwargs): element = kargs[0] etag = kargs[1] self.assertIsNotNone(element) self.assertEqual('-215935973', etag) # Wrap the element so we can validate it. 
pref = pvm_mon.PcmPref.wrap(pvm_e.Entry({'etag': etag}, element, self.adpt)) self.assertFalse(pref.compute_ltm_enabled) self.assertTrue(pref.ltm_enabled) self.assertFalse(pref.stm_enabled) self.assertFalse(pref.aggregation_enabled) return element self.adpt.update.side_effect = validate_of_update # This will invoke the validate_of_update pvm_t_mon.ensure_ltm_monitors(self.adpt, 'host_uuid') # Make sure the update was in fact invoked though self.assertEqual(1, self.adpt.update.call_count) def test_ensure_ltm_monitors_non_default(self): """Verifies that the LTM monitors with different default inputs""" resp = tju.load_file('pcm_pref.txt') self.adpt.read_by_href.return_value = resp # Create a side effect that can validate the input to the update def validate_of_update(*kargs, **kwargs): element = kargs[0] etag = kargs[1] self.assertIsNotNone(element) # Wrap the element so we can validate it. pref = pvm_mon.PcmPref.wrap(pvm_e.Entry({'etag': etag}, element, self.adpt)) self.assertTrue(pref.compute_ltm_enabled) self.assertTrue(pref.ltm_enabled) self.assertFalse(pref.stm_enabled) self.assertTrue(pref.aggregation_enabled) return element self.adpt.update.side_effect = validate_of_update # This will invoke the validate_of_update pvm_t_mon.ensure_ltm_monitors(self.adpt, 'host_uuid', compute_ltm=True, override_to_default=True) # Make sure the update was in fact invoked though self.assertEqual(1, self.adpt.update.call_count) def _load(self, file_name): """Loads a file.""" return pvmhttp.PVMFile(file_name).body def test_parse_to_vm_metrics(self): """Verifies the parsing to LPAR metrics.""" phyp_resp = self._load(PHYP_DATA) phyp_data = pvm_mon_phyp.PhypInfo(phyp_resp) vios_resp = self._load(VIOS_DATA) vios_data = pvm_mon_vios.ViosInfo(vios_resp) lpar_resp = self._load(LPAR_DATA) lpar_data = pvm_mon_lpar.LparInfo(lpar_resp) metrics = pvm_t_mon.vm_metrics(phyp_data, [vios_data], lpar_data) self.assertIsNotNone(metrics) # In the test data, there are 5 LPARs total. 
self.assertEqual(5, len(metrics.keys())) # Validate a metric with live data good_vm = '42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC' metric = metrics.get(good_vm) self.assertIsNotNone(metric) self.assertIsNotNone(metric.network) self.assertIsNotNone(metric.storage) self.assertIsNotNone(metric.processor) self.assertIsNotNone(metric.memory) # Memory validation self.assertEqual(20480, metric.memory.logical_mem) self.assertEqual(20480, metric.memory.backed_physical_mem) self.assertEqual(80, metric.memory.pct_real_mem_avbl) self.assertEqual(1024, metric.memory.total_pg_count) self.assertEqual(512, metric.memory.free_pg_count) self.assertEqual(1048576, metric.memory.real_mem_size_bytes) self.assertEqual(61, metric.memory.pct_real_mem_free) self.assertEqual(25, metric.memory.vm_pg_out_rate) # Processor validation self.assertEqual(0, metric.processor.pool_id) self.assertEqual('uncap', metric.processor.mode) self.assertEqual(4, metric.processor.virt_procs) self.assertEqual(.4, metric.processor.proc_units) # Network validation self.assertEqual(1, len(metric.network.cnas)) cna = metric.network.cnas[0] self.assertEqual(2227, cna.vlan_id) self.assertEqual(0, cna.vswitch_id) self.assertEqual('U8247.22L.2125D4A-V2-C2', cna.physical_location) self.assertEqual(10, cna.received_packets) self.assertEqual(100, cna.sent_packets) self.assertEqual(5, cna.dropped_packets) self.assertEqual(100, cna.sent_bytes) self.assertEqual(10000, cna.received_bytes) # Storage validation self.assertEqual(1, len(metric.storage.virt_adpts)) self.assertEqual(0, len(metric.storage.vfc_adpts)) vadpt = metric.storage.virt_adpts[0] self.assertEqual('virtual', vadpt.type) self.assertEqual('vhost0', vadpt.name) self.assertEqual('U8247.22L.2125D4A-V1-C1000', vadpt.physical_location) self.assertEqual(1074, vadpt.num_reads) self.assertEqual(1075, vadpt.num_writes) self.assertEqual(549888, vadpt.read_bytes) self.assertEqual(550400, vadpt.write_bytes) # Validate a metric for a system that was powered off. 
bad_vm = '3B0237F9-26F1-41C7-BE57-A08C9452AD9D' metric = metrics.get(bad_vm) self.assertIsNotNone(metric) self.assertIsNotNone(metric.processor) self.assertIsNotNone(metric.memory) # For powered off VM, OS specific memory metrics are None self.assertIsNone(metric.memory.pct_real_mem_avbl) self.assertIsNone(metric.memory.total_pg_count) self.assertIsNone(metric.memory.free_pg_count) self.assertIsNone(metric.memory.active_pg_count) self.assertIsNone(metric.memory.real_mem_size_bytes) # For powered off VM, the free memory is 100 percent. self.assertEqual(100, metric.memory.pct_real_mem_free) # For powered off VM, the page in/out rate is 0. self.assertEqual(0, metric.memory.vm_pg_out_rate) self.assertIsNone(metric.storage) self.assertIsNone(metric.network) # Take a VM which has entry in phyp data but not in PCM Lpar data. # Assert that it has been correctly parsed and memory metrics # are set to default values. vm_in_phyp_not_in_lpar_pcm = '66A2E886-D05D-42F4-87E0-C3BA02CF7C7E' metric = metrics.get(vm_in_phyp_not_in_lpar_pcm) self.assertIsNotNone(metric) self.assertIsNotNone(metric.processor) self.assertIsNotNone(metric.memory) self.assertEqual(.2, metric.processor.proc_units) self.assertEqual(0, metric.memory.pct_real_mem_free) self.assertEqual(-1, metric.memory.vm_pg_in_rate) def test_vm_metrics_no_phyp_data(self): self.assertEqual({}, pvm_t_mon.vm_metrics(None, [], None)) @mock.patch('pypowervm.tasks.monitor.util.query_ltm_feed') def test_latest_stats(self, mock_ltm_feed): # Set up the return data. 
mock_phyp_metric = mock.MagicMock() mock_phyp_metric.category = 'phyp' mock_phyp_metric.updated_datetime = 2 mock_phyp_metric.link = 'phyp' mock_phyp2_metric = mock.MagicMock() mock_phyp2_metric.category = 'phyp' mock_phyp2_metric.updated_datetime = 1 mock_phyp2_metric.link = 'phyp' mock_vio1_metric = mock.MagicMock() mock_vio1_metric.category = 'vios_1' mock_vio1_metric.updated_datetime = 1 mock_vio1_metric.link = 'vio' mock_vio2_metric = mock.MagicMock() mock_vio2_metric.category = 'vios_1' mock_vio2_metric.updated_datetime = 2 mock_vio2_metric.link = 'vio' mock_vio3_metric = mock.MagicMock() mock_vio3_metric.category = 'vios_3' mock_vio3_metric.updated_datetime = 2 mock_vio3_metric.link = 'vio' mock_lpar1_metric = mock.MagicMock() mock_lpar1_metric.category = 'lpar' mock_lpar1_metric.updated_datetime = 2 mock_lpar1_metric.link = 'lpar' mock_lpar2_metric = mock.MagicMock() mock_lpar2_metric.category = 'lpar' mock_lpar2_metric.updated_datetime = 1 mock_lpar2_metric.link = 'lpar' # Reset as this was invoked once up front. mock_ltm_feed.reset_mock() mock_ltm_feed.return_value = [mock_phyp_metric, mock_phyp2_metric, mock_vio1_metric, mock_vio2_metric, mock_vio3_metric, mock_lpar1_metric, mock_lpar2_metric] # Data for the responses. 
phyp_resp = self._load(PHYP_DATA) vios_resp = self._load(VIOS_DATA) def validate_read(link, xag=None): resp = mock.MagicMock() if link == 'phyp': resp.body = phyp_resp return resp elif link == 'vio': resp.body = vios_resp return resp elif link == 'lpar': resp.body = self._load(LPAR_DATA) return resp else: self.fail() self.adpt.read_by_href.side_effect = validate_read resp_date, resp_phyp, resp_vioses, resp_lpars = ( pvm_t_mon.latest_stats(self.adpt, mock.Mock())) self.assertIsNotNone(resp_phyp) self.assertIsInstance(resp_phyp, pvm_mon_phyp.PhypInfo) self.assertEqual(2, len(resp_vioses)) self.assertIsInstance(resp_vioses[0], pvm_mon_vios.ViosInfo) self.assertIsInstance(resp_vioses[1], pvm_mon_vios.ViosInfo) self.assertEqual(6, len(resp_lpars.lpars_util)) self.assertIsInstance(resp_lpars, pvm_mon_lpar.LparInfo) self.assertIsNotNone(resp_date) # Invoke again, but set to ignore vioses resp_date, resp_phyp, resp_vioses, resp_lpars = ( pvm_t_mon.latest_stats(self.adpt, mock.Mock(), include_vio=False)) self.assertIsNotNone(resp_phyp) self.assertIsInstance(resp_phyp, pvm_mon_phyp.PhypInfo) self.assertEqual(0, len(resp_vioses)) self.assertIsNotNone(resp_date) # Run a pass for previous data prev_date, prev_phyp, prev_vioses, prev_lpars = ( pvm_t_mon.latest_stats(self.adpt, mock.Mock(), second_latest=True)) self.assertIsNotNone(prev_phyp) self.assertIsInstance(prev_phyp, pvm_mon_phyp.PhypInfo) self.assertEqual(1, len(prev_vioses)) self.assertIsInstance(prev_vioses[0], pvm_mon_vios.ViosInfo) self.assertEqual(6, len(prev_lpars.lpars_util)) self.assertIsInstance(prev_lpars, pvm_mon_lpar.LparInfo) self.assertIsNotNone(prev_date) @mock.patch('pypowervm.tasks.monitor.util.vm_metrics') @mock.patch('pypowervm.tasks.monitor.util.query_ltm_feed') def test_latest_stats_no_data(self, mock_ltm_feed, mock_vm_metrics): # Set up the return data. 
mock_vio3_metric = mock.MagicMock() mock_vio3_metric.category = 'vios_3' mock_vio3_metric.updated_datetime = 2 # Reset as this was invoked once up front. mock_ltm_feed.reset_mock() mock_ltm_feed.return_value = [mock_vio3_metric] # Call the system. resp_date, resp_phyp, resp_vios, resp_lpars = ( pvm_t_mon.latest_stats(mock.Mock(), mock.Mock())) self.assertIsNotNone(resp_date) self.assertIsNone(resp_phyp) self.assertIsNone(resp_vios) self.assertIsNone(resp_lpars) class TestMetricsCache(testtools.TestCase): """Validates the LparMetricCache.""" def setUp(self): super(TestMetricsCache, self).setUp() self.adptfx = self.useFixture(fx.AdapterFx(traits=fx.RemoteHMCTraits)) self.adpt = self.adptfx.adpt @mock.patch('pypowervm.tasks.monitor.util.vm_metrics') @mock.patch('pypowervm.tasks.monitor.util.latest_stats') @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors') def test_refresh(self, mock_ensure_monitor, mock_stats, mock_vm_metrics): ret_prev = None ret1 = None ret2 = {'lpar_uuid': 2} ret3 = {'lpar_uuid': 3} date_ret1 = datetime.datetime.now() date_ret2 = date_ret1 + datetime.timedelta(milliseconds=250) date_ret3 = date_ret2 + datetime.timedelta(milliseconds=250) # LparMetricCache creation invokes latest_stats twice to set both # prev and cur mock_stats.side_effect = [ (None, mock.Mock(), mock.Mock(), mock.Mock()), (date_ret1, mock.Mock(), mock.Mock(), mock.Mock()), (None, mock.Mock(), mock.Mock(), mock.Mock()), (date_ret1, mock.Mock(), mock.Mock(), mock.Mock()), (date_ret2, mock.Mock(), mock.Mock(), mock.Mock()), (date_ret3, mock.Mock(), mock.Mock(), mock.Mock())] mock_vm_metrics.side_effect = [ret_prev, ret1, ret_prev, ret1, ret2, ret3] # Validate that include_vio is passed to latest_stats metric_cache = pvm_t_mon.LparMetricCache(self.adpt, 'host_uuid', refresh_delta=.25, include_vio=False) mock_stats.assert_called_with(self.adpt, 'host_uuid', include_vio=False) metric_cache = pvm_t_mon.LparMetricCache(self.adpt, 'host_uuid', refresh_delta=.25) 
mock_stats.assert_called_with(self.adpt, 'host_uuid', include_vio=True) # Make sure the current and prev are none. self.assertEqual(date_ret1, metric_cache.cur_date) self.assertIsNone(metric_cache.cur_metric) self.assertIsNone(metric_cache.prev_date) self.assertIsNone(metric_cache.prev_metric) # The current metric should detect that it hasn't been enough time # and pass us none. cur_date, cur_metric = metric_cache.get_latest_metric('lpar_uuid') self.assertEqual(date_ret1, cur_date) self.assertIsNone(cur_metric) prev_date, prev_metric = metric_cache.get_previous_metric('lpar_uuid') self.assertIsNone(prev_date) self.assertIsNone(prev_metric) # Force the update by stating we're older than we are. pre_date = metric_cache.cur_date - datetime.timedelta(milliseconds=250) metric_cache.cur_date = pre_date # Verify that we've incremented cur_date, cur_metric = metric_cache.get_latest_metric('lpar_uuid') self.assertEqual(date_ret2, cur_date) self.assertEqual(2, cur_metric) prev_date, prev_metric = metric_cache.get_previous_metric('lpar_uuid') self.assertEqual(pre_date, prev_date) self.assertIsNone(prev_metric) # Verify that if we set the date to now, we don't increment metric_cache.cur_date = datetime.datetime.now() cur_date, cur_metric = metric_cache.get_latest_metric('lpar_uuid') self.assertEqual(2, cur_metric) prev_date, prev_metric = metric_cache.get_previous_metric('lpar_uuid') self.assertEqual(pre_date, prev_date) self.assertIsNone(prev_metric) # Delay one more time. Make sure the previous values are now set. 
pre_date = metric_cache.cur_date - datetime.timedelta(milliseconds=250) metric_cache.cur_date = pre_date cur_date, cur_metric = metric_cache.get_latest_metric('lpar_uuid') self.assertEqual(date_ret3, cur_date) self.assertEqual(3, cur_metric) prev_date, prev_metric = metric_cache.get_previous_metric('lpar_uuid') self.assertEqual(pre_date, prev_date) self.assertEqual(2, prev_metric) pypowervm-1.1.24/pypowervm/tests/tasks/monitor/__init__.py0000664000175000017500000000000013571367171023375 0ustar neoneo00000000000000pypowervm-1.1.24/pypowervm/tests/tasks/monitor/test_host_cpu.py0000664000175000017500000003501013571367171024532 0ustar neoneo00000000000000# Copyright 2014, 2017 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging

import mock
import testtools

from pypowervm.tasks.monitor import host_cpu
import pypowervm.tests.test_fixtures as pvm_fx

LOG = logging.getLogger(__name__)


class TestHostCPUBase(testtools.TestCase):
    """Common fixture: mocks out LTM monitor setup and cache refresh."""

    def setUp(self):
        super(TestHostCPUBase, self).setUp()

        # Fixture for the adapter
        self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
        # Patch out the expensive monitor bootstrap and refresh so the cache
        # constructor is side-effect free in tests.
        ensure_ltm_p = mock.patch(
            'pypowervm.tasks.monitor.util.ensure_ltm_monitors')
        refresh_p = mock.patch(
            'pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
        self.mock_ensure_ltm_monitors = ensure_ltm_p.start()
        self.mock_refresh_if_needed = refresh_p.start()
        self.addCleanup(ensure_ltm_p.stop)
        self.addCleanup(refresh_p.stop)


class TestHostCPUFreq(TestHostCPUBase):
    """Tests the CPU frequency detection."""

    def test_get_cpu_freq(self):
        # _get_cpu_freq() should return an int based on the clock line of the
        # file
        m = mock.mock_open(
            read_data='processor : 12\nclock : 4116.000000MHz\n')
        m.return_value.__iter__ = lambda self: iter(self.readline, '')
        with mock.patch('pypowervm.tasks.monitor.host_cpu.open', m):
            host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
            self.assertEqual(host_stats.cpu_freq, 4116)


class TestHostCPUMetricCache(TestHostCPUBase):
    """Tests the cycle-delta bookkeeping of HostCPUMetricCache."""

    def setUp(self):
        super(TestHostCPUMetricCache, self).setUp()
        # Frequency probing reads /proc; stub it out for all tests here.
        get_cpu_freq_p = mock.patch('pypowervm.tasks.monitor.host_cpu.'
                                    'HostCPUMetricCache._get_cpu_freq')
        self.mock_get_cpu_freq = get_cpu_freq_p.start()
        self.addCleanup(get_cpu_freq_p.stop)

    def _get_sample(self, lpar_id, sample):
        """Return the lpar sample with the given id, or None."""
        for lpar in sample.lpars:
            if lpar.id == lpar_id:
                return lpar
        return None

    def test_refresh(self):
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        host_stats.refresh()
        # Called once in init and once in refesh()
        self.assertEqual(self.mock_refresh_if_needed.call_count, 2)

    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_get_fw_cycles_delta')
    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_get_total_cycles_delta')
    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_gather_user_cycles_delta')
    def test_update_internal_metric(self, mock_user_cycles,
                                    mock_total_cycles, mock_fw_cycles):
        """Totals accumulate across _update_internal_metric calls."""
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')

        # Make sure totals are 0 if there is no data.
        host_stats.cur_phyp = None
        host_stats._update_internal_metric()
        self.assertEqual(host_stats.total_cycles, 0)
        self.assertEqual(host_stats.total_user_cycles, 0)
        self.assertEqual(host_stats.total_fw_cycles, 0)

        # Create mock phyp objects to test with
        mock_phyp = mock.MagicMock()
        mock_fw_cycles.return_value = 58599310268
        mock_prev_phyp = mock.MagicMock()

        # Mock methods not currently under test
        mock_user_cycles.return_value = 50
        mock_total_cycles.return_value = 1.6125945178663e+16

        # Make the 'prev' the current...for the first pass
        host_stats.cur_phyp = mock_prev_phyp
        host_stats.prev_phyp = None
        host_stats._update_internal_metric()
        self.assertEqual(host_stats.total_cycles, 1.6125945178663e+16)
        self.assertEqual(host_stats.total_user_cycles, 50)
        self.assertEqual(host_stats.total_fw_cycles, 58599310268)

        # Mock methods not currently under test
        mock_user_cycles.return_value = 30010090000
        mock_total_cycles.return_value = 1.6125945178663e+16

        # Now 'increment' it with a new current/previous
        host_stats.cur_phyp = mock_phyp
        host_stats.prev_phyp = mock_prev_phyp
        mock_user_cycles.return_value = 100000
        host_stats._update_internal_metric()

        # Validate the new values.  Note that these values are 'higher'
        # because they are running totals.
        new_fw = 58599310268 * 2
        new_total = 1.6125945178663e+16 * 2
        self.assertEqual(host_stats.total_cycles, new_total)
        self.assertEqual(host_stats.total_user_cycles, 100050)
        self.assertEqual(host_stats.total_fw_cycles, new_fw)

    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_get_fw_cycles_delta')
    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_get_total_cycles_delta')
    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_gather_user_cycles_delta')
    def test_update_internal_metric_bad_total(
            self, mock_user_cycles, mock_tot_cycles, mock_fw_cycles):
        """Validates that if the total cycles are off, we handle."""
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        mock_user_cycles.return_value = 30010090000
        mock_fw_cycles.return_value = 58599310268

        # Mock the total cycles to some really low number.
        mock_tot_cycles.return_value = 5

        # Create mock phyp objects to test with
        mock_phyp = mock.MagicMock()
        mock_prev_phyp = mock.MagicMock()
        mock_phyp.sample.system_firmware.utilized_proc_cycles = 58599310268

        # Run the actual test - 'increment' it with a new current/previous
        host_stats.cur_phyp = mock_phyp
        host_stats.prev_phyp = mock_prev_phyp
        host_stats._update_internal_metric()

        # Validate the results.  The total cycles are set to the sum of user
        # and fw when the total is bad.
        self.assertEqual(host_stats.total_cycles, 88609400268)
        self.assertEqual(host_stats.total_user_cycles, 30010090000)
        self.assertEqual(host_stats.total_fw_cycles, 58599310268)

    @mock.patch('pypowervm.tasks.monitor.host_cpu.HostCPUMetricCache.'
                '_delta_proc_cycles')
    def test_gather_user_cycles_delta(self, mock_cycles):
        """User cycle delta sums VM and VIOS deltas; 0 without a prev."""
        # Crete objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        mock_phyp = mock.MagicMock()
        mock_prev_phyp = mock.MagicMock()

        # Mock methods not currently under test
        mock_cycles.return_value = 15005045000

        # Test that we can run with previous samples and then without.
        host_stats.cur_phyp = mock_phyp
        host_stats.prev_phyp = mock_prev_phyp
        resp = host_stats._gather_user_cycles_delta()
        self.assertEqual(30010090000, resp)

        # Now test if there is no previous sample.  Since there are no
        # previous samples, it will be 0.
        host_stats.prev_phyp = None
        mock_cycles.return_value = 0
        resp = host_stats._gather_user_cycles_delta()
        self.assertEqual(0, resp)

    def test_delta_proc_cycles(self):
        # Create objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        mock_phyp, mock_prev_phyp = self._get_mock_phyps()

        # Test that a previous sample allows us to gather the delta across
        # all of the VMs.  This should take into account the scenario where
        # a LPAR is deleted and a new one takes its place (LPAR ID 6)
        delta = host_stats._delta_proc_cycles(mock_phyp.sample.lpars,
                                              mock_prev_phyp.sample.lpars)
        self.assertEqual(10010000000, delta)

        # Now test as if there is no previous data.  This results in 0 as
        # they could have all been LPMs with months of cycles (rather than
        # 30 seconds delta).
        delta2 = host_stats._delta_proc_cycles(mock_phyp.sample.lpars, None)
        self.assertEqual(0, delta2)
        self.assertNotEqual(delta2, delta)

        # Test that if previous sample had 0 values, the sample is not
        # considered for evaluation, and resultant delta cycles is 0.
        prev_lpar_sample = mock_prev_phyp.sample.lpars[0].processor
        prev_lpar_sample.util_cap_proc_cycles = 0
        prev_lpar_sample.util_uncap_proc_cycles = 0
        prev_lpar_sample.idle_proc_cycles = 0
        delta3 = host_stats._delta_proc_cycles(mock_phyp.sample.lpars,
                                               mock_prev_phyp.sample.lpars)
        self.assertEqual(0, delta3)

    def test_delta_user_cycles(self):
        # Create objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        mock_phyp, mock_prev_phyp = self._get_mock_phyps()
        mock_phyp.sample.lpars[0].processor.util_cap_proc_cycles = 250000
        mock_phyp.sample.lpars[0].processor.util_uncap_proc_cycles = 250000
        mock_phyp.sample.lpars[0].processor.idle_proc_cycles = 500
        mock_prev_phyp.sample.lpars[0].processor.util_cap_proc_cycles = 0
        num = 455000
        mock_prev_phyp.sample.lpars[0].processor.util_uncap_proc_cycles = num
        mock_prev_phyp.sample.lpars[0].processor.idle_proc_cycles = 1000

        # Test that a previous sample allows us to gather just the delta.
        new_elem = self._get_sample(4, mock_phyp.sample)
        old_elem = self._get_sample(4, mock_prev_phyp.sample)
        delta = host_stats._delta_user_cycles(new_elem, old_elem)
        self.assertEqual(45500, delta)

        # Validate the scenario where we don't have a previous.  Should
        # default to 0, given no context of why the previous sample did not
        # have the data.
        delta = host_stats._delta_user_cycles(new_elem, None)
        self.assertEqual(0, delta)

    def test_find_prev_sample(self):
        # Create objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        mock_lpar_4A = mock.Mock()
        mock_lpar_4A.configure_mock(id=4, name='A')
        mock_lpar_4A.processor = mock.MagicMock(
            entitled_proc_cycles=500000)
        mock_lpar_6A = mock.Mock()
        mock_lpar_6A.configure_mock(id=6, name='A')
        mock_lpar_6B = mock.Mock()
        mock_lpar_6B.configure_mock(id=6, name='B')
        mock_phyp = mock.MagicMock(sample=mock.MagicMock(lpars=[mock_lpar_4A,
                                                                mock_lpar_6A]))
        mock_prev_phyp = mock.MagicMock(sample=mock.MagicMock(
            lpars=[mock_lpar_4A, mock_lpar_6B]))

        # Sample 6 in the current shouldn't match the previous.  It has the
        # same LPAR ID, but a different name.  This is considered different
        new_elem = self._get_sample(6, mock_phyp.sample)
        prev = host_stats._find_prev_sample(new_elem,
                                            mock_prev_phyp.sample.lpars)
        self.assertIsNone(prev)

        # Lpar 4 should be in the old one.  Match that up.
        new_elem = self._get_sample(4, mock_phyp.sample)
        prev = host_stats._find_prev_sample(new_elem,
                                            mock_prev_phyp.sample.lpars)
        self.assertIsNotNone(prev)
        self.assertEqual(500000, prev.processor.entitled_proc_cycles)

        # Test that we get None back if there are no previous samples
        prev = host_stats._find_prev_sample(new_elem, None)
        self.assertIsNone(prev)

    def test_get_total_cycles(self):
        # Mock objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        mock_phyp = mock.MagicMock()
        mock_phyp.sample = mock.MagicMock()
        mock_phyp.sample.processor.configurable_proc_units = 5
        mock_phyp.sample.time_based_cycles = 500
        host_stats.cur_phyp = mock_phyp

        # Make sure we get the full system cycles.
        max_cycles = host_stats._get_total_cycles_delta()
        self.assertEqual(2500, max_cycles)

    def test_get_total_cycles_diff_cores(self):
        # Mock objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')

        # Latest Sample
        mock_phyp = mock.MagicMock(sample=mock.MagicMock())
        mock_phyp.sample.processor.configurable_proc_units = 48
        mock_phyp.sample.time_based_cycles = 1000
        host_stats.cur_phyp = mock_phyp

        # Earlier sample.  Use a higher proc unit sample
        mock_phyp = mock.MagicMock(sample=mock.MagicMock())
        mock_phyp.sample.processor.configurable_proc_units = 1
        mock_phyp.sample.time_based_cycles = 500
        host_stats.prev_phyp = mock_phyp

        # Make sure we get the full system cycles.
        max_cycles = host_stats._get_total_cycles_delta()
        self.assertEqual(24000, max_cycles)

    def test_get_firmware_cycles(self):
        # Mock objects to test with
        host_stats = host_cpu.HostCPUMetricCache(self.adpt, 'host_uuid')
        # Latest Sample
        mock_phyp = mock.MagicMock(sample=mock.MagicMock())
        mock_phyp.sample.system_firmware.utilized_proc_cycles = 2000
        # Previous Sample
        prev_phyp = mock.MagicMock(sample=mock.MagicMock())
        prev_phyp.sample.system_firmware.utilized_proc_cycles = 1000

        host_stats.cur_phyp = mock_phyp
        host_stats.prev_phyp = prev_phyp

        # Get delta
        delta_firmware_cycles = host_stats._get_fw_cycles_delta()
        self.assertEqual(1000, delta_firmware_cycles)

    def _get_mock_phyps(self):
        """Helper method to return cur_phyp and prev_phyp."""
        mock_lpar_4A = mock.Mock()
        mock_lpar_4A.configure_mock(id=4, name='A')
        mock_lpar_4A.processor = mock.MagicMock(
            util_cap_proc_cycles=5005045000,
            util_uncap_proc_cycles=5005045000, idle_proc_cycles=10000)
        mock_lpar_4A_prev = mock.Mock()
        mock_lpar_4A_prev.configure_mock(id=4, name='A')
        mock_lpar_4A_prev.processor = mock.MagicMock(
            util_cap_proc_cycles=40000, util_uncap_proc_cycles=40000,
            idle_proc_cycles=0)
        mock_phyp = mock.MagicMock(sample=mock.MagicMock(lpars=[mock_lpar_4A]))
        mock_prev_phyp = mock.MagicMock(
            sample=mock.MagicMock(lpars=[mock_lpar_4A_prev]))
        return mock_phyp, mock_prev_phyp
import mock
import testtools

import pypowervm.entities as ent
from pypowervm.tasks import master_mode as m_mode
import pypowervm.tests.tasks.util as u
import pypowervm.tests.test_fixtures as fx


class TestMasterMode(testtools.TestCase):
    """Unit Tests for master mode request and release."""

    def setUp(self):
        super(TestMasterMode, self).setUp()
        self.adpt = self.useFixture(fx.AdapterFx()).adpt
        # The job-template read returns a dummy entry.
        mock_resp = mock.MagicMock()
        mock_resp.entry = ent.Entry(
            {}, ent.Element('Dummy', self.adpt), self.adpt)
        self.adpt.read.return_value = mock_resp

        # A mocked ManagedSystem wrapper to operate on.
        self.msys_w = mock.MagicMock()
        self.msys_w.adapter = self.adpt
        self.msys_w.uuid = '1234'

    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    def test_request_master(self, mock_run_job):
        """request_master runs the RequestMaster job (normal and temp mode).

        The parameter checker asserts the co-management status job parameter
        and the 1800s timeout at run_job time.
        """
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', [(m_mode.CO_MGMT_MASTER_STATUS,
                            m_mode.MasterMode.NORMAL)],
            exp_timeout=1800)
        m_mode.request_master(self.msys_w)
        self.adpt.read.assert_called_once_with('ManagedSystem', '1234',
                                               suffix_parm='RequestMaster',
                                               suffix_type='do')
        self.adpt.reset_mock()
        mock_run_job.reset_mock()

        # Test temp mode
        mock_run_job.side_effect = u.get_parm_checker(
            self, '1234', [(m_mode.CO_MGMT_MASTER_STATUS,
                            m_mode.MasterMode.TEMP)],
            exp_timeout=1800)
        m_mode.request_master(self.msys_w, mode=m_mode.MasterMode.TEMP)
        self.adpt.read.assert_called_once_with('ManagedSystem', '1234',
                                               suffix_parm='RequestMaster',
                                               suffix_type='do')

    @mock.patch('pypowervm.wrappers.job.Job.run_job')
    def test_release_master(self, mock_run_job):
        """release_master runs the ReleaseMaster job with no parameters."""
        m_mode.release_master(self.msys_w)
        self.adpt.read.assert_called_once_with('ManagedSystem', '1234',
                                               suffix_parm='ReleaseMaster',
                                               suffix_type='do')
        mock_run_job.assert_called_once_with('1234', timeout=1800)
import fixtures
import mock
import testtools

from pypowervm import exceptions as pvm_exc
from pypowervm.tasks import ibmi
import pypowervm.tests.tasks.util as tju
import pypowervm.tests.test_fixtures as pvm_fx
import pypowervm.wrappers.base_partition as pvm_bp
from pypowervm.wrappers import virtual_io_server as pvm_vios

VIOS_FEED = 'fake_vios_feed.txt'


class TestIBMi(testtools.TestCase):
    """Unit Tests for IBMi changes.

    Abstract base: subclasses supply the platform traits via setUp's
    traits_type argument (HMC vs. local PVM).
    """

    def setUp(self, traits_type):
        super(TestIBMi, self).setUp()
        self.traits = traits_type
        self.apt = self.useFixture(pvm_fx.AdapterFx(
            traits=self.traits)).adpt
        self.vio_feed = pvm_vios.VIOS.wrap(
            tju.load_file(VIOS_FEED, self.apt))
        self.vioslist = [self.vio_feed[0], self.vio_feed[1]]

    @staticmethod
    def _validate_settings(self, boot_type, traits_type, entry):
        """Asserts the IBMi settings on the updated LPAR entry.

        NOTE: intentionally a staticmethod that is handed the test instance
        explicitly as its first argument (callers pass ``self`` in).
        """
        self.assertEqual('b', entry.desig_ipl_src)
        self.assertEqual('normal', entry.keylock_pos)
        # NOTE(review): both branches currently assert the same console
        # value ('HMC'); presumably the local-PVM branch was meant to differ
        # — confirm against the ibmi task implementation.
        if traits_type == pvm_fx.LocalPVMTraits:
            self.assertEqual('HMC', entry.io_config.tagged_io.console)
        else:
            self.assertEqual('HMC', entry.io_config.tagged_io.console)
        # npiv boot tags VFC slots (3/4); vscsi boot tags slot 2 for both
        # load sources.
        if boot_type == 'npiv':
            self.assertEqual('3', entry.io_config.tagged_io.load_src)
            self.assertEqual('4', entry.io_config.tagged_io.alt_load_src)
        else:
            self.assertEqual('2', entry.io_config.tagged_io.load_src)
            self.assertEqual('2', entry.io_config.tagged_io.alt_load_src)

    def _validate_ibmi_settings(self, mock_viosw):
        """Drives update_ibmi_settings for npiv, vscsi and the error path."""
        mock_viosw.return_value = self.vioslist
        mock_lparw = mock.MagicMock()
        mock_lparw.id = 22

        # Test update load source with npiv boot
        boot_type = 'npiv'
        entry = ibmi.update_ibmi_settings(self.apt, mock_lparw, boot_type)
        self._validate_settings(self, boot_type, self.traits, entry)

        # Test update load source with vscsi boot
        boot_type = 'vscsi'
        entry = ibmi.update_ibmi_settings(self.apt, mock_lparw, boot_type)
        self._validate_settings(self, boot_type, self.traits, entry)

        # Test bad path if load source is not found (no mapping for id 220)
        mock_lparw.reset_mock()
        mock_lparw.id = 220
        boot_type = 'vscsi'
        self.assertRaises(pvm_exc.IBMiLoadSourceNotFound,
                          ibmi.update_ibmi_settings, self.apt, mock_lparw,
                          boot_type)


class TestIBMiWithHMC(TestIBMi):
    """Unit Tests for IBMi changes for HMC."""

    def setUp(self):
        super(TestIBMiWithHMC, self).setUp(pvm_fx.RemoteHMCTraits)

    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')
    def test_update_ibmi_settings(self, mock_viosw):
        self._validate_ibmi_settings(mock_viosw)


class TestIBMiWithPVM(TestIBMi):
    """Unit Tests for IBMi changes for PVM."""

    def setUp(self):
        super(TestIBMiWithPVM, self).setUp(pvm_fx.LocalPVMTraits)

    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')
    def test_update_ibmi_settings(self, mock_viosw):
        self._validate_ibmi_settings(mock_viosw)

    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VStorageMapping.'
                'client_adapter', new_callable=mock.PropertyMock,
                return_value=None)
    def test_update_ibmi_settings_w_stale_adapters(self, mock_c_adap,
                                                   mock_viosw):
        """Stale (adapter-less) mappings must not match the load source."""
        mock_lparw = mock.MagicMock()
        mock_lparw.id = 22
        self.assertRaises(pvm_exc.IBMiLoadSourceNotFound,
                          ibmi.update_ibmi_settings, self.apt, mock_lparw,
                          'vscsi')
mock_crt_jparm = self.useFixture(fixtures.MockPatch( 'pypowervm.wrappers.job.Job.create_job_parameter')).mock mock_crt_jparm.side_effect = ( lambda name, value, cdata=False: '%s=%s' % (name, value)) # Patch Job.wrap to return a mocked Job wrapper mock_job = mock.Mock() self.useFixture(fixtures.MockPatch( 'pypowervm.wrappers.job.Job.wrap')).mock.return_value = mock_job self.run_job = mock_job.run_job def mock_partition(self, env=pvm_bp.LPARType.OS400, rmc_state=pvm_bp.RMCState.ACTIVE): """Returns a mocked partition with the specified properties.""" return mock.Mock(adapter=self.adpt, env=env, rmc_state=rmc_state) def test_ops(self): mock_part = self.mock_partition() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.CONSOLESERVICE) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=consoleservice'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.IOPDUMP) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=iopdump'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.IOPRESET) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=iopreset'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.REMOTEDSTON) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=remotedston'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.REMOTEDSTOFF) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=remotedstoff'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.RETRYDUMP) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=retrydump'], synchronous=True, timeout=1800) self.run_job.reset_mock() 
ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.DSTON) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=dston'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.DUMPRESTART) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=dumprestart'], synchronous=True, timeout=1800) self.run_job.reset_mock() ibmi.start_panel_job(mock_part, ibmi.IBMiPanelOperations.DUMPRESTART, synchronous=False, timeout=100) self.run_job.assert_called_once_with( mock_part.uuid, job_parms=['operation=dumprestart'], synchronous=False, timeout=100) self.run_job.reset_mock() self.assertRaises(pvm_exc.InvalidIBMiPanelFunctionOperation, ibmi.start_panel_job, mock_part, 'NotRight') self.assertRaises(pvm_exc.InvalidIBMiPanelFunctionOperation, ibmi.start_panel_job, mock_part, None) def test_exceptions(self): mock_part = self.mock_partition(env=pvm_bp.LPARType.AIXLINUX) self.assertRaises(pvm_exc.PartitionIsNotIBMi, ibmi.start_panel_job, mock_part, ibmi.IBMiPanelOperations.REMOTEDSTON) self.assertRaises(pvm_exc.PanelFunctionRequiresPartition, ibmi.start_panel_job, None, ibmi.IBMiPanelOperations.REMOTEDSTON) pypowervm-1.1.24/pypowervm/tests/tasks/test_vfc_mapper.py0000664000175000017500000007261313571367171023353 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest

import mock

from pypowervm import const as c
from pypowervm import exceptions as e
from pypowervm.tasks import vfc_mapper
from pypowervm.tests.tasks import util as tju
from pypowervm.tests.test_utils import test_wrapper_abc as twrap
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios

# Canned VIOS response files used to build wrapper fixtures.
VIOS_FILE = 'fake_vios.txt'
VIOS_FEED = 'fake_vios_feed.txt'

FAKE_UUID = '42DF39A2-3A4A-4748-998F-25B15352E8A7'


class TestVFCMapper(unittest.TestCase):
    """Tests for the module-level functions of pypowervm.tasks.vfc_mapper."""

    @mock.patch('pypowervm.wrappers.job.Job')
    def test_build_wwpn_pair(self, mock_job):
        mock_adpt = mock.MagicMock()
        mock_adpt.read.return_value = mock.Mock()

        # Mock out the job response
        job_w = mock.MagicMock()
        mock_job.wrap.return_value = job_w
        job_w.get_job_results_as_dict.return_value = {
            'wwpnList': 'a,b,c,d,e,f,g,h'}

        # Invoke and validate: the comma-separated job result is split into
        # a flat list of WWPNs.
        resp = vfc_mapper.build_wwpn_pair(mock_adpt, 'host_uuid', pair_count=4)
        self.assertEqual(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'], resp)

        # Make sure that the job was built properly
        mock_adpt.read.assert_called_once_with(
            'ManagedSystem', root_id='host_uuid', suffix_type=c.SUFFIX_TYPE_DO,
            suffix_parm=vfc_mapper._GET_NEXT_WWPNS)
        job_w.create_job_parameter.assert_called_once_with(
            'numberPairsRequested', '4')

    def test_find_vios_for_wwpn(self):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_feed_w = [vios_w]

        # Basic test
        vio_resp, p_resp = vfc_mapper.find_vios_for_wwpn(
            vios_feed_w, '10000090FA45473B')
        self.assertEqual(vios_w, vio_resp)
        self.assertIsNotNone(p_resp)

        # Validates the sanitized input (colon-separated, lower case)
        vio_resp, p_resp = vfc_mapper.find_vios_for_wwpn(
            vios_feed_w, '10:00:00:90:fa:45:47:3b')
        self.assertEqual(vios_w, vio_resp)
        self.assertIsNotNone(p_resp)

        # Make sure a bad WWPN returns no result
        vio_resp, p_resp = vfc_mapper.find_vios_for_wwpn(
            vios_feed_w, '10:00:00:90:fa:45:47:3f')
        self.assertIsNone(vio_resp)
        self.assertIsNone(p_resp)

    def test_intersect_wwpns(self):
        # Intersection normalizes formats (colons, case) before comparing.
        list1 = ['AA:BB:CC:DD:EE:FF']
        list2 = {'aabbccddeeff', '1234567890'}
        self.assertEqual(list1, vfc_mapper.intersect_wwpns(list1, list2))

        # Full match
        list1 = {'aabbccddeeff', '1234567890'}
        list2 = ['AA:BB:CC:DD:EE:FF', '12:34:56:78:90']
        self.assertEqual(list1, set(vfc_mapper.intersect_wwpns(list1, list2)))

        # Second set as the limiter
        list1 = ['AA:BB:CC:DD:EE:FF', '12:34:56:78:90']
        list2 = {'aabbccddeeff'}
        self.assertEqual(['AA:BB:CC:DD:EE:FF'],
                         vfc_mapper.intersect_wwpns(list1, list2))

    def test_derive_npiv_map(self):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA45473B', '10:00:00:90:fa:45:17:58']

        # Virtual WWPNs can be faked, and simplified.
        v_port_wwpns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

        # Run the derivation now: ten virtual WWPNs pair up into five
        # (phys wwpn, fused virt pair) entries.
        resp = vfc_mapper.derive_npiv_map(vios_wraps, p_wwpns, v_port_wwpns)
        self.assertIsNotNone(resp)
        self.assertEqual(5, len(resp))

        # Make sure we only get two unique keys back.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual({'10000090FA45473B', '10000090FA451758'}, unique_keys)

    def test_derive_npiv_map_existing_preserve(self):
        # Use sample vios data with mappings.
        vios_file = 'fake_vios_mappings.txt'
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(vios_file).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA1B6898', '10000090FA1B6899']
        v_port_wwpns = ['c05076065a7c02e4', 'c05076065a7c02e5']

        candidates = vfc_mapper._find_ports_on_vio(vios_w, p_wwpns)
        for p_port in candidates:
            if p_port.wwpn == p_wwpns[1]:
                # Artificially inflate the free ports so that it would get
                # chosen for a newly created mapping, but first in list
                # would show up for a preserved mapping.
                p_port.set_parm_value('AvailablePorts', '64')

        # Run the derivation now
        resp = vfc_mapper.derive_npiv_map(vios_wraps, p_wwpns, v_port_wwpns,
                                          preserve=True)
        self.assertIsNotNone(resp)
        self.assertEqual(1, len(resp))

        # Make sure we only got the one phys port key back that has the
        # existing mapping.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual({'10000090FA1B6898'}, unique_keys)

    def test_derive_npiv_map_existing_no_preserve(self):
        # Use sample vios data with mappings.
        vios_file = 'fake_vios_mappings.txt'
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(vios_file).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA1B6898', '10000090FA1B6899']
        v_port_wwpns = ['c05076065a7c02e4', 'c05076065a7c02e5']

        candidates = vfc_mapper._find_ports_on_vio(vios_w, p_wwpns)
        for p_port in candidates:
            if p_port.wwpn == p_wwpns[1]:
                # Artificially inflate the free ports so that it would get
                # chosen for a newly created mapping.
                p_port.set_parm_value('AvailablePorts', '64')

        # Run the derivation now
        resp = vfc_mapper.derive_npiv_map(vios_wraps, p_wwpns, v_port_wwpns,
                                          preserve=False)
        self.assertIsNotNone(resp)
        self.assertEqual(1, len(resp))

        # Make sure we only got one phys port key back and it should *not*
        # match the existing mapping of 'preserve' testcase.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual({'10000090FA1B6899'}, unique_keys)

    @mock.patch('pypowervm.wrappers.virtual_io_server.VFCMapping.backing_port',
                new_callable=mock.PropertyMock, return_value=None)
    def test_derive_npiv_map_existing_no_bp(self, mock_bp):
        vios_file = 'fake_vios_mappings.txt'
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(vios_file).entry)

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA1B6898', '10000090FA1B6899']
        v_port_wwpns = ['c05076065a7c02e4', 'c05076065a7c02e5']

        resp = vfc_mapper.derive_npiv_map([vios_w], p_wwpns, v_port_wwpns)
        # We shouldn't have returned the existing mapping that didn't have
        # a backing port.
        self.assertEqual([], resp)

    def test_derive_base_npiv_map(self):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA45473B', '10:00:00:90:fa:45:17:58']

        # Run the derivation now
        resp = vfc_mapper.derive_base_npiv_map(vios_wraps, p_wwpns, 5)
        self.assertIsNotNone(resp)
        self.assertEqual(5, len(resp))

        # Make sure we only get two unique keys back.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual({'10000090FA45473B', '10000090FA451758'}, unique_keys)

        # Make sure we get the 'marker' back for the values.  Should now be
        # fused.
        values = set(i[1] for i in resp)
        self.assertEqual({vfc_mapper._FUSED_ANY_WWPN}, values)

    def test_derive_npiv_map_multi_vio(self):
        vios_wraps = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FEED))

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA5371F2', '10000090FA53720A']

        # Virtual WWPNs can be faked, and simplified.
        v_port_wwpns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

        # Run the derivation now
        resp = vfc_mapper.derive_npiv_map(vios_wraps, p_wwpns, v_port_wwpns)
        self.assertIsNotNone(resp)
        self.assertEqual(5, len(resp))

        # Make sure we only get two unique keys back.
        unique_keys = set([i[0] for i in resp])
        self.assertEqual(set(p_wwpns), unique_keys)

    def test_derive_npiv_map_failure(self):
        """Make sure we get a failure in the event of no candidates."""
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS.  These WWPNs don't actually exist,
        # so the VIOSes passed in won't have these as candidate ports.
        p_wwpns = ['10000090FA45473bA', '10:00:00:90:fa:45:17:58A']

        # Virtual WWPNs can be faked, and simplified.
        v_port_wwpns = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

        # Run the derivation now
        self.assertRaises(e.UnableToFindFCPortMap,
                          vfc_mapper.derive_npiv_map, vios_wraps, p_wwpns,
                          v_port_wwpns)

    def test_find_map_port(self):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)

        # Happy path, should find the first port on the VIOS
        p1 = vfc_mapper._find_map_port(vios_w.pfc_ports, [])
        self.assertIsNotNone(p1)

        # Lets add a mapping where P1 is used.  Should not get that result
        # back.
        p2 = vfc_mapper._find_map_port(vios_w.pfc_ports, [(p1.wwpn, '')])
        self.assertIsNotNone(p2)
        self.assertNotEqual(p1, p2)

        # Now add a third and fourth port.  Same assertions.
        p3 = vfc_mapper._find_map_port(vios_w.pfc_ports, [(p1.wwpn, ''),
                                                          (p2.wwpn, '')])
        self.assertIsNotNone(p3)
        self.assertNotIn(p3, [p1, p2])

        p4 = vfc_mapper._find_map_port(vios_w.pfc_ports, [(p1.wwpn, ''),
                                                          (p2.wwpn, ''),
                                                          (p3.wwpn, '')])
        self.assertIsNotNone(p4)
        self.assertNotIn(p4, [p1, p2, p3])

        # Artificially inflate the use of other ports.  The least-used port
        # (p3, only once in the list) should be selected.
        port_use = [(p1.wwpn, ''), (p2.wwpn, ''), (p3.wwpn, ''),
                    (p4.wwpn, ''), (p1.wwpn, ''), (p2.wwpn, ''),
                    (p4.wwpn, '')]
        p_temp = vfc_mapper._find_map_port(vios_w.pfc_ports, port_use)
        self.assertIsNotNone(p_temp)
        self.assertNotIn(p_temp, [p1, p2, p4])

    def test_fuse_vfc_ports(self):
        # Pairs of WWPNs are fused into single space-separated, upper-cased,
        # colon-stripped strings.
        self.assertEqual(['A B'], vfc_mapper._fuse_vfc_ports(['a', 'b']))
        self.assertEqual(['AA BB'], vfc_mapper._fuse_vfc_ports(['a:a', 'b:b']))
        self.assertEqual(['A B', 'C D'],
                         vfc_mapper._fuse_vfc_ports(['a', 'b', 'c', 'd']))

    @mock.patch('pypowervm.tasks.vfc_mapper.derive_base_npiv_map')
    def test_build_migration_mappings_for_fabric(self, mock_derive):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_wraps = [vios_w]

        # Subset the WWPNs on that VIOS
        p_wwpns = ['10000090FA45473B', '10:00:00:90:fa:45:17:58']

        client_slots = ['1', '2']

        # The derive is non-deterministic.  That makes testing odd.  Force
        # a deterministic result.
        mock_derive.return_value = [('10000090FA451758', 'A A'),
                                    ('10000090FA45473B', 'B B')]

        # Build migration mappings success case
        resp = vfc_mapper.build_migration_mappings_for_fabric(
            vios_wraps, p_wwpns, client_slots)
        self.assertEqual(2, len(resp))
        self.assertEqual({'1/IO Server/1//fcs2', '2/IO Server/1//fcs1'},
                         set(resp))

    def test_build_migration_mappings_for_fabric_invalid_physical_port(self):
        vios_w = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FILE).entry)
        vios_wraps = [vios_w]

        # Invalid WWPNs should raise an error.
        p_wwpns = ['10000090FA45477B']

        client_slots = ['1', '2']

        # Build migration mappings success case
        self.assertRaises(e.UnableToFindFCPortMap,
                          vfc_mapper.build_migration_mappings_for_fabric,
                          vios_wraps, p_wwpns, client_slots)

    def test_build_migration_mappings(self):
        vios_wraps = pvm_vios.VIOS.wrap(tju.load_file(VIOS_FEED))

        # Two fabrics, each with two slots/physical ports; peers pair slots
        # across fabrics.
        fabric_data = {'A': {'slots': [3, 4],
                             'p_port_wwpns': ["10000090FA5371F1",
                                              "10000090FA53720A"]},
                       'B': {'slots': [5, 6],
                             'p_port_wwpns': ["10000090FA5371F2",
                                              "10000090FA537209"]}}
        slot_peers = [[3, 5], [4, 6]]
        resp = vfc_mapper.build_migration_mappings(
            vios_wraps, fabric_data, slot_peers)
        self.assertEqual(4, len(resp))
        self.assertEqual(set(resp), {'4/nimbus-ch03-p2-vios1/1//fcs1',
                                     '6/nimbus-ch03-p2-vios1/1//fcs0',
                                     '3/nimbus-ch03-p2-vios2/2//fcs0',
                                     '5/nimbus-ch03-p2-vios2/2//fcs1'})

        # Uneven fabric/slot distribution still maps correctly.
        fabric_data = {'A': {'slots': [3],
                             'p_port_wwpns': ["10000090FA5371F1"]},
                       'B': {'slots': [5, 6],
                             'p_port_wwpns': ["10000090FA5371F2",
                                              "10000090FA537209"]}}
        slot_peers = [[3, 5], [6]]
        resp = vfc_mapper.build_migration_mappings(
            vios_wraps, fabric_data, slot_peers)
        self.assertEqual(3, len(resp))
        self.assertEqual(set(resp), {'5/nimbus-ch03-p2-vios2/2//fcs1',
                                     '3/nimbus-ch03-p2-vios2/2//fcs0',
                                     '6/nimbus-ch03-p2-vios1/1//fcs0'})

        # Use invalid ports
        fabric_data = {'A': {'slots': [3],
                             'p_port_wwpns': ["10000090FA5371F1"]},
                       'B': {'slots': [5],
                             'p_port_wwpns': ["10000090FA537209"]}}
        slot_peers = [[3, 5]]
        self.assertRaises(e.UnableToFindFCPortMap,
                          vfc_mapper.build_migration_mappings, vios_wraps,
                          fabric_data, slot_peers)


class TestPortMappings(twrap.TestWrapper):
    """Tests for vfc_mapper lookups that operate against a VIOS feed."""

    file = VIOS_FEED
    wrapper_class_to_test = pvm_vios.VIOS
    mock_adapter_fx_args = {}

    def setUp(self):
        super(TestPortMappings, self).setUp()
        # The client LPAR href is not resolvable from canned data; stub it.
        href_p = mock.patch('pypowervm.wrappers.virtual_io_server.VFCMapping.'
                            '_client_lpar_href')
        href = href_p.start()
        self.addCleanup(href_p.stop)
        href.return_value = 'fake_href'
        self.adpt.read.return_value = self.resp

    def test_find_vios_for_port_map(self):
        """Tests the find_vios_for_port_map method."""
        # Try off of the client WWPNs
        e0 = ('bad', 'c05076079cff08da c05076079cff08db')
        self.assertEqual(self.entries[0],
                         vfc_mapper.find_vios_for_port_map(self.entries, e0))

        # This WWPN is on the first VIOS
        e1 = ('10000090FA5371f1', 'a b')
        self.assertEqual(self.entries[0],
                         vfc_mapper.find_vios_for_port_map(self.entries, e1))

        # This WWPN is on the second VIOS
        e2 = ('10000090FA537209', 'a b')
        self.assertEqual(self.entries[1],
                         vfc_mapper.find_vios_for_port_map(self.entries, e2))

        # Try with a bad WWPN
        e3 = ('BAD', 'a b')
        self.assertIsNone(vfc_mapper.find_vios_for_port_map(self.entries, e3))

    def test_find_vios_for_vfc_wwpns(self):
        """Tests the find_vios_for_vfc_wwpns method."""
        # This WWPN is on the first VIOS
        v_wwpns = ['c05076079cff0e56', 'c05076079cff0e57']
        vios, vmap = vfc_mapper.find_vios_for_vfc_wwpns(self.entries, v_wwpns)
        self.assertEqual(self.entries[0], vios)
        self.assertEqual('10000090FA5371F2', vmap.backing_port.wwpn)

        # Have one of the ports be wrong
        v_wwpns = ['c05076079cff0e56', 'c05076079cff0e59']
        vios, vmap = vfc_mapper.find_vios_for_vfc_wwpns(self.entries, v_wwpns)
        self.assertIsNone(vios)
        self.assertIsNone(vmap)

        # Try odd formatting
        v_wwpns = ['C05076079cff0E56', 'c0:50:76:07:9c:ff:0E:57']
        vios, vmap = vfc_mapper.find_vios_for_vfc_wwpns(self.entries, v_wwpns)
        self.assertEqual(self.entries[0], vios)
        self.assertEqual('10000090FA5371F2', vmap.backing_port.wwpn)

        # Second VIOS
        v_wwpns = ['c05076079cff07ba', 'c05076079cff07bb']
        vios, vmap = vfc_mapper.find_vios_for_vfc_wwpns(self.entries, v_wwpns)
        self.assertEqual(self.entries[1], vios)
        self.assertEqual('10000090FA53720A', vmap.backing_port.wwpn)

        # Reverse WWPNs
        v_wwpns = ['c05076079cff07bb', 'c05076079cff07ba']
        vios, vmap = vfc_mapper.find_vios_for_vfc_wwpns(self.entries, v_wwpns)
        self.assertEqual(self.entries[1], vios)
        self.assertEqual('10000090FA53720A', vmap.backing_port.wwpn)

        # Set Type
        v_wwpns = {'c05076079cff07bb', 'c05076079cff07ba'}
        vios, vmap = vfc_mapper.find_vios_for_vfc_wwpns(self.entries, v_wwpns)
        self.assertEqual(self.entries[1], vios)
        self.assertEqual('10000090FA53720A', vmap.backing_port.wwpn)

        # test to check fabrics with no backing port are ignored
        mock_client_adap1 = mock.create_autospec(
            pvm_stor.VFCClientAdapter, spec_set=True)
        mock_client_adap1.configure_mock(
            wwpns=['C05076065A7C02E4', 'C05076065A7C02E5'])
        mock_map1 = mock.create_autospec(pvm_vios.VFCMapping, spec_set=True)
        mock_map1.configure_mock(
            backing_port=None, client_adapter=mock_client_adap1)
        vios_w = mock.Mock(vfc_mappings=[mock_map1])
        v_port_wwpns = ['C05076065A7C02E4', 'C05076065A7C02E5']
        vmap = vfc_mapper.find_vios_for_vfc_wwpns([vios_w], v_port_wwpns)[1]
        self.assertIsNone(vmap)

        # A mapping with a backing port (map2) wins over one without (map1).
        mock_client_adap2 = mock.create_autospec(
            pvm_stor.VFCClientAdapter, spec_set=True)
        mock_client_adap2.configure_mock(
            wwpns=['C05076065A7C02E4', 'C05076065A7C02E5'])
        mock_map2 = mock.create_autospec(pvm_vios.VFCMapping, spec_set=True)
        mock_map2.configure_mock(
            backing_port="port1", client_adapter=mock_client_adap2)
        vios_w = mock.Mock(vfc_mappings=[mock_map1, mock_map2])
        v_port_wwpns = ['C05076065A7C02E4', 'C05076065A7C02E5']
        vmap = vfc_mapper.find_vios_for_vfc_wwpns([vios_w], v_port_wwpns)[1]
        self.assertEqual(mock_map2, vmap)

    def test_find_pfc_wwpn_by_name(self):
        vio_w = self.entries[0]
        self.assertEqual('10000090FA5371F1',
                         vfc_mapper.find_pfc_wwpn_by_name(vio_w, 'fcs0'))

        self.assertIsNone(vfc_mapper.find_pfc_wwpn_by_name(vio_w, 'fcsX'))
@mock.patch('lxml.etree.tostring') def test_add_port_bad_pfc(self, mock_tostring): """Validates that an error will be thrown with a bad pfc port.""" # Build the mappings - the provided WWPN is bad vfc_map = ('10000090FA5371F9', '0 1') # Now call the add action. This should log a warning. with self.assertLogs(vfc_mapper.__name__, level='WARNING'): self.assertRaises(e.UnableToDerivePhysicalPortForNPIV, vfc_mapper.add_map, self.entries[0], 'host_uuid', FAKE_UUID, vfc_map) mock_tostring.assert_called_once_with( self.entries[0].entry.element.element, pretty_print=True) def ensure_does_not_have_wwpns(self, vios_w, wwpns): for vfc_map in vios_w.vfc_mappings: if vfc_map.client_adapter is None: continue for c_wwpn in vfc_map.client_adapter.wwpns: if c_wwpn in wwpns: self.fail("WWPN %s in client adapter" % vfc_mapper) def ensure_has_wwpns(self, vios_w, wwpns): for my_wwpn in wwpns: has_wwpn = False for vfc_map in vios_w.vfc_mappings: if vfc_map.client_adapter is None: continue for c_wwpn in vfc_map.client_adapter.wwpns: if c_wwpn == my_wwpn: has_wwpn = True break if not has_wwpn: self.fail("Unable to find WWPN %s" % my_wwpn) def test_find_maps(self): vwrap = self.entries[0] matches = vfc_mapper.find_maps(vwrap.vfc_mappings, 10) # Make sure we got the right ones self.assertEqual( ['U7895.43X.21EF9FB-V63-C3', 'U7895.43X.21EF9FB-V66-C4', 'U7895.43X.21EF9FB-V62-C4', 'U7895.43X.21EF9FB-V10-C4'], [match.client_adapter.loc_code for match in matches]) # Bogus LPAR ID self.assertEqual([], vfc_mapper.find_maps(vwrap.vfc_mappings, 1000)) # Now try with UUID matches = vfc_mapper.find_maps(vwrap.vfc_mappings, '3ADDED46-B3A9-4E12-B6EC-8223421AF49B') self.assertEqual( ['U7895.43X.21EF9FB-V63-C3', 'U7895.43X.21EF9FB-V66-C4', 'U7895.43X.21EF9FB-V62-C4', 'U7895.43X.21EF9FB-V10-C4'], [match.client_adapter.loc_code for match in matches]) # Bogus LPAR UUID self.assertEqual([], vfc_mapper.find_maps( vwrap.vfc_mappings, '4BEEFD00-B3A9-4E12-B6EC-8223421AF49B')) def test_remove_maps(self): v_wrap = 
self.entries[0] len_before = len(v_wrap.vfc_mappings) resp_list = vfc_mapper.remove_maps(v_wrap, 10) expected_removals = { 'U7895.43X.21EF9FB-V63-C3', 'U7895.43X.21EF9FB-V66-C4', 'U7895.43X.21EF9FB-V62-C4', 'U7895.43X.21EF9FB-V10-C4'} self.assertEqual( set([el.client_adapter.loc_code for el in resp_list]), expected_removals) self.assertEqual(len_before - 4, len(v_wrap.vfc_mappings)) # Make sure the remaining adapters do not have the remove codes. for remaining_map in v_wrap.vfc_mappings: if remaining_map.client_adapter is not None: self.assertNotIn(remaining_map.client_adapter.loc_code, expected_removals) def test_remove_maps_client_adpt(self): """Tests the remove_maps method, with the client_adpt input.""" v_wrap = self.entries[0] len_before = len(v_wrap.vfc_mappings) c_adpt = vfc_mapper.find_maps( v_wrap.vfc_mappings, 10)[0].client_adapter resp_list = vfc_mapper.remove_maps(v_wrap, 10, client_adpt=c_adpt) expected_removals = {'U7895.43X.21EF9FB-V63-C3'} self.assertEqual( set([el.client_adapter.loc_code for el in resp_list]), expected_removals) self.assertEqual(len_before - 1, len(v_wrap.vfc_mappings)) # Make sure the remaining adapters do not have the remove codes. for remaining_map in v_wrap.vfc_mappings: if remaining_map.client_adapter is not None: self.assertNotIn(remaining_map.client_adapter.loc_code, expected_removals) def test_has_client_wwpns(self): v_wrap_1 = self.entries[0] v_wrap_2 = self.entries[1] vio_w, vfc_map = vfc_mapper.has_client_wwpns( self.entries, ['c05076079cff0e56', 'c05076079cff0e57']) self.assertEqual(v_wrap_1, vio_w) self.assertEqual('10000090FA5371F2', vfc_map.backing_port.wwpn) # Second vios. Reversed WWPNs. Mixed Case. vio_w, vfc_map = vfc_mapper.has_client_wwpns( self.entries, ['c05076079cff0e83', 'c05076079cff0E82']) self.assertEqual(v_wrap_2, vio_w) self.assertEqual('10000090FA537209', vfc_map.backing_port.wwpn) # Not found. 
vio_w, vfc_map = vfc_mapper.has_client_wwpns( self.entries, ['AAA', 'bbb']) self.assertIsNone(vio_w) self.assertIsNone(vfc_map) class TestAddRemoveMap(twrap.TestWrapper): file = VIOS_FEED wrapper_class_to_test = pvm_vios.VIOS mock_adapter_fx_args = {} def setUp(self): super(TestAddRemoveMap, self).setUp() href_p = mock.patch('pypowervm.wrappers.virtual_io_server.VFCMapping.' 'crt_related_href') href = href_p.start() self.addCleanup(href_p.stop) href.return_value = ( 'https://9.1.2.3:12443/rest/api/uom/ManagedSystem/' 'e7344c5b-79b5-3e73-8f64-94821424bc25/LogicalPartition/' '3ADDED46-B3A9-4E12-B6EC-8223421AF49B') self.adpt.read.return_value = self.resp self.lpar_uuid = '3ADDED46-B3A9-4E12-B6EC-8223421AF49B' def test_add_remove_map_any_wwpn(self): """Tests a loop of add map/remove map when using _ANY_WWPN.""" v_wrap = self.entries[0] len_before = len(v_wrap.vfc_mappings) # A fake mapping to the first IO Server p_map_vio1 = ('10000090FA5371F2', vfc_mapper._FUSED_ANY_WWPN) vfc_mapper.add_map(v_wrap, 'host_uuid', self.lpar_uuid, p_map_vio1) self.assertEqual(len_before + 1, len(v_wrap.vfc_mappings)) # See if we can find that mapping. maps = vfc_mapper.find_maps(v_wrap.vfc_mappings, self.lpar_uuid, port_map=p_map_vio1) self.assertEqual(1, len(maps)) # Even though we were searching for a 'FUSED' wwpn, the mapping itself # will have nothing on it, to indicate that the API should generate # the WWPNs. Therefore, we validate that we found the mapping without # any WWPNs on it. self.assertEqual([], maps[0].client_adapter.wwpns) # Now try to remove it... 
vfc_mapper.remove_maps(v_wrap, self.lpar_uuid, port_map=p_map_vio1) self.assertEqual(len_before, len(v_wrap.vfc_mappings)) def test_add_map(self): """Validates the add_map method.""" # Determine the vios original values vios_wrap = self.entries[0] vios1_orig_map_count = len(vios_wrap.vfc_mappings) # Subset the WWPNs on that VIOS fabric_wwpns = ['10000090FA5371F2'] # Fake Virtual WWPNs v_fabric_wwpns = ['0', '1'] # Get the mappings fabric_map = vfc_mapper.derive_npiv_map([vios_wrap], fabric_wwpns, v_fabric_wwpns)[0] # Make sure the map was not there initially. maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings, self.lpar_uuid, port_map=fabric_map) self.assertEqual(0, len(maps)) # Now call the add action resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid, fabric_map) self.assertIsNotNone(resp) self.assertIsInstance(resp, pvm_vios.VFCMapping) # Verify the update is now found. maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings, self.lpar_uuid, port_map=fabric_map) self.assertEqual(1, len(maps)) self.assertEqual(vios1_orig_map_count + 1, len(vios_wrap.vfc_mappings)) # Try to add it again...it shouldn't re-add it because its already # there. Flip WWPNs to verify set query. fabric_map = ('10000090FA5371F2', '1 0') resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid, fabric_map) self.assertIsNone(resp) self.assertEqual(vios1_orig_map_count + 1, len(vios_wrap.vfc_mappings)) # We should only find one here...the original add. Not two even though # we've called add twice. maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings, self.lpar_uuid, port_map=fabric_map) self.assertEqual(1, len(maps)) # This time, remove the backing port of the existing mapping and try # the add again. It should return an updated mapping that contains the # backing port. This simulates a VM migrating with a vfc mapping, but # no volume had been previously detached. 
maps[0].element.remove(maps[0].backing_port.element) resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid, fabric_map) self.assertIsNotNone(resp) self.assertIsInstance(resp, pvm_vios.VFCMapping) self.assertIsNotNone(resp.backing_port) self.assertIn('Port', resp.child_order) # Pass in slot number to be set on the VFC adapter fabric_map = ('10000090FA5371F1', '2 3') resp = vfc_mapper.add_map(vios_wrap, 'host_uuid', self.lpar_uuid, fabric_map, lpar_slot_num=3) self.assertIsNotNone(resp) self.assertEqual(vios1_orig_map_count + 2, len(vios_wrap.vfc_mappings)) # Verify the update is now found. maps = vfc_mapper.find_maps(vios_wrap.vfc_mappings, self.lpar_uuid, port_map=fabric_map) self.assertEqual(1, len(maps)) self.assertEqual(3, maps[0].client_adapter.lpar_slot_num) pypowervm-1.1.24/pypowervm/tests/tasks/test_partition.py0000664000175000017500000003736213571367171023244 0ustar neoneo00000000000000# Copyright 2015, 2018 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for pypowervm.tasks.partition.""" import mock import testtools import pypowervm.const as c import pypowervm.entities as ent import pypowervm.exceptions as ex import pypowervm.tasks.partition as tpar import pypowervm.tests.tasks.util as tju import pypowervm.tests.test_fixtures as fx import pypowervm.tests.test_utils.test_wrapper_abc as twrap import pypowervm.wrappers.base_partition as bp import pypowervm.wrappers.logical_partition as lpar import pypowervm.wrappers.virtual_io_server as vios LPAR_FEED_WITH_MGMT = 'lpar.txt' VIO_FEED_WITH_MGMT = 'fake_vios_feed.txt' LPAR_FEED_NO_MGMT = 'lpar_ibmi.txt' VIO_FEED_NO_MGMT = 'fake_vios_feed2.txt' def mock_vios(name, state, rmc_state, is_mgmt=False, uptime=3601): ret = mock.Mock() ret.configure_mock(name=name, state=state, rmc_state=rmc_state, is_mgmt_partition=is_mgmt, uptime=uptime) return ret class TestPartition(testtools.TestCase): def setUp(self): super(TestPartition, self).setUp() self.adpt = self.useFixture( fx.AdapterFx(traits=fx.RemotePVMTraits)).adpt self.mgmt_vio = tju.load_file(VIO_FEED_WITH_MGMT, self.adpt) self.mgmt_lpar = tju.load_file(LPAR_FEED_WITH_MGMT, self.adpt) self.nomgmt_vio = tju.load_file(VIO_FEED_NO_MGMT, self.adpt) self.nomgmt_lpar = tju.load_file(LPAR_FEED_NO_MGMT, self.adpt) def test_get_mgmt_lpar(self): "Happy path where the LPAR is the mgmt VM is a LPAR." self.adpt.read.side_effect = [self.nomgmt_vio, self.mgmt_lpar] mgmt_w = tpar.get_mgmt_partition(self.adpt) self.assertTrue(mgmt_w.is_mgmt_partition) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', mgmt_w.uuid) self.assertIsInstance(mgmt_w, lpar.LPAR) self.assertEqual(2, self.adpt.read.call_count) def test_get_mgmt_vio(self): "Happy path where the LPAR is the mgmt VM is a VIOS." 
self.adpt.read.side_effect = [self.mgmt_vio, self.nomgmt_lpar] mgmt_w = tpar.get_mgmt_partition(self.adpt) self.assertTrue(mgmt_w.is_mgmt_partition) self.assertEqual('7DBBE705-E4C4-4458-8223-3EBE07015CA9', mgmt_w.uuid) self.assertIsInstance(mgmt_w, vios.VIOS) self.assertEqual(1, self.adpt.read.call_count) def test_get_mgmt_none(self): """Failure path with no mgmt VMs.""" self.adpt.read.side_effect = [self.nomgmt_lpar, self.nomgmt_vio] self.assertRaises(ex.ManagementPartitionNotFoundException, tpar.get_mgmt_partition, self.adpt) @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.search') @mock.patch('pypowervm.wrappers.logical_partition.LPAR.search') @mock.patch('pypowervm.util.my_partition_id') def test_get_me(self, mock_my_id, mock_lp_search, mock_vio_search): """Test get_this_partition().""" # Good path - one hit on LPAR mock_lp_search.return_value = [lpar.LPAR.wrap(self.mgmt_lpar)[0]] mock_vio_search.return_value = [] mock_my_id.return_value = 9 my_w = tpar.get_this_partition(self.adpt) self.assertEqual(9, my_w.id) self.assertEqual('089FFB20-5D19-4A8C-BB80-13650627D985', my_w.uuid) mock_lp_search.assert_called_with(self.adpt, id=9) mock_vio_search.assert_called_with(self.adpt, id=9) # Good path - one hit on VIOS mock_lp_search.reset_mock() mock_lp_search.return_value = [] mock_vio_search.return_value = [vios.VIOS.wrap(self.mgmt_vio)[0]] mock_my_id.return_value = 2 my_w = tpar.get_this_partition(self.adpt) self.assertEqual(2, my_w.id) self.assertEqual('1300C76F-9814-4A4D-B1F0-5B69352A7DEA', my_w.uuid) mock_lp_search.assert_not_called() mock_vio_search.assert_called_with(self.adpt, id=2) # Bad path - no hits mock_lp_search.return_value = [] mock_vio_search.return_value = [] self.assertRaises(ex.ThisPartitionNotFoundException, tpar.get_this_partition, self.adpt) def test_has_physical_io(self): """test partition has physical io.""" part_w = mock.Mock(io_config=mock.Mock( io_slots=[mock.Mock(description='1 Gigabit Ethernet (UTP) 4 ' 'Port Adapter PCIE Short')])) 
self.assertTrue(tpar.has_physical_io(part_w)) part_w = mock.Mock(io_config=mock.Mock( io_slots=[mock.Mock(description='test Graphics 3.0 test')])) self.assertFalse(tpar.has_physical_io(part_w)) part_w = mock.Mock(io_config=mock.Mock( io_slots=[mock.Mock(description='My 3D Controller test')])) self.assertFalse(tpar.has_physical_io(part_w)) part_w = mock.Mock(io_config=mock.Mock(io_slots=[])) self.assertFalse(tpar.has_physical_io(part_w)) class TestVios(twrap.TestWrapper): file = 'fake_vios_feed2.txt' wrapper_class_to_test = vios.VIOS def setUp(self): super(TestVios, self).setUp() sleep_p = mock.patch('time.sleep') self.mock_sleep = sleep_p.start() self.addCleanup(sleep_p.stop) vioget_p = mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') self.mock_vios_get = vioget_p.start() self.addCleanup(vioget_p.stop) def test_get_active_vioses(self): self.mock_vios_get.return_value = self.entries vioses = tpar.get_active_vioses(self.adpt) self.assertEqual(1, len(vioses)) self.mock_vios_get.assert_called_once_with(self.adpt, xag=()) vio = vioses[0] self.assertEqual(bp.LPARState.RUNNING, vio.state) self.assertEqual(bp.RMCState.ACTIVE, vio.rmc_state) self.mock_vios_get.assert_called_once_with(self.adpt, xag=()) self.mock_vios_get.reset_mock() # Test with actual xag. find_min equal to the number found - works. 
vioses = tpar.get_active_vioses(self.adpt, xag='xaglist', find_min=1) self.assertEqual(1, len(vioses)) vio = vioses[0] self.assertEqual(bp.LPARState.RUNNING, vio.state) self.assertEqual(bp.RMCState.ACTIVE, vio.rmc_state) self.mock_vios_get.assert_called_once_with(self.adpt, xag='xaglist') # Violates find_min self.assertRaises(ex.NotEnoughActiveVioses, tpar.get_active_vioses, self.adpt, find_min=2) def test_get_active_vioses_w_vios_wraps(self): mock_vios1 = mock_vios('vios1', 'running', 'active') mock_vios2 = mock_vios('vios2', 'running', 'inactive') mock_vios3 = mock_vios('mgmt', 'running', 'inactive', is_mgmt=True) vios_wraps = [mock_vios1, mock_vios2, mock_vios3] vioses = tpar.get_active_vioses(self.adpt, vios_wraps=vios_wraps) self.assertEqual(2, len(vioses)) self.mock_vios_get.assert_not_called() # The first should be the mgmt partition vio = vioses[0] self.assertEqual(bp.LPARState.RUNNING, vio.state) self.assertEqual(bp.RMCState.INACTIVE, vio.rmc_state) # The second should be the active one vio = vioses[1] self.assertEqual(bp.LPARState.RUNNING, vio.state) self.assertEqual(bp.RMCState.ACTIVE, vio.rmc_state) self.mock_vios_get.assert_not_called() def test_get_physical_wwpns(self): self.mock_vios_get.return_value = self.entries expected = {'21000024FF649104'} result = set(tpar.get_physical_wwpns(self.adpt)) self.assertSetEqual(expected, result) self.mock_vios_get.assert_called_once_with( self.adpt, xag=[c.XAG.VIO_STOR]) # Test caching self.mock_vios_get.reset_mock() result = set(tpar.get_physical_wwpns(self.adpt, force_refresh=False)) self.assertSetEqual(expected, result) self.mock_vios_get.assert_not_called() # Test force_refresh result = set(tpar.get_physical_wwpns(self.adpt, force_refresh=True)) self.assertSetEqual(expected, result) self.mock_vios_get.assert_called_once_with( self.adpt, xag=[c.XAG.VIO_STOR]) @mock.patch('pypowervm.tasks.partition.get_active_vioses') @mock.patch('pypowervm.utils.transaction.FeedTask') def test_build_active_vio_feed_task(self, 
mock_feed_task, mock_get_active_vioses): mock_get_active_vioses.return_value = ['vios1', 'vios2'] mock_feed_task.return_value = 'mock_feed' self.assertEqual('mock_feed', tpar.build_active_vio_feed_task('adpt')) mock_get_active_vioses.assert_called_once_with( 'adpt', xag=(c.XAG.VIO_STOR, c.XAG.VIO_SMAP, c.XAG.VIO_FMAP), find_min=1) @mock.patch('pypowervm.tasks.partition.get_active_vioses') def test_build_tx_feed_task_w_empty_feed(self, mock_get_active_vioses): mock_get_active_vioses.return_value = [] self.assertRaises( ex.FeedTaskEmptyFeed, tpar.build_active_vio_feed_task, mock.MagicMock()) def _mk_mock_vioses(self): # No mock_vios1 = mock_vios('vios1', bp.LPARState.NOT_ACTIVATED, bp.RMCState.INACTIVE) # No mock_vios2 = mock_vios('vios2', bp.LPARState.RUNNING, bp.RMCState.BUSY) # Yes mock_vios3 = mock_vios('vios3', bp.LPARState.RUNNING, bp.RMCState.UNKNOWN) # No mock_vios4 = mock_vios('vios4', bp.LPARState.UNKNOWN, bp.RMCState.ACTIVE) # No mock_vios5 = mock_vios('vios5', bp.LPARState.RUNNING, bp.RMCState.ACTIVE) # Yes mock_vios6 = mock_vios('vios6', bp.LPARState.RUNNING, bp.RMCState.INACTIVE) # No mock_vios7 = mock_vios('vios7', bp.LPARState.RUNNING, bp.RMCState.INACTIVE, is_mgmt=True) return [mock_vios1, mock_vios2, mock_vios3, mock_vios4, mock_vios5, mock_vios6, mock_vios7] @mock.patch('pypowervm.tasks.partition.LOG.warning') def test_timeout_short(self, mock_warn): """Short timeout because relevant VIOSes have been up a while.""" self.mock_vios_get.return_value = self._mk_mock_vioses() tpar.validate_vios_ready('adap') # We slept 120s, (24 x 5s) because all VIOSes have been up >1h self.assertEqual(24, self.mock_sleep.call_count) self.mock_sleep.assert_called_with(5) # We wound up with rmc_down_vioses mock_warn.assert_called_once_with(mock.ANY, {'time': 120, 'vioses': 'vios3, vios6'}) # We didn't raise - because some VIOSes were okay. 
@mock.patch('pypowervm.tasks.partition.LOG.warning') def test_rmc_down_vioses(self, mock_warn): """Time out waiting for up/inactive partitions, but succeed.""" vioses = self._mk_mock_vioses() # This one booted "recently" vioses[5].uptime = 3559 self.mock_vios_get.return_value = vioses tpar.validate_vios_ready('adap') # We slept 600s, (120 x 5s) because one VIOS booted "recently" self.assertEqual(120, self.mock_sleep.call_count) self.mock_sleep.assert_called_with(5) # We wound up with rmc_down_vioses mock_warn.assert_called_once_with(mock.ANY, {'time': 600, 'vioses': 'vios3, vios6'}) # We didn't raise - because some VIOSes were okay. @mock.patch('pypowervm.tasks.partition.LOG.warning') def test_no_vioses(self, mock_warn): """In the (highly unusual) case of no VIOSes, no warning, but raise.""" self.mock_vios_get.return_value = [] self.assertRaises(ex.ViosNotAvailable, tpar.validate_vios_ready, 'adp') mock_warn.assert_not_called() @mock.patch('pypowervm.tasks.partition.LOG.warning') def test_max_wait_on_exception(self, mock_warn): """VIOS.get raises repeatedly until max_wait_time is exceeded.""" self.mock_vios_get.side_effect = ValueError('foo') self.assertRaises(ex.ViosNotAvailable, tpar.validate_vios_ready, 'adp', 10) self.assertEqual(mock_warn.call_count, 3) @mock.patch('pypowervm.tasks.partition.LOG.warning') def test_exception_and_good_path(self, mock_warn): """VIOS.get raises, then succeeds with some halfsies, then succeeds.""" vios1_good = mock_vios('vios1', bp.LPARState.RUNNING, bp.RMCState.BUSY) vios2_bad = mock_vios('vios2', bp.LPARState.RUNNING, bp.RMCState.UNKNOWN) vios2_good = mock_vios('vios2', bp.LPARState.RUNNING, bp.RMCState.ACTIVE) self.mock_vios_get.side_effect = (ValueError('foo'), [vios1_good, vios2_bad], [vios1_good, vios2_good]) tpar.validate_vios_ready('adap') self.assertEqual(3, self.mock_vios_get.call_count) self.assertEqual(2, self.mock_sleep.call_count) mock_warn.assert_called_once_with(mock.ANY) 
@mock.patch('pypowervm.tasks.partition.get_mgmt_partition') @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get') def test_get_partitions(self, mock_vio_get, mock_lpar_get, mock_mgmt_get): adpt = mock.Mock() # Test with the MGMT as a VIOS mgmt = mock.Mock(uuid='1') vioses = [mock.Mock(uuid='2'), mgmt] lpars = [mock.Mock(uuid='3'), mock.Mock(uuid='4')] mock_mgmt_get.return_value = mgmt mock_vio_get.return_value = vioses mock_lpar_get.return_value = lpars # Basic case self.assertEqual(vioses + lpars, tpar.get_partitions(adpt)) # Different permutations self.assertEqual(lpars + [mgmt], tpar.get_partitions( adpt, vioses=False, mgmt=True)) self.assertEqual(vioses, tpar.get_partitions( adpt, lpars=False, mgmt=True)) # Now test with the MGMT as a LPAR vioses = [mock.Mock(uuid='2')] lpars = [mock.Mock(uuid='3'), mock.Mock(uuid='4'), mgmt] mock_vio_get.return_value = vioses mock_lpar_get.return_value = lpars # Basic case self.assertEqual(vioses + lpars, tpar.get_partitions(adpt)) # Different permutations self.assertEqual(lpars, tpar.get_partitions( adpt, vioses=False, mgmt=True)) self.assertEqual(vioses + [mgmt], tpar.get_partitions( adpt, lpars=False, mgmt=True)) @mock.patch('pypowervm.wrappers.job.Job.run_job') def test_clone_uuid(self, mock_run_job): mock_resp = mock.MagicMock() mock_resp.entry = ent.Entry( {}, ent.Element('Dummy', self.adpt), self.adpt) self.adpt.read.side_effect = [mock_resp] mock_run_job.side_effect = tju.get_parm_checker( self, '1234', [('targetLparName', 'abc')]) tpar.clone_uuid(self.adpt, '1234', 'abc') self.adpt.read.assert_called_once_with('LogicalPartition', root_id='1234', suffix_type='do', suffix_parm='CloneUUID') pypowervm-1.1.24/setup.cfg0000664000175000017500000000246113571367172015075 0ustar neoneo00000000000000[metadata] name = pypowervm summary = Python binding for the PowerVM REST API description-file = README.rst author = IBM author-email = 
kyleh@us.ibm.com,thorst@us.ibm.com,efried@us.ibm.com,clbush@us.ibm.com home-page = http://github.com/powervm/ classifier = Development Status :: 5 - Production/Stable Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = pypowervm [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = pypowervm/locale domain = pypowervm [init_catalog] domain = pypowervm output_dir = pypowervm/locale input_file = pypowervm/locale/pypowervm.pot [update_catalog] domain = pypowervm output_dir = pypowervm/locale input_file = pypowervm/locale/pypowervm.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = pypowervm/locale/pypowervm.pot [bdist_wheel] universal = 1 [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 pypowervm-1.1.24/README.rst0000664000175000017500000001504213571367171014741 0ustar neoneo00000000000000========================================== pypowervm - Python API wrapper for PowerVM ========================================== NOTE ---- Current versions should utilize the local authentication mechanism. The remote authentication mechanism is intended only for development and test purposes for the time being. Overview -------- pypowervm provides a Python-based API wrapper for interaction with IBM PowerVM-based systems. License ------- The library's license can be found in the LICENSE_ file. It must be reviewed prior to use. .. _LICENSE: LICENSE Project Structure ----------------- - ``debian/``: Debian packaging metadata and controls. - ``pypowervm/``: Project source code. 
- ``helpers/``: Decorator methods suitable for passing to the ``helpers`` parameter of the ``pypowervm.adapter.Adapter`` initializer. - ``locale/``: Translated message files for internationalization (I18N). - ``tasks/``: Modules for performing complex tasks on PowerVM objects. - ``monitor/``: Modules for tasks specific to the PowerVM Performance and Capacity Monitoring (PCM) API. - ``tests/``: Functional and unit tests. The directory and file structure mirrors that of the project code. For example, tests for module ``pypowervm/wrappers/logical_partition.py`` can be found in ``pypowervm/tests/wrappers/test_logical_partition.py``. - ``data/``: Data files used by test cases. These are generally XML dumps obtained from real PowerVM REST API servers, often via the utilities found in ``pypowervm/tests/test_utils/``. - ``helpers/``: Tests for modules under ``pypowervm/helpers/``. - ``locale/``: Directory structure containing sample internationalization (I18N) files for I18N testing. - ``tasks/``: Tests for modules under ``pypowervm/tasks/``. - ``monitor/``: Tests for modules under ``pypowervm/tasks/monitor/``. - ``test_utils/``: Utilities useful for test development and implementation. - ``utils/``: Tests for modules under ``pypowervm/utils/``. - ``wrappers/``: Tests for modules under ``pypowervm/wrappers/``. - ``pcm/``: Tests for modules under ``pypowervm/wrappers/pcm/``. - ``utils/``: Common helper utilities. - ``wrappers/``: Modules presenting intuitive hierarchical views and controls on PowerVM REST objects. Simple operations involving getting or setting single, independent attributes on an object are handled by the wrappers defined here. - ``pcm/``: Wrapper modules specific to the PowerVM Performance and Capacity Monitoring (PCM) API. Using Sonar ----------- To enable sonar code scans through tox there are a few steps involved. - Install sonar locally. See: http://www.sonarqube.org/downloads/ - Create a host mapping in /etc/hosts for the name 'sonar-server'. 
If the sonar server were on the local host then the entry might be:: 127.0.0.1 sonar-server Alternatively, you can set the environment variable SONAR_SERVER prior to invoking tox, to specify the server to use. - The following environment variable must be set in order to log onto the sonar server:: SONAR_USER SONAR_PASSWORD An example invocation:: # SONAR_USER=user SONAR_PASSWORD=password tox -e sonar - Sonar output is placed in:: .sonar/ Developer Notes --------------- - The property ``pypowervm.base_partition.IOSlot.adapter`` is deprecated and will be removed no sooner than January 1st, 2017. It has been replaced by the ``pypowervm.base_partition.IOSlot.io_adapter`` property. Removal will break compatibility with PowerVC 1.3.0.0 and 1.3.0.1. The issue is resolved as of PowerVC 1.3.0.2. - The ``xag`` argument to the ``pypowervm.wrappers.entry_wrapper.EntryWrapper.update`` method is deprecated and will be removed no sooner than January 1st, 2017. - The ``xags`` member of the ``pypowervm.wrappers.virtual_io_server.VIOS`` class is deprecated and will be removed no sooner than January 1st, 2017. Please use the members of ``pypowervm.const.XAG`` instead. - Remote Restart in a NovaLink environment is handled by the consuming management layer, not by NovaLink itself. As such, the properties ``rr_enabled`` and ``rr_state`` of ``pypowervm.wrappers.logical_partition.LPAR`` should not be used. These properties are now deprecated and will be removed no sooner than January 1st, 2017. Use the ``srr_enabled`` property instead. - The method ``pypowervm.tasks.storage.crt_lu_linked_clone`` is deprecated and will be removed no sooner than January 1st, 2017. You should now use the ``pypowervm.tasks.storage.crt_lu`` method to create a linked clone by passing the source image LU wrapper via the ``clone`` parameter. - The Adapter cache is removed as of release 1.0.0.4. Attempting to create an Adapter with ``use_cache=True`` will result in a ``CacheNotSupportedException``. 
- The property ``pypowervm.wrappers.managed_system.IOSlot.pci_sub_dev_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_subsys_dev_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_revision_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_rev_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_sub_vendor_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_subsys_vendor_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.dyn_reconfig_conn_index`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.drc_index`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.dyn_reconfig_conn_name`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.drc_name`` property. - Passing an arbitrary dictionary into the add_parms argument of ``pypowervm.tasks.power.power_on`` and ``power_off`` is deprecated. Consumers should migrate to using ``pypowervm.tasks.power_opts.PowerOnOpts`` and ``PowerOffOpts`` instead. - The ``pypowervm.tasks.power.power_off`` method is deprecated and will be removed no sooner than January 1st, 2019. Consumers should migrate to using ``pypowervm.tasks.power.PowerOp.stop`` for single power-off; or ``pypowervm.tasks.power.power_off_progressive`` for soft-retry flows. pypowervm-1.1.24/LICENSE0000664000175000017500000002363713571367171014270 0ustar neoneo00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
pypowervm-1.1.24/.pylint.rc0000664000175000017500000000207313571367171015175 0ustar neoneo00000000000000[Messages Control] # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. disable=C0111,W0511,W0142,W0622 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=([a-z_][a-z0-9_]{2,50}|setUp|tearDown)$ # Module names matching nova-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ pypowervm-1.1.24/requirements.txt0000664000175000017500000000101413571367171016530 0ustar neoneo00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. 
lxml!=3.7.0,>=3.4.1 # BSD oslo.concurrency>=3.8.0 # Apache-2.0 oslo.context>=2.12.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=3.11.0 # Apache-2.0 oslo.utils>=3.20.0 # Apache-2.0 pbr>=2.0.0 # Apache-2.0 pyasn1-modules # BSD pyasn1 # BSD pytz>=2013.6 # MIT requests!=2.12.2,!=2.13.0,>=2.10.0 # Apache-2.0 six>=1.9.0 # MIT futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD taskflow>=2.16.0 # Apache-2.0 pypowervm-1.1.24/rpm/0000775000175000017500000000000013571367172014047 5ustar neoneo00000000000000pypowervm-1.1.24/rpm/pypowervm.spec0000664000175000017500000000337513571367171017002 0ustar neoneo00000000000000# Spec file for pypowervm package Summary: Python API wrapper for PowerVM Name: pypowervm BuildArch: noarch Version: %{_pvm_version} Release: %{_pvm_release} Group: Applications/System License: IBM Corp. Packager: IBM URL: http://github.com/powervm/pypowervm Vendor: IBM Corp. Requires: python-lxml >= 3.4.1 Requires: python-oslo-i18n >= 1.2.0 Requires: python-oslo-log >= 1.0.0 Requires: python-oslo-utils >= 1.2.0 Requires: python-pbr >= 0.5.21 Requires: python-pyasn1-modules >= 0.0.5 Requires: python-pyasn1 >= 0.0.12a Requires: python-requests >= 2.3.0 Requires: python-six >= 1.7.0 Requires: python-oslo-concurrency >= 0.3.0 Requires: pytz Requires: python-futures Requires: python-taskflow >= 0.7.1 Requires: python-oslo-context %description Python API wrapper for PowerVM %build # Build logic taken from debian/rules file. 
site-packages directory is used for RHEL PYVERSION=python$(python -c 'import sys; print("%s.%s" % (sys.version_info[0], sys.version_info[1]))') python setup.py clean -a mkdir -p $RPM_BUILD_ROOT/usr/lib/$PYVERSION/site-packages/ python setup.py install --no-compile --root=$RPM_BUILD_ROOT --install-lib=/usr/lib/$PYVERSION/site-packages/ --install-scripts=/usr/lib/$PYVERSION/site-packages/ find $RPM_BUILD_ROOT/usr/lib/$PYVERSION/site-packages -type f -name "*.pyc" -delete for lc in $(ls -d pypowervm/locale/*/ | cut -f3 -d'/'); do mkdir -p $RPM_BUILD_ROOT/usr/share/locale/$lc/LC_MESSAGES python setup.py compile_catalog -f --input-file $RPM_SOURCE_DIR/pypowervm/locale/$lc/pypowervm.po --output-file $RPM_BUILD_ROOT/usr/share/locale/$lc/LC_MESSAGES/pypowervm.mo done %files %attr (755, root, root) /usr/lib/python*/site-packages/* %attr (744, root, root) /usr/share/locale/*/LC_MESSAGES/* %clean echo "Do NOT clean the buildroot directory" pypowervm-1.1.24/babel.cfg0000664000175000017500000000002113571367171014767 0ustar neoneo00000000000000[python: **.py] pypowervm-1.1.24/HACKING.rst0000664000175000017500000000145313571367171015051 0ustar neoneo00000000000000PyPowerVM Style Commandments =============================== We generally follow the guidelines set out by the OpenStack community. We've found them helpful in our development of PyPowerVM. - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on PyPowerVM Specific Commandments ---------------------------------- - [P301] LOG.warn() is not allowed. Use LOG.warning() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. 
pypowervm-1.1.24/sonar-project.properties0000664000175000017500000000313713571367171020160 0ustar neoneo00000000000000# Required metadata sonar.projectKey=com.ibm.powervm:pypowervm sonar.projectName=pypowervm sonar.projectVersion=1.0 # Comma-separated paths to directories with sources (required) sonar.sources=. # Language sonar.language=py # Encoding of the source files sonar.sourceEncoding=UTF-8 # Disable certain rules for test files sonar.issue.ignore.multicriteria=e1,e2,e3,e4 # Access to a protected member of a client class sonar.issue.ignore.multicriteria.e1.ruleKey=Pylint:W0212 sonar.issue.ignore.multicriteria.e1.resourceKey=pypowervm/tests/** # Access to a protected member of a client class # For wrapper creation, we use @classmethods that access private methods to set # initial values. Some day we may figure out a different way to do this. # Until then, disable this rule for all wrappers. sonar.issue.ignore.multicriteria.e2.ruleKey=Pylint:W0212 sonar.issue.ignore.multicriteria.e2.resourceKey=pypowervm/wrappers/** # Abstract method is not overridden # This is to work around a sonar limitation. _LUBase is itself abstract, so # shouldn't need to implement an abstract method. LU and LUEnt both inherit # from concrete classes which implement the abstract method, but those are # second in the MRO, so sonar isn't picking them up. 
sonar.issue.ignore.multicriteria.e3.ruleKey=Pylint:W0223 sonar.issue.ignore.multicriteria.e3.resourceKey=pypowervm/wrappers/storage.py # Function names should comply with a naming convention # Allow retry module to use const-ish method names for delay_func sonar.issue.ignore.multicriteria.e4.ruleKey=python:S1542 sonar.issue.ignore.multicriteria.e4.resourceKey=pypowervm/utils/retry.py pypowervm-1.1.24/ChangeLog0000664000175000017500000000006513571367172015024 0ustar neoneo00000000000000CHANGES ======= * Fix reconnet to console exception pypowervm-1.1.24/AUTHORS0000664000175000017500000000010513571367172014315 0ustar neoneo00000000000000Jenkins user vijarad1 pypowervm-1.1.24/PKG-INFO0000664000175000017500000002122613571367172014351 0ustar neoneo00000000000000Metadata-Version: 1.1 Name: pypowervm Version: 1.1.24 Summary: Python binding for the PowerVM REST API Home-page: http://github.com/powervm/ Author: IBM Author-email: kyleh@us.ibm.com,thorst@us.ibm.com,efried@us.ibm.com,clbush@us.ibm.com License: UNKNOWN Description: ========================================== pypowervm - Python API wrapper for PowerVM ========================================== NOTE ---- Current versions should utilize the local authentication mechanism. The remote authentication mechanism is intended only for development and test purposes for the time being. Overview -------- pypowervm provides a Python-based API wrapper for interaction with IBM PowerVM-based systems. License ------- The library's license can be found in the LICENSE_ file. It must be reviewed prior to use. .. _LICENSE: LICENSE Project Structure ----------------- - ``debian/``: Debian packaging metadata and controls. - ``pypowervm/``: Project source code. - ``helpers/``: Decorator methods suitable for passing to the ``helpers`` parameter of the ``pypowervm.adapter.Adapter`` initializer. - ``locale/``: Translated message files for internationalization (I18N). - ``tasks/``: Modules for performing complex tasks on PowerVM objects. 
- ``monitor/``: Modules for tasks specific to the PowerVM Performance and Capacity Monitoring (PCM) API. - ``tests/``: Functional and unit tests. The directory and file structure mirrors that of the project code. For example, tests for module ``pypowervm/wrappers/logical_partition.py`` can be found in ``pypowervm/tests/wrappers/test_logical_partition.py``. - ``data/``: Data files used by test cases. These are generally XML dumps obtained from real PowerVM REST API servers, often via the utilities found in ``pypowervm/tests/test_utils/``. - ``helpers/``: Tests for modules under ``pypowervm/helpers/``. - ``locale/``: Directory structure containing sample internationalization (I18N) files for I18N testing. - ``tasks/``: Tests for modules under ``pypowervm/tasks/``. - ``monitor/``: Tests for modules under ``pypowervm/tasks/monitor/``. - ``test_utils/``: Utilities useful for test development and implementation. - ``utils/``: Tests for modules under ``pypowervm/utils/``. - ``wrappers/``: Tests for modules under ``pypowervm/wrappers/``. - ``pcm/``: Tests for modules under ``pypowervm/wrappers/pcm/``. - ``utils/``: Common helper utilities. - ``wrappers/``: Modules presenting intuitive hierarchical views and controls on PowerVM REST objects. Simple operations involving getting or setting single, independent attributes on an object are handled by the wrappers defined here. - ``pcm/``: Wrapper modules specific to the PowerVM Performance and Capacity Monitoring (PCM) API. Using Sonar ----------- To enable sonar code scans through tox there are a few steps involved. - Install sonar locally. See: http://www.sonarqube.org/downloads/ - Create a host mapping in /etc/hosts for the name 'sonar-server'. If the sonar server were on the local host then the entry might be:: 127.0.0.1 sonar-server Alternatively, you can set the environment variable SONAR_SERVER prior to invoking tox, to specify the server to use. 
- The following environment variable must be set in order to log onto the sonar server:: SONAR_USER SONAR_PASSWORD An example invocation:: # SONAR_USER=user SONAR_PASSWORD=password tox -e sonar - Sonar output is placed in:: .sonar/ Developer Notes --------------- - The property ``pypowervm.base_partition.IOSlot.adapter`` is deprecated and will be removed no sooner than January 1st, 2017. It has been replaced by the ``pypowervm.base_partition.IOSlot.io_adapter`` property. Removal will break compatibility with PowerVC 1.3.0.0 and 1.3.0.1. The issue is resolved as of PowerVC 1.3.0.2. - The ``xag`` argument to the ``pypowervm.wrappers.entry_wrapper.EntryWrapper.update`` method is deprecated and will be removed no sooner than January 1st, 2017. - The ``xags`` member of the ``pypowervm.wrappers.virtual_io_server.VIOS`` class is deprecated and will be removed no sooner than January 1st, 2017. Please use the members of ``pypowervm.const.XAG`` instead. - Remote Restart in a NovaLink environment is handled by the consuming management layer, not by NovaLink itself. As such, the properties ``rr_enabled`` and ``rr_state`` of ``pypowervm.wrappers.logical_partition.LPAR`` should not be used. These properties are now deprecated and will be removed no sooner than January 1st, 2017. Use the ``srr_enabled`` property instead. - The method ``pypowervm.tasks.storage.crt_lu_linked_clone`` is deprecated and will be removed no sooner than January 1st, 2017. You should now use the ``pypowervm.tasks.storage.crt_lu`` method to create a linked clone by passing the source image LU wrapper via the ``clone`` parameter. - The Adapter cache is removed as of release 1.0.0.4. Attempting to create an Adapter with ``use_cache=True`` will result in a ``CacheNotSupportedException``. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_sub_dev_id`` is deprecated and will be removed no sooner than January 1st, 2019. 
It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_subsys_dev_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_revision_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_rev_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_sub_vendor_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_subsys_vendor_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.dyn_reconfig_conn_index`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.drc_index`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.dyn_reconfig_conn_name`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.drc_name`` property. - Passing an arbitrary dictionary into the add_parms argument of ``pypowervm.tasks.power.power_on`` and ``power_off`` is deprecated. Consumers should migrate to using ``pypowervm.tasks.power_opts.PowerOnOpts`` and ``PowerOffOpts`` instead. - The ``pypowervm.tasks.power.power_off`` method is deprecated and will be removed no sooner than January 1st, 2019. Consumers should migrate to using ``pypowervm.tasks.power.PowerOp.stop`` for single power-off; or ``pypowervm.tasks.power.power_off_progressive`` for soft-retry flows. 
Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 pypowervm-1.1.24/test-requirements.txt0000664000175000017500000000063513571367171017515 0ustar neoneo00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. hacking!=0.13.0,<0.14,>=0.12.0 coverage>=4.0 # Apache-2.0 discover fixtures>=3.0.0 # Apache-2.0/BSD pylint==1.4.5 # GPLv2 python-subunit>=0.0.18 # Apache-2.0/BSD sphinx>=1.5.1 # BSD oslosphinx>=4.7.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT mock>=2.0 # BSD pypowervm-1.1.24/debian/0000775000175000017500000000000013571367172014473 5ustar neoneo00000000000000pypowervm-1.1.24/debian/changelog0000664000175000017500000000023013571367171016337 0ustar neoneo00000000000000pypowervm (0.0.1-beta) unstable; urgency=low * Initial Debian package creation. -- Minh Nguyen Wed, 13 May 2015 15:53:15 -0500 pypowervm-1.1.24/debian/control0000664000175000017500000000134513571367171016100 0ustar neoneo00000000000000Source: pypowervm Section: python Priority: optional Maintainer: Eric P. 
Fried Build-Depends: debhelper(>= 9) Standards-Version: 3.9.5 Homepage: http://github.com/powervm/pypowervm Package: pypowervm Section: python Priority: optional Architecture: all Depends: ${shlibs:Depends}, ${misc:Depends}, linuxvnc(>=0.9.9), python-lxml (>=3.4.1), python-oslo.i18n (>=1.2.0), python-oslo.log (>=1.0.0), python-oslo.utils (>=1.2.0), python-pbr (>>0.5.21), python-pyasn1-modules (>=0.0.5), python-pyasn1 (>=0.0.12a), python-requests (>=2.3.0), python-six (>=1.7.0), python-oslo.concurrency (>=0.3.0), python-tz, python-concurrent.futures, python-taskflow (>=0.7.1), python-oslo.context Description: Python API wrapper for PowerVM pypowervm-1.1.24/debian/rules0000775000175000017500000000124413571367171015553 0ustar neoneo00000000000000#!/usr/bin/make -f PACKAGE=pypowervm %: dh $@ --with python2 override_dh_auto_install: python setup.py install --root=debian/pypowervm --install-layout=deb --install-lib=/usr/lib/`pyversions -d`/dist-packages/ --install-scripts=/usr/lib/`pyversions -d`/dist-packages/ ; \ for lc in $$(ls -d pypowervm/locale/*/ | cut -f3 -d'/'); do \ mkdir -p debian/pypowervm/usr/share/locale/$$lc/LC_MESSAGES ; \ python setup.py compile_catalog -f --input-file pypowervm/locale/$$lc/pypowervm.po --output-file debian/pypowervm/usr/share/locale/$$lc/LC_MESSAGES/pypowervm.mo; \ done override_dh_clean: dh_clean override_dh_install: dh_install pypowervm-1.1.24/debian/copyright0000664000175000017500000000042213571367171016423 0ustar neoneo00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: pypowervm Source: http://github.com/powervm/pypowervm Files: * Copyright: 2015 IBM Corporation License: Apache-2.0 Files: debian/* Copyright: 2015 IBM Corporation License: Apache-2.0 pypowervm-1.1.24/debian/docs0000664000175000017500000000006213571367171015343 0ustar neoneo00000000000000README.rst requirements.txt test-requirements.txt pypowervm-1.1.24/debian/README0000664000175000017500000000054213571367171015353 
0ustar neoneo00000000000000The Debian Package pypowervm ---------------------------- Overview -------- pypowervm provides a Python based API wrapper for interaction with IBM PowerVM based systems. License ------- The library's license can be found under the LICENSE file. It must be reviewed prior to use. -- Eric P. Fried Thu, 19 Nov 2015 15:28:15 -0500 pypowervm-1.1.24/debian/compat0000664000175000017500000000000213571367171015670 0ustar neoneo000000000000009 pypowervm-1.1.24/.stestr.conf0000664000175000017500000000006113571367171015516 0ustar neoneo00000000000000[DEFAULT] test_path=./pypowervm/tests top_dir=./ pypowervm-1.1.24/tox.ini0000664000175000017500000000375513571367171014575 0ustar neoneo00000000000000[tox] minversion = 1.6 envlist = py{35,27},pep8 skipsdist = True [testenv] usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} LANGUAGE=en_US LC_ALL=en_US.utf-8 OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = find sonar-runner commands = find . -type f -name "*.pyc" -delete [testenv:py27] # TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed. basepython = python2.7 commands = {[testenv]commands} stestr run {posargs} stestr slowest [testenv:py35] # TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed. basepython = python3.5 commands = {[testenv]commands} stestr run {posargs} stestr slowest [testenv:pep8] # TODO(efried): Remove this once https://github.com/tox-dev/tox/issues/425 is fixed. 
basepython = python3 commands = flake8 [testenv:venv] basepython = python3 commands = {posargs} [testenv:cover] basepython = python3 # TODO(stephenfin): Remove the PYTHON hack below in favour of a [coverage] # section once we rely on coverage 4.3+ # # https://bitbucket.org/ned/coveragepy/issues/519/ setenv = {[testenv]setenv} PYTHON=coverage run --source pypowervm --parallel-mode commands = {[testenv]commands} coverage erase stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report [flake8] ignore = exclude = .venv,.git,.tox,*egg [hacking] local-check-factory = pypowervm.hacking.checks.factory [testenv:pylint] commands = pylint pypowervm --rcfile=.pylint.rc [testenv:sonar] commands = sonar-runner -Dsonar.login={env:SONAR_USER:} -Dsonar.password={env:SONAR_PASSWORD:} -Dsonar.analysis.mode=incremental -Dsonar.scm-stats.enabled=false -Dsonar.scm.enabled=false -Dsonar.host.url=http://{env:SONAR_SERVER:sonar-server}:9000 -Dsonar.jdbc.url=jdbc:mysql://{env:SONAR_SERVER:sonar-server}:3306/sonar pypowervm-1.1.24/setup.py0000664000175000017500000000135713571367171014770 0ustar neoneo00000000000000#!/usr/bin/env python # Copyright 2014, 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True) pypowervm-1.1.24/pypowervm.egg-info/0000775000175000017500000000000013571367172017013 5ustar neoneo00000000000000pypowervm-1.1.24/pypowervm.egg-info/not-zip-safe0000664000175000017500000000000113571367172021241 0ustar neoneo00000000000000 pypowervm-1.1.24/pypowervm.egg-info/SOURCES.txt0000664000175000017500000002264413571367172020707 0ustar neoneo00000000000000.pylint.rc .stestr.conf AUTHORS ChangeLog HACKING.rst LICENSE README.rst babel.cfg requirements.txt setup.cfg setup.py sonar-project.properties test-requirements.txt tox.ini debian/README debian/changelog debian/compat debian/control debian/copyright debian/docs debian/rules pypowervm/__init__.py pypowervm/adapter.py pypowervm/const.py pypowervm/entities.py pypowervm/exceptions.py pypowervm/i18n.py pypowervm/log.py pypowervm/traits.py pypowervm/util.py pypowervm.egg-info/PKG-INFO pypowervm.egg-info/SOURCES.txt pypowervm.egg-info/dependency_links.txt pypowervm.egg-info/not-zip-safe pypowervm.egg-info/pbr.json pypowervm.egg-info/requires.txt pypowervm.egg-info/top_level.txt pypowervm/hacking/__init__.py pypowervm/hacking/checks.py pypowervm/helpers/__init__.py pypowervm/helpers/log_helper.py pypowervm/helpers/sample_helper.py pypowervm/helpers/vios_busy.py pypowervm/locale/pypowervm.pot pypowervm/locale/de/pypowervm.po pypowervm/locale/es/pypowervm.po pypowervm/locale/fr/pypowervm.po pypowervm/locale/it/pypowervm.po pypowervm/locale/ja/pypowervm.po pypowervm/locale/ko/pypowervm.po pypowervm/locale/pt-BR/pypowervm.po pypowervm/locale/ru/pypowervm.po pypowervm/locale/zh-Hans/pypowervm.po pypowervm/locale/zh-Hant/pypowervm.po pypowervm/tasks/__init__.py pypowervm/tasks/client_storage.py pypowervm/tasks/cluster_ssp.py pypowervm/tasks/cna.py pypowervm/tasks/ibmi.py pypowervm/tasks/management_console.py pypowervm/tasks/master_mode.py pypowervm/tasks/memory.py 
pypowervm/tasks/migration.py pypowervm/tasks/network_bridger.py pypowervm/tasks/partition.py pypowervm/tasks/power.py pypowervm/tasks/power_opts.py pypowervm/tasks/scsi_mapper.py pypowervm/tasks/slot_map.py pypowervm/tasks/sriov.py pypowervm/tasks/storage.py pypowervm/tasks/vfc_mapper.py pypowervm/tasks/vopt.py pypowervm/tasks/vterm.py pypowervm/tasks/hdisk/__init__.py pypowervm/tasks/hdisk/_fc.py pypowervm/tasks/hdisk/_iscsi.py pypowervm/tasks/hdisk/_rbd.py pypowervm/tasks/monitor/__init__.py pypowervm/tasks/monitor/host_cpu.py pypowervm/tasks/monitor/lpar.py pypowervm/tasks/monitor/util.py pypowervm/tests/__init__.py pypowervm/tests/lib.py pypowervm/tests/test_adapter.py pypowervm/tests/test_exceptions.py pypowervm/tests/test_fixtures.py pypowervm/tests/test_helpers.py pypowervm/tests/test_i18n.py pypowervm/tests/test_session.py pypowervm/tests/test_traits.py pypowervm/tests/test_util.py pypowervm/tests/data/cdata.xml pypowervm/tests/data/cluster.txt pypowervm/tests/data/cluster_LULinkedClone_job_template.txt pypowervm/tests/data/cluster_create_job_template.txt pypowervm/tests/data/cna_feed.txt pypowervm/tests/data/cna_feed1.txt pypowervm/tests/data/enterprise_pool_feed.txt pypowervm/tests/data/enterprise_pool_member_feed.txt pypowervm/tests/data/event.xml pypowervm/tests/data/event_feed.txt pypowervm/tests/data/fake_cna.txt pypowervm/tests/data/fake_httperror.txt pypowervm/tests/data/fake_httperror_service_unavail.txt pypowervm/tests/data/fake_lpar_feed.txt pypowervm/tests/data/fake_managedsystem.txt pypowervm/tests/data/fake_network_bridge.txt pypowervm/tests/data/fake_vios.txt pypowervm/tests/data/fake_vios_feed.txt pypowervm/tests/data/fake_vios_feed2.txt pypowervm/tests/data/fake_vios_feed3.txt pypowervm/tests/data/fake_vios_feed_multi.txt pypowervm/tests/data/fake_vios_feed_no_vg.txt pypowervm/tests/data/fake_vios_hosting_vios_feed.txt pypowervm/tests/data/fake_vios_mappings.txt pypowervm/tests/data/fake_vios_ssp_npiv.txt 
pypowervm/tests/data/fake_vios_with_volume_group_data.txt pypowervm/tests/data/fake_virtual_network_feed.txt pypowervm/tests/data/fake_virtual_switch.txt pypowervm/tests/data/fake_volume_group.txt pypowervm/tests/data/fake_volume_group2.txt pypowervm/tests/data/fake_volume_group_no_vg.txt pypowervm/tests/data/fake_volume_group_with_vio_data.txt pypowervm/tests/data/fake_vswitch_feed.txt pypowervm/tests/data/file_feed.txt pypowervm/tests/data/get_volume_group.txt pypowervm/tests/data/get_volume_group_no_rep.txt pypowervm/tests/data/job_request_power_off.txt pypowervm/tests/data/job_response_completed_failed.txt pypowervm/tests/data/job_response_completed_ok.txt pypowervm/tests/data/job_response_exception.txt pypowervm/tests/data/logon.xml pypowervm/tests/data/logon_file.xml pypowervm/tests/data/lpar.txt pypowervm/tests/data/lpar_builder.txt pypowervm/tests/data/lpar_ibmi.txt pypowervm/tests/data/lpar_pcm_data.txt pypowervm/tests/data/lpar_sections.txt pypowervm/tests/data/ltm_feed.txt pypowervm/tests/data/ltm_feed2.txt pypowervm/tests/data/ltm_feed_lpars.txt pypowervm/tests/data/lufeed.txt pypowervm/tests/data/managedsystem.txt pypowervm/tests/data/managementconsole.txt pypowervm/tests/data/managementconsole_ssh.txt pypowervm/tests/data/nbbr_network_bridge.txt pypowervm/tests/data/nbbr_network_bridge_failover.txt pypowervm/tests/data/nbbr_network_bridge_peer.txt pypowervm/tests/data/nbbr_virtual_network.txt pypowervm/tests/data/nbbr_virtual_switch.txt pypowervm/tests/data/pcm_pref.txt pypowervm/tests/data/phyp_pcm_data.txt pypowervm/tests/data/phyp_pcm_data2.txt pypowervm/tests/data/phyp_pcm_data3.txt pypowervm/tests/data/shrprocpool.txt pypowervm/tests/data/sriov_lp_feed.txt pypowervm/tests/data/ssp.txt pypowervm/tests/data/stm_feed.txt pypowervm/tests/data/sys_with_sriov.txt pypowervm/tests/data/tier.txt pypowervm/tests/data/token_file pypowervm/tests/data/upload_file.txt pypowervm/tests/data/upload_volgrp.txt pypowervm/tests/data/upload_volgrp2.txt 
pypowervm/tests/data/vfc_client_adapter_feed.txt pypowervm/tests/data/vio_multi_vscsi_mapping.txt pypowervm/tests/data/vios_pcm_data.txt pypowervm/tests/data/vios_pcm_data_sparse.txt pypowervm/tests/data/vnic_feed.txt pypowervm/tests/data/vscsibus_feed.txt pypowervm/tests/data/vswitch_feed.txt pypowervm/tests/helpers/__init__.py pypowervm/tests/helpers/test_loghelper.py pypowervm/tests/helpers/test_sample.py pypowervm/tests/helpers/test_vios_busy.py pypowervm/tests/locale/en_US/LC_MESSAGES/pypowervm.mo pypowervm/tests/locale/en_US/LC_MESSAGES/pypowervm.po pypowervm/tests/tasks/__init__.py pypowervm/tests/tasks/create_cluster.py pypowervm/tests/tasks/test_client_storage.py pypowervm/tests/tasks/test_cluster_ssp.py pypowervm/tests/tasks/test_cna.py pypowervm/tests/tasks/test_ibmi.py pypowervm/tests/tasks/test_master_mode.py pypowervm/tests/tasks/test_memory.py pypowervm/tests/tasks/test_mgmtconsole.py pypowervm/tests/tasks/test_migration.py pypowervm/tests/tasks/test_network_bridger.py pypowervm/tests/tasks/test_partition.py pypowervm/tests/tasks/test_power.py pypowervm/tests/tasks/test_power_opts.py pypowervm/tests/tasks/test_scsi_mapper.py pypowervm/tests/tasks/test_slot_map.py pypowervm/tests/tasks/test_sriov.py pypowervm/tests/tasks/test_storage.py pypowervm/tests/tasks/test_vfc_mapper.py pypowervm/tests/tasks/test_vopt.py pypowervm/tests/tasks/test_vterm.py pypowervm/tests/tasks/util.py pypowervm/tests/tasks/hdisk/__init__.py pypowervm/tests/tasks/hdisk/test_fc.py pypowervm/tests/tasks/hdisk/test_iscsi.py pypowervm/tests/tasks/hdisk/test_rbd.py pypowervm/tests/tasks/monitor/__init__.py pypowervm/tests/tasks/monitor/test_host_cpu.py pypowervm/tests/tasks/monitor/test_monitor.py pypowervm/tests/test_utils/__init__.py pypowervm/tests/test_utils/create_httpresp.py pypowervm/tests/test_utils/pvmhttp.py pypowervm/tests/test_utils/refresh_httpresp.py pypowervm/tests/test_utils/test_wrapper_abc.py pypowervm/tests/test_utils/xml_sections.py 
pypowervm/tests/utils/__init__.py pypowervm/tests/utils/test_lpar_bldr.py pypowervm/tests/utils/test_retry.py pypowervm/tests/utils/test_transaction.py pypowervm/tests/utils/test_uuid.py pypowervm/tests/utils/test_validation.py pypowervm/tests/wrappers/__init__.py pypowervm/tests/wrappers/test_cdata.py pypowervm/tests/wrappers/test_cluster.py pypowervm/tests/wrappers/test_enterprise_pool.py pypowervm/tests/wrappers/test_entry.py pypowervm/tests/wrappers/test_event.py pypowervm/tests/wrappers/test_http_error.py pypowervm/tests/wrappers/test_iocard.py pypowervm/tests/wrappers/test_job.py pypowervm/tests/wrappers/test_logical_partition.py pypowervm/tests/wrappers/test_managed_system.py pypowervm/tests/wrappers/test_mgmt_console.py pypowervm/tests/wrappers/test_monitor.py pypowervm/tests/wrappers/test_network.py pypowervm/tests/wrappers/test_search.py pypowervm/tests/wrappers/test_shared_proc_pool.py pypowervm/tests/wrappers/test_storage.py pypowervm/tests/wrappers/test_vios_file.py pypowervm/tests/wrappers/test_virtual_io_server.py pypowervm/tests/wrappers/test_wrapper_properties.py pypowervm/tests/wrappers/pcm/__init__.py pypowervm/tests/wrappers/pcm/test_lpar.py pypowervm/tests/wrappers/pcm/test_phyp.py pypowervm/tests/wrappers/pcm/test_vios.py pypowervm/utils/__init__.py pypowervm/utils/lpar_builder.py pypowervm/utils/retry.py pypowervm/utils/transaction.py pypowervm/utils/uuid.py pypowervm/utils/validation.py pypowervm/utils/wrappers.py pypowervm/wrappers/__init__.py pypowervm/wrappers/base_partition.py pypowervm/wrappers/cluster.py pypowervm/wrappers/enterprise_pool.py pypowervm/wrappers/entry_wrapper.py pypowervm/wrappers/event.py pypowervm/wrappers/http_error.py pypowervm/wrappers/iocard.py pypowervm/wrappers/job.py pypowervm/wrappers/logical_partition.py pypowervm/wrappers/managed_system.py pypowervm/wrappers/management_console.py pypowervm/wrappers/monitor.py pypowervm/wrappers/mtms.py pypowervm/wrappers/network.py pypowervm/wrappers/shared_proc_pool.py 
pypowervm/wrappers/storage.py pypowervm/wrappers/vios_file.py pypowervm/wrappers/virtual_io_server.py pypowervm/wrappers/pcm/__init__.py pypowervm/wrappers/pcm/lpar.py pypowervm/wrappers/pcm/phyp.py pypowervm/wrappers/pcm/vios.py rpm/pypowervm.specpypowervm-1.1.24/pypowervm.egg-info/pbr.json0000664000175000017500000000005713571367172020473 0ustar neoneo00000000000000{"git_version": "353cd65", "is_release": false}pypowervm-1.1.24/pypowervm.egg-info/requires.txt0000664000175000017500000000044513571367172021416 0ustar neoneo00000000000000lxml!=3.7.0,>=3.4.1 oslo.concurrency>=3.8.0 oslo.context>=2.12.0 oslo.i18n>=2.1.0 oslo.log>=3.11.0 oslo.utils>=3.20.0 pbr>=2.0.0 pyasn1-modules pyasn1 pytz>=2013.6 requests!=2.12.2,!=2.13.0,>=2.10.0 six>=1.9.0 taskflow>=2.16.0 [:(python_version=='2.7' or python_version=='2.6')] futures>=3.0 pypowervm-1.1.24/pypowervm.egg-info/PKG-INFO0000664000175000017500000002122613571367172020113 0ustar neoneo00000000000000Metadata-Version: 1.1 Name: pypowervm Version: 1.1.24 Summary: Python binding for the PowerVM REST API Home-page: http://github.com/powervm/ Author: IBM Author-email: kyleh@us.ibm.com,thorst@us.ibm.com,efried@us.ibm.com,clbush@us.ibm.com License: UNKNOWN Description: ========================================== pypowervm - Python API wrapper for PowerVM ========================================== NOTE ---- Current versions should utilize the local authentication mechanism. The remote authentication mechanism is intended only for development and test purposes for the time being. Overview -------- pypowervm provides a Python-based API wrapper for interaction with IBM PowerVM-based systems. License ------- The library's license can be found in the LICENSE_ file. It must be reviewed prior to use. .. _LICENSE: LICENSE Project Structure ----------------- - ``debian/``: Debian packaging metadata and controls. - ``pypowervm/``: Project source code. 
- ``helpers/``: Decorator methods suitable for passing to the ``helpers`` parameter of the ``pypowervm.adapter.Adapter`` initializer. - ``locale/``: Translated message files for internationalization (I18N). - ``tasks/``: Modules for performing complex tasks on PowerVM objects. - ``monitor/``: Modules for tasks specific to the PowerVM Performance and Capacity Monitoring (PCM) API. - ``tests/``: Functional and unit tests. The directory and file structure mirrors that of the project code. For example, tests for module ``pypowervm/wrappers/logical_partition.py`` can be found in ``pypowervm/tests/wrappers/test_logical_partition.py``. - ``data/``: Data files used by test cases. These are generally XML dumps obtained from real PowerVM REST API servers, often via the utilities found in ``pypowervm/tests/test_utils/``. - ``helpers/``: Tests for modules under ``pypowervm/helpers/``. - ``locale/``: Directory structure containing sample internationalization (I18N) files for I18N testing. - ``tasks/``: Tests for modules under ``pypowervm/tasks/``. - ``monitor/``: Tests for modules under ``pypowervm/tasks/monitor/``. - ``test_utils/``: Utilities useful for test development and implementation. - ``utils/``: Tests for modules under ``pypowervm/utils/``. - ``wrappers/``: Tests for modules under ``pypowervm/wrappers/``. - ``pcm/``: Tests for modules under ``pypowervm/wrappers/pcm/``. - ``utils/``: Common helper utilities. - ``wrappers/``: Modules presenting intuitive hierarchical views and controls on PowerVM REST objects. Simple operations involving getting or setting single, independent attributes on an object are handled by the wrappers defined here. - ``pcm/``: Wrapper modules specific to the PowerVM Performance and Capacity Monitoring (PCM) API. Using Sonar ----------- To enable sonar code scans through tox there are a few steps involved. - Install sonar locally. See: http://www.sonarqube.org/downloads/ - Create a host mapping in /etc/hosts for the name 'sonar-server'. 
If the sonar server were on the local host then the entry might be:: 127.0.0.1 sonar-server Alternatively, you can set the environment variable SONAR_SERVER prior to invoking tox, to specify the server to use. - The following environment variable must be set in order to log onto the sonar server:: SONAR_USER SONAR_PASSWORD An example invocation:: # SONAR_USER=user SONAR_PASSWORD=password tox -e sonar - Sonar output is placed in:: .sonar/ Developer Notes --------------- - The property ``pypowervm.base_partition.IOSlot.adapter`` is deprecated and will be removed no sooner than January 1st, 2017. It has been replaced by the ``pypowervm.base_partition.IOSlot.io_adapter`` property. Removal will break compatibility with PowerVC 1.3.0.0 and 1.3.0.1. The issue is resolved as of PowerVC 1.3.0.2. - The ``xag`` argument to the ``pypowervm.wrappers.entry_wrapper.EntryWrapper.update`` method is deprecated and will be removed no sooner than January 1st, 2017. - The ``xags`` member of the ``pypowervm.wrappers.virtual_io_server.VIOS`` class is deprecated and will be removed no sooner than January 1st, 2017. Please use the members of ``pypowervm.const.XAG`` instead. - Remote Restart in a NovaLink environment is handled by the consuming management layer, not by NovaLink itself. As such, the properties ``rr_enabled`` and ``rr_state`` of ``pypowervm.wrappers.logical_partition.LPAR`` should not be used. These properties are now deprecated and will be removed no sooner than January 1st, 2017. Use the ``srr_enabled`` property instead. - The method ``pypowervm.tasks.storage.crt_lu_linked_clone`` is deprecated and will be removed no sooner than January 1st, 2017. You should now use the ``pypowervm.tasks.storage.crt_lu`` method to create a linked clone by passing the source image LU wrapper via the ``clone`` parameter. - The Adapter cache is removed as of release 1.0.0.4. Attempting to create an Adapter with ``use_cache=True`` will result in a ``CacheNotSupportedException``. 
- The property ``pypowervm.wrappers.managed_system.IOSlot.pci_sub_dev_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_subsys_dev_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_revision_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_rev_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.pci_sub_vendor_id`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.pci_subsys_vendor_id`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.dyn_reconfig_conn_index`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.drc_index`` property. - The property ``pypowervm.wrappers.managed_system.IOSlot.dyn_reconfig_conn_name`` is deprecated and will be removed no sooner than January 1st, 2019. It has been replaced by the ``pypowervm.wrappers.managed_system.IOSlot.drc_name`` property. - Passing an arbitrary dictionary into the add_parms argument of ``pypowervm.tasks.power.power_on`` and ``power_off`` is deprecated. Consumers should migrate to using ``pypowervm.tasks.power_opts.PowerOnOpts`` and ``PowerOffOpts`` instead. - The ``pypowervm.tasks.power.power_off`` method is deprecated and will be removed no sooner than January 1st, 2019. Consumers should migrate to using ``pypowervm.tasks.power.PowerOp.stop`` for single power-off; or ``pypowervm.tasks.power.power_off_progressive`` for soft-retry flows. 
Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 pypowervm-1.1.24/pypowervm.egg-info/top_level.txt0000664000175000017500000000001213571367172021536 0ustar neoneo00000000000000pypowervm pypowervm-1.1.24/pypowervm.egg-info/dependency_links.txt0000664000175000017500000000000113571367172023061 0ustar neoneo00000000000000