# ironic-inspector-7.2.0/ironic_inspector/main.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re

import flask
from oslo_utils import uuidutils
import six
import werkzeug

from ironic_inspector import api_tools
from ironic_inspector.common import context
from ironic_inspector.common.i18n import _
from ironic_inspector.common import ironic as ir_utils
from ironic_inspector.common import swift
import ironic_inspector.conf
from ironic_inspector.conf import opts as conf_opts
from ironic_inspector import introspect
from ironic_inspector import node_cache
from ironic_inspector import process
from ironic_inspector import rules
from ironic_inspector import utils

CONF = ironic_inspector.conf.CONF

app = flask.Flask(__name__)
LOG = utils.getProcessingLogger(__name__)

MINIMUM_API_VERSION = (1, 0)
CURRENT_API_VERSION = (1, 12)
DEFAULT_API_VERSION = CURRENT_API_VERSION
_LOGGING_EXCLUDED_KEYS = ('logs',)


def _get_version():
    ver = flask.request.headers.get(conf_opts.VERSION_HEADER,
                                    _DEFAULT_API_VERSION)
    try:
        requested = tuple(int(x) for x in ver.split('.'))
    except (ValueError, TypeError):
        return error_response(_('Malformed API version: expected string '
                                'in form of X.Y'), code=400)
    return requested


def _format_version(ver):
    return '%d.%d' % ver


_DEFAULT_API_VERSION = _format_version(DEFAULT_API_VERSION)


def error_response(exc, code=500):
    res = flask.jsonify(error={'message': str(exc)})
    res.status_code = code
    LOG.debug('Returning error to client: %s', exc)
    return res


def convert_exceptions(func):
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except utils.Error as exc:
            return error_response(exc, exc.http_code)
        except werkzeug.exceptions.HTTPException as exc:
            return error_response(exc, exc.code or 400)
        except Exception as exc:
            LOG.exception('Internal server error')
            msg = _('Internal server error')
            if CONF.debug:
                msg += ' (%s): %s' % (exc.__class__.__name__, exc)
            return error_response(msg)

    return wrapper


@app.before_request
def check_api_version():
    requested = _get_version()
    if requested < MINIMUM_API_VERSION or requested > CURRENT_API_VERSION:
        return error_response(_('Unsupported API version %(requested)s, '
                                'supported range is %(min)s to %(max)s') %
                              {'requested': _format_version(requested),
                               'min': _format_version(MINIMUM_API_VERSION),
                               'max': _format_version(CURRENT_API_VERSION)},
                              code=406)


@app.after_request
def add_version_headers(res):
    res.headers[conf_opts.MIN_VERSION_HEADER] = '%s.%s' % MINIMUM_API_VERSION
    res.headers[conf_opts.MAX_VERSION_HEADER] = '%s.%s' % CURRENT_API_VERSION
    return res


def create_link_object(urls):
    links = []
    for url in urls:
        links.append({"rel": "self",
                      "href": os.path.join(flask.request.url_root, url)})
    return links
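

# Illustrative usage sketch (not part of the original module): how a client
# negotiates the API version that check_api_version() above enforces. The
# base URL and the `requests` dependency are assumptions for illustration;
# the header names come from conf_opts, as used by add_version_headers().
def _example_negotiate_version(base_url='http://127.0.0.1:5050'):
    import requests  # assumed to be installed; not an inspector dependency

    headers = {conf_opts.VERSION_HEADER: _format_version(CURRENT_API_VERSION)}
    resp = requests.get(base_url + '/v1', headers=headers)
    # Out-of-range versions are rejected with HTTP 406; every response
    # carries the supported range added by add_version_headers().
    return (resp.headers.get(conf_opts.MIN_VERSION_HEADER),
            resp.headers.get(conf_opts.MAX_VERSION_HEADER))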
def generate_resource_data(resources):
    data = []
    for resource in resources:
        item = {}
        item['name'] = str(resource).split('/')[-1]
        item['links'] = create_link_object([str(resource)[1:]])
        data.append(item)
    return data


def generate_introspection_status(node):
    """Return a dict representing current node status.

    :param node: a NodeInfo instance
    :return: dictionary
    """
    started_at = node.started_at.isoformat()
    finished_at = node.finished_at.isoformat() if node.finished_at else None

    status = {}
    status['uuid'] = node.uuid
    status['finished'] = bool(node.finished_at)
    status['state'] = node.state
    status['started_at'] = started_at
    status['finished_at'] = finished_at
    status['error'] = node.error
    status['links'] = create_link_object(
        ["v%s/introspection/%s" % (CURRENT_API_VERSION[0], node.uuid)])
    return status


def api(path, is_public_api=False, rule=None, verb_to_rule_map=None,
        **flask_kwargs):
    """Decorator to wrap api methods.

    Performs flask routing, exception conversion, generation of oslo
    context for the request and API access policy enforcement.

    :param path: flask app route path
    :param is_public_api: whether this API path should be treated as public,
        with minimal access enforcement
    :param rule: API access policy rule to enforce.
        If rule is None, the 'default' policy rule will be enforced,
        which is "deny all" if not overridden in the policy config file.
    :param verb_to_rule_map: if both rule and this are given, defines mapping
        between http verbs (uppercase) and strings to format the 'rule'
        string with
    :param flask_kwargs: the remaining kwargs are passed to flask app.route
    """
    def outer(func):
        @app.route(path, **flask_kwargs)
        @convert_exceptions
        @six.wraps(func)
        def wrapper(*args, **kwargs):
            flask.request.context = context.RequestContext.from_environ(
                flask.request.environ,
                is_public_api=is_public_api)

            if verb_to_rule_map and rule:
                policy_rule = rule.format(
                    verb_to_rule_map[flask.request.method.upper()])
            else:
                policy_rule = rule

            utils.check_auth(flask.request, rule=policy_rule)

            return func(*args, **kwargs)
        return wrapper
    return outer


@api('/', rule='introspection', is_public_api=True, methods=['GET'])
def api_root():
    versions = [
        {
            "status": "CURRENT",
            "id": '%s.%s' % CURRENT_API_VERSION,
        },
    ]

    for version in versions:
        version['links'] = create_link_object(
            ["v%s" % version['id'].split('.')[0]])

    return flask.jsonify(versions=versions)


@api('/<version>', rule='introspection:version', is_public_api=True,
     methods=['GET'])
def version_root(version):
    pat = re.compile("^\/%s\/[^\/]*?$" % version)

    resources = []
    for url in app.url_map.iter_rules():
        if pat.match(str(url)):
            resources.append(url)

    if not resources:
        raise utils.Error(_('Version not found.'), code=404)

    return flask.jsonify(resources=generate_resource_data(resources))


@api('/v1/continue', rule="introspection:continue", is_public_api=True,
     methods=['POST'])
def api_continue():
    data = flask.request.get_json(force=True)
    if not isinstance(data, dict):
        raise utils.Error(_('Invalid data: expected a JSON object, got %s') %
                          data.__class__.__name__)

    logged_data = {k: (v if k not in _LOGGING_EXCLUDED_KEYS else '<hidden>')
                   for k, v in data.items()}
    LOG.debug("Received data from the ramdisk: %s", logged_data,
              data=data)

    return flask.jsonify(process.process(data))
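

# Illustrative payload sketch (not part of the original module): the shape of
# JSON a ramdisk might POST to /v1/continue above. The exact keys depend on
# the ramdisk and the enabled processing hooks; the values here are made up.
_EXAMPLE_RAMDISK_DATA = {
    'inventory': {
        'interfaces': [{'name': 'eth0',
                        'mac_address': '52:54:00:12:34:56',
                        'ipv4_address': '192.0.2.10'}],
        'cpu': {'count': 4, 'architecture': 'x86_64'},
        'memory': {'physical_mb': 8192},
    },
    # 'logs' is the only key treated specially here: _LOGGING_EXCLUDED_KEYS
    # makes api_continue() mask it as '<hidden>' in the debug log.
    'logs': '(base64-encoded ramdisk logs)',
}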
# TODO(sambetts) Add API discovery for this endpoint
@api('/v1/introspection/<node_id>', rule="introspection:{}",
     verb_to_rule_map={'GET': 'status', 'POST': 'start'},
     methods=['GET', 'POST'])
def api_introspection(node_id):
    if flask.request.method == 'POST':
        introspect.introspect(node_id,
                              token=flask.request.headers.get('X-Auth-Token'))
        return '', 202
    else:
        node_info = node_cache.get_node(node_id)
        return flask.json.jsonify(generate_introspection_status(node_info))


@api('/v1/introspection', rule='introspection:status', methods=['GET'])
def api_introspection_statuses():
    nodes = node_cache.get_node_list(
        marker=api_tools.marker_field(),
        limit=api_tools.limit_field(default=CONF.api_max_limit)
    )
    data = {
        'introspection': [generate_introspection_status(node)
                          for node in nodes]
    }
    return flask.json.jsonify(data)


@api('/v1/introspection/<node_id>/abort', rule="introspection:abort",
     methods=['POST'])
def api_introspection_abort(node_id):
    introspect.abort(node_id,
                     token=flask.request.headers.get('X-Auth-Token'))
    return '', 202


@api('/v1/introspection/<node_id>/data', rule="introspection:data",
     methods=['GET'])
def api_introspection_data(node_id):
    if CONF.processing.store_data == 'swift':
        if not uuidutils.is_uuid_like(node_id):
            node = ir_utils.get_node(node_id, fields=['uuid'])
            node_id = node.uuid
        res = swift.get_introspection_data(node_id)
        return res, 200, {'Content-Type': 'application/json'}
    else:
        return error_response(_('Inspector is not configured to store data. '
                                'Set the [processing] store_data '
                                'configuration option to change this.'),
                              code=404)


@api('/v1/introspection/<node_id>/data/unprocessed',
     rule="introspection:reapply", methods=['POST'])
def api_introspection_reapply(node_id):
    if flask.request.content_length:
        return error_response(_('User data processing is not '
                                'supported yet'), code=400)

    if CONF.processing.store_data == 'swift':
        process.reapply(node_id)
        return '', 202
    else:
        return error_response(_('Inspector is not configured to store'
                                ' data. Set the [processing] '
                                'store_data configuration option to '
                                'change this.'), code=400)


def rule_repr(rule, short):
    result = rule.as_dict(short=short)
    result['links'] = [{
        'href': flask.url_for('api_rule', uuid=result['uuid']),
        'rel': 'self'
    }]
    return result


@api('/v1/rules', rule="introspection:rule:{}",
     verb_to_rule_map={'GET': 'get', 'POST': 'create', 'DELETE': 'delete'},
     methods=['GET', 'POST', 'DELETE'])
def api_rules():
    if flask.request.method == 'GET':
        res = [rule_repr(rule, short=True) for rule in rules.get_all()]
        return flask.jsonify(rules=res)
    elif flask.request.method == 'DELETE':
        rules.delete_all()
        return '', 204
    else:
        body = flask.request.get_json(force=True)
        if body.get('uuid') and not uuidutils.is_uuid_like(body['uuid']):
            raise utils.Error(_('Invalid UUID value'), code=400)

        rule = rules.create(conditions_json=body.get('conditions', []),
                            actions_json=body.get('actions', []),
                            uuid=body.get('uuid'),
                            description=body.get('description'))

        response_code = (200 if _get_version() < (1, 6) else 201)
        return flask.make_response(
            flask.jsonify(rule_repr(rule, short=False)), response_code)


@api('/v1/rules/<uuid>', rule="introspection:rule:{}",
     verb_to_rule_map={'GET': 'get', 'DELETE': 'delete'},
     methods=['GET', 'DELETE'])
def api_rule(uuid):
    if flask.request.method == 'GET':
        rule = rules.get(uuid)
        return flask.jsonify(rule_repr(rule, short=False))
    else:
        rules.delete(uuid)
        return '', 204


@app.errorhandler(404)
def handle_404(error):
    return error_response(error, code=404)


# ironic-inspector-7.2.0/ironic_inspector/node_cache.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Cache for nodes currently under introspection.""" import collections import contextlib import copy import datetime import json from automaton import exceptions as automaton_errors from ironicclient import exceptions from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db.sqlalchemy import utils as db_utils from oslo_utils import excutils from oslo_utils import reflection from oslo_utils import timeutils from oslo_utils import uuidutils import six from sqlalchemy.orm import exc as orm_errors from sqlalchemy import text from ironic_inspector.common.i18n import _ from ironic_inspector.common import ironic as ir_utils from ironic_inspector import db from ironic_inspector import introspection_state as istate from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger(__name__) MACS_ATTRIBUTE = 'mac' _LOCK_TEMPLATE = 'node-%s' _SEMAPHORES = lockutils.Semaphores() def _get_lock(uuid): """Get lock object for a given node UUID.""" return lockutils.internal_lock(_LOCK_TEMPLATE % uuid, semaphores=_SEMAPHORES) def _get_lock_ctx(uuid): """Get context manager yielding a lock object for a given node UUID.""" return lockutils.lock(_LOCK_TEMPLATE % uuid, semaphores=_SEMAPHORES) class NodeInfo(object): """Record about a node in the cache. This class optionally allows to acquire a lock on a node. Note that the class instance itself is NOT thread-safe, you need to create a new instance for every thread. """ def __init__(self, uuid, version_id=None, state=None, started_at=None, finished_at=None, error=None, node=None, ports=None, ironic=None, lock=None): self.uuid = uuid self.started_at = started_at self.finished_at = finished_at self.error = error self.invalidate_cache() self._version_id = version_id self._state = state self._node = node if ports is not None and not isinstance(ports, dict): ports = {p.address: p for p in ports} self._ports = ports self._attributes = None self._ironic = ironic # This is a lock on a node UUID, not on a NodeInfo object self._lock = lock if lock is not None else _get_lock(uuid) # Whether lock was acquired using this NodeInfo object self._locked = lock is not None self._fsm = None def __del__(self): if self._locked: LOG.warning('BUG: node lock was not released by the moment ' 'node info object is deleted') self._lock.release() def __str__(self): """Self represented as an UUID and a state.""" parts = [self.uuid] if self._state: parts += [_('state'), self._state] return ' '.join(parts) def acquire_lock(self, blocking=True): """Acquire a lock on the associated node. Exits with success if a lock is already acquired using this NodeInfo object. :param blocking: if True, wait for lock to be acquired, otherwise return immediately. :returns: boolean value, whether lock was acquired successfully """ if self._locked: return True LOG.debug('Attempting to acquire lock', node_info=self) if self._lock.acquire(blocking): self._locked = True LOG.debug('Successfully acquired lock', node_info=self) return True else: LOG.debug('Unable to acquire lock', node_info=self) return False def release_lock(self): """Release a lock on a node. 
Does nothing if lock was not acquired using this NodeInfo object. """ if self._locked: LOG.debug('Successfully released lock', node_info=self) self._lock.release() self._locked = False @property def version_id(self): """Get the version id""" if self._version_id is None: row = db.model_query(db.Node).get(self.uuid) if row is None: raise utils.NotFoundInCacheError(_('Node not found in the ' 'cache'), node_info=self) self._version_id = row.version_id return self._version_id def _set_version_id(self, value, session): row = self._row(session) row.version_id = value row.save(session) self._version_id = value def _row(self, session=None): """Get a row from the database with self.uuid and self.version_id""" try: # race condition if version_id changed outside of this node_info return db.model_query(db.Node, session=session).filter_by( uuid=self.uuid, version_id=self.version_id).one() except (orm_errors.NoResultFound, orm_errors.StaleDataError): raise utils.NodeStateRaceCondition(node_info=self) def _commit(self, **fields): """Commit the fields into the DB.""" LOG.debug('Committing fields: %s', fields, node_info=self) with db.ensure_transaction() as session: self._set_version_id(uuidutils.generate_uuid(), session) row = self._row(session) row.update(fields) def commit(self): """Commit current node status into the database.""" # state and version_id are updated separately self._commit(started_at=self.started_at, finished_at=self.finished_at, error=self.error) @property def state(self): """State of the node_info object.""" if self._state is None: row = self._row() self._state = row.state return self._state def _set_state(self, value): self._commit(state=value) self._state = value def _get_fsm(self): """Get an fsm instance initialized with self.state.""" if self._fsm is None: self._fsm = istate.FSM.copy(shallow=True) self._fsm.initialize(start_state=self.state) return self._fsm @contextlib.contextmanager def _fsm_ctx(self): fsm = self._get_fsm() try: yield fsm finally: if fsm.current_state != self.state: LOG.info('Updating node state: %(current)s --> %(new)s', {'current': self.state, 'new': fsm.current_state}, node_info=self) self._set_state(fsm.current_state) def fsm_event(self, event, strict=False): """Update node_info.state based on a fsm.process_event(event) call. An AutomatonException triggers an error event. If strict, node_info.finished(istate.Events.error, error=str(exc)) is called with the AutomatonException instance and a EventError raised. 
:param event: an event to process by the fsm :strict: whether to fail the introspection upon an invalid event :raises: NodeStateInvalidEvent """ with self._fsm_ctx() as fsm: LOG.debug('Executing fsm(%(state)s).process_event(%(event)s)', {'state': fsm.current_state, 'event': event}, node_info=self) try: fsm.process_event(event) except automaton_errors.NotFound as exc: msg = _('Invalid event: %s') % exc if strict: LOG.error(msg, node_info=self) # assuming an error event is always possible self.finished(istate.Events.error, error=str(exc)) else: LOG.warning(msg, node_info=self) raise utils.NodeStateInvalidEvent(str(exc), node_info=self) @property def options(self): """Node introspection options as a dict.""" if self._options is None: rows = db.model_query(db.Option).filter_by( uuid=self.uuid) self._options = {row.name: json.loads(row.value) for row in rows} return self._options @property def attributes(self): """Node look up attributes as a dict.""" if self._attributes is None: self._attributes = {} rows = db.model_query(db.Attribute).filter_by( node_uuid=self.uuid) for row in rows: self._attributes.setdefault(row.name, []).append(row.value) return self._attributes @property def ironic(self): """Ironic client instance.""" if self._ironic is None: self._ironic = ir_utils.get_client() return self._ironic def set_option(self, name, value): """Set an option for a node.""" encoded = json.dumps(value) self.options[name] = value with db.ensure_transaction() as session: db.model_query(db.Option, session=session).filter_by( uuid=self.uuid, name=name).delete() db.Option(uuid=self.uuid, name=name, value=encoded).save( session) def finished(self, event, error=None): """Record status for this node and process a terminal transition. Also deletes look up attributes from the cache. :param event: the event to process :param error: error message """ self.release_lock() self.finished_at = timeutils.utcnow() self.error = error with db.ensure_transaction() as session: self.fsm_event(event) self._commit(finished_at=self.finished_at, error=self.error) db.model_query(db.Attribute, session=session).filter_by( node_uuid=self.uuid).delete() db.model_query(db.Option, session=session).filter_by( uuid=self.uuid).delete() def add_attribute(self, name, value, session=None): """Store look up attribute for a node in the database. 
:param name: attribute name :param value: attribute value or list of possible values :param session: optional existing database session """ if not isinstance(value, list): value = [value] with db.ensure_transaction(session) as session: for v in value: db.Attribute(uuid=uuidutils.generate_uuid(), name=name, value=v, node_uuid=self.uuid).save(session) # Invalidate attributes so they're loaded on next usage self._attributes = None @classmethod def from_row(cls, row, ironic=None, lock=None, node=None): """Construct NodeInfo from a database row.""" fields = {key: row[key] for key in ('uuid', 'version_id', 'state', 'started_at', 'finished_at', 'error')} return cls(ironic=ironic, lock=lock, node=node, **fields) def invalidate_cache(self): """Clear all cached info, so that it's reloaded next time.""" self._options = None self._node = None self._ports = None self._attributes = None self._ironic = None self._fsm = None self._state = None self._version_id = None def node(self, ironic=None): """Get Ironic node object associated with the cached node record.""" if self._node is None: ironic = ironic or self.ironic self._node = ir_utils.get_node(self.uuid, ironic=ironic) return self._node def create_ports(self, ports, ironic=None): """Create one or several ports for this node. :param ports: List of ports with all their attributes e.g [{'mac': xx, 'ip': xx, 'client_id': None}, {'mac': xx, 'ip': None, 'client_id': None}] It also support the old style of list of macs. A warning is issued if port already exists on a node. :param ironic: Ironic client to use instead of self.ironic """ existing_macs = [] for port in ports: mac = port extra = {} pxe_enabled = True if isinstance(port, dict): mac = port['mac'] client_id = port.get('client_id') if client_id: extra = {'client-id': client_id} pxe_enabled = port.get('pxe', True) if mac not in self.ports(): self._create_port(mac, ironic=ironic, extra=extra, pxe_enabled=pxe_enabled) else: existing_macs.append(mac) if existing_macs: LOG.warning('Did not create ports %s as they already exist', existing_macs, node_info=self) def ports(self, ironic=None): """Get Ironic port objects associated with the cached node record. This value is cached as well, use invalidate_cache() to clean. :return: dict MAC -> port object """ if self._ports is None: ironic = ironic or self.ironic port_list = ironic.node.list_ports(self.uuid, limit=0, detail=True) self._ports = {p.address: p for p in port_list} return self._ports def _create_port(self, mac, ironic=None, **kwargs): ironic = ironic or self.ironic try: port = ironic.port.create( node_uuid=self.uuid, address=mac, **kwargs) LOG.info('Port %(uuid)s was created successfully, MAC: %(mac)s,' 'attributes: %(attrs)s', {'uuid': port.uuid, 'mac': port.address, 'attrs': kwargs}, node_info=self) except exceptions.Conflict: LOG.warning('Port %s already exists, skipping', mac, node_info=self) # NOTE(dtantsur): we didn't get port object back, so we have to # reload ports on next access self._ports = None else: self._ports[mac] = port def patch(self, patches, ironic=None): """Apply JSON patches to a node. Refreshes cached node instance. 
:param patches: JSON patches to apply :param ironic: Ironic client to use instead of self.ironic :raises: ironicclient exceptions """ ironic = ironic or self.ironic # NOTE(aarefiev): support path w/o ahead forward slash # as Ironic cli does for patch in patches: if patch.get('path') and not patch['path'].startswith('/'): patch['path'] = '/' + patch['path'] LOG.debug('Updating node with patches %s', patches, node_info=self) self._node = ironic.node.update(self.uuid, patches) def patch_port(self, port, patches, ironic=None): """Apply JSON patches to a port. :param port: port object or its MAC :param patches: JSON patches to apply :param ironic: Ironic client to use instead of self.ironic """ ironic = ironic or self.ironic ports = self.ports() if isinstance(port, six.string_types): port = ports[port] LOG.debug('Updating port %(mac)s with patches %(patches)s', {'mac': port.address, 'patches': patches}, node_info=self) new_port = ironic.port.update(port.uuid, patches) ports[port.address] = new_port def update_properties(self, ironic=None, **props): """Update properties on a node. :param props: properties to update :param ironic: Ironic client to use instead of self.ironic """ ironic = ironic or self.ironic patches = [{'op': 'add', 'path': '/properties/%s' % k, 'value': v} for k, v in props.items()] self.patch(patches, ironic) def update_capabilities(self, ironic=None, **caps): """Update capabilities on a node. :param caps: capabilities to update :param ironic: Ironic client to use instead of self.ironic """ existing = ir_utils.capabilities_to_dict( self.node().properties.get('capabilities')) existing.update(caps) self.update_properties( ironic=ironic, capabilities=ir_utils.dict_to_capabilities(existing)) def delete_port(self, port, ironic=None): """Delete port. :param port: port object or its MAC :param ironic: Ironic client to use instead of self.ironic """ ironic = ironic or self.ironic ports = self.ports() if isinstance(port, six.string_types): port = ports[port] ironic.port.delete(port.uuid) del ports[port.address] def get_by_path(self, path): """Get field value by ironic-style path (e.g. /extra/foo). :param path: path to a field :returns: field value :raises: KeyError if field was not found """ path = path.strip('/') try: if '/' in path: prop, key = path.split('/', 1) return getattr(self.node(), prop)[key] else: return getattr(self.node(), path) except AttributeError: raise KeyError(path) def replace_field(self, path, func, **kwargs): """Replace a field on ironic node. :param path: path to a field as used by the ironic client :param func: function accepting an old value and returning a new one :param kwargs: if 'default' value is passed here, it will be used when no existing value is found. :raises: KeyError if value is not found and default is not set :raises: everything that patch() may raise """ ironic = kwargs.pop("ironic", None) or self.ironic try: value = self.get_by_path(path) op = 'replace' except KeyError: if 'default' in kwargs: value = kwargs['default'] op = 'add' else: raise ref_value = copy.deepcopy(value) value = func(value) if value != ref_value: self.patch([{'op': op, 'path': path, 'value': value}], ironic) def triggers_fsm_error_transition(errors=(Exception,), no_errors=(utils.NodeStateInvalidEvent, utils.NodeStateRaceCondition)): """Trigger an fsm error transition upon certain errors. It is assumed the first function arg of the decorated function is always a NodeInfo instance. :param errors: a tuple of exceptions upon which an error event is triggered. Re-raised. 
:param no_errors: a tuple of exceptions that won't trigger the error event. """ def outer(func): @six.wraps(func) def inner(node_info, *args, **kwargs): ret = None try: ret = func(node_info, *args, **kwargs) except no_errors as exc: LOG.debug('Not processing error event for the ' 'exception: %(exc)s raised by %(func)s', {'exc': exc, 'func': reflection.get_callable_name(func)}, node_info=node_info) except errors as exc: with excutils.save_and_reraise_exception(): LOG.error('Processing the error event because of an ' 'exception %(exc_type)s: %(exc)s raised by ' '%(func)s', {'exc_type': type(exc), 'exc': exc, 'func': reflection.get_callable_name(func)}, node_info=node_info) # an error event should be possible from all states node_info.finished(istate.Events.error, error=str(exc)) return ret return inner return outer def fsm_event_before(event, strict=False): """Trigger an fsm event before the function execution. It is assumed the first function arg of the decorated function is always a NodeInfo instance. :param event: the event to process before the function call :param strict: make an invalid fsm event trigger an error event """ def outer(func): @six.wraps(func) def inner(node_info, *args, **kwargs): LOG.debug('Processing event %(event)s before calling ' '%(func)s', {'event': event, 'func': func}, node_info=node_info) node_info.fsm_event(event, strict=strict) return func(node_info, *args, **kwargs) return inner return outer def fsm_event_after(event, strict=False): """Trigger an fsm event after the function execution. It is assumed the first function arg of the decorated function is always a NodeInfo instance. :param event: the event to process after the function call :param strict: make an invalid fsm event trigger an error event """ def outer(func): @six.wraps(func) def inner(node_info, *args, **kwargs): ret = func(node_info, *args, **kwargs) LOG.debug('Processing event %(event)s after calling ' '%(func)s', {'event': event, 'func': func}, node_info=node_info) node_info.fsm_event(event, strict=strict) return ret return inner return outer def fsm_transition(event, reentrant=True, **exc_kwargs): """Decorate a function to perform a (non-)reentrant transition. If True, reentrant transition will be performed at the end of a function call. If False, the transition will be performed before the function call. The function is decorated with the triggers_fsm_error_transition decorator as well. :param event: the event to bind the transition to. :param reentrant: whether the transition is reentrant. :param exc_kwargs: passed on to the triggers_fsm_error_transition decorator """ def outer(func): inner = triggers_fsm_error_transition(**exc_kwargs)(func) if not reentrant: return fsm_event_before(event, strict=True)(inner) return fsm_event_after(event)(inner) return outer def release_lock(func): """Decorate a node_info-function to release the node_info lock. Assumes the first parameter of the function func is always a NodeInfo instance. """ @six.wraps(func) def inner(node_info, *args, **kwargs): try: return func(node_info, *args, **kwargs) finally: # FIXME(milan) hacking the test cases to work # with release_lock.assert_called_once... if node_info._locked: node_info.release_lock() return inner def start_introspection(uuid, **kwargs): """Start the introspection of a node. If a node_info record exists in the DB, a start transition is used rather than dropping the record in order to check for the start transition validity in particular node state. 
:param uuid: Ironic node UUID :param kwargs: passed on to add_node() :raises: NodeStateInvalidEvent in case the start transition is invalid in the current node state :raises: NodeStateRaceCondition if a mismatch was detected between the node_info cache and the DB :returns: NodeInfo """ with db.ensure_transaction(): node_info = NodeInfo(uuid) # check that the start transition is possible try: node_info.fsm_event(istate.Events.start) except utils.NotFoundInCacheError: # node not found while in the fsm_event handler LOG.debug('Node missing in the cache; adding it now', node_info=node_info) state = istate.States.starting else: state = node_info.state return add_node(uuid, state, **kwargs) def add_node(uuid, state, **attributes): """Store information about a node under introspection. All existing information about this node is dropped. Empty values are skipped. :param uuid: Ironic node UUID :param state: The initial state of the node :param attributes: attributes known about this node (like macs, BMC etc); also ironic client instance may be passed under 'ironic' :returns: NodeInfo """ started_at = timeutils.utcnow() with db.ensure_transaction() as session: _delete_node(uuid) version_id = uuidutils.generate_uuid() db.Node(uuid=uuid, state=state, version_id=version_id, started_at=started_at).save(session) node_info = NodeInfo(uuid=uuid, state=state, started_at=started_at, version_id=version_id, ironic=attributes.pop('ironic', None)) for (name, value) in attributes.items(): if not value: continue node_info.add_attribute(name, value, session=session) return node_info def delete_nodes_not_in_list(uuids): """Delete nodes which don't exist in Ironic node UUIDs. :param uuids: Ironic node UUIDs """ inspector_uuids = _list_node_uuids() for uuid in inspector_uuids - uuids: LOG.warning('Node %s was deleted from Ironic, dropping from Ironic ' 'Inspector database', uuid) with _get_lock_ctx(uuid): _delete_node(uuid) def _delete_node(uuid, session=None): """Delete information about a node. :param uuid: Ironic node UUID :param session: optional existing database session """ with db.ensure_transaction(session) as session: db.model_query(db.Attribute, session=session).filter_by( node_uuid=uuid).delete() for model in (db.Option, db.Node): db.model_query(model, session=session).filter_by(uuid=uuid).delete() def introspection_active(): """Check if introspection is active for at least one node.""" # FIXME(dtantsur): is there a better way to express it? return (db.model_query(db.Node.uuid).filter_by(finished_at=None).first() is not None) def active_macs(): """List all MAC's that are on introspection right now.""" return ({x.value for x in db.model_query(db.Attribute.value). filter_by(name=MACS_ATTRIBUTE)}) def _list_node_uuids(): """Get all nodes' uuid from cache. :returns: Set of nodes' uuid. """ return {x.uuid for x in db.model_query(db.Node.uuid)} def get_node(node_id, ironic=None, locked=False): """Get node from cache. :param node_id: node UUID or name. :param ironic: optional ironic client instance :param locked: if True, get a lock on node before fetching its data :returns: structure NodeInfo. 
""" if uuidutils.is_uuid_like(node_id): node = None uuid = node_id else: node = ir_utils.get_node(node_id, ironic=ironic) uuid = node.uuid if locked: lock = _get_lock(uuid) lock.acquire() else: lock = None try: row = db.model_query(db.Node).filter_by(uuid=uuid).first() if row is None: raise utils.Error(_('Could not find node %s in cache') % uuid, code=404) return NodeInfo.from_row(row, ironic=ironic, lock=lock, node=node) except Exception: with excutils.save_and_reraise_exception(): if lock is not None: lock.release() def find_node(**attributes): """Find node in cache. Looks up a node based on attributes in a best-match fashion. This function acquires a lock on a node. :param attributes: attributes known about this node (like macs, BMC etc) also ironic client instance may be passed under 'ironic' :returns: structure NodeInfo with attributes ``uuid`` and ``created_at`` :raises: Error if node is not found or multiple nodes match the attributes """ ironic = attributes.pop('ironic', None) # NOTE(dtantsur): sorting is not required, but gives us predictability found = collections.Counter() for (name, value) in sorted(attributes.items()): if not value: LOG.debug('Empty value for attribute %s', name) continue if not isinstance(value, list): value = [value] LOG.debug('Trying to use %s of value %s for node look up', name, value) value_list = [] for v in value: value_list.append("name='%s' AND value='%s'" % (name, v)) stmt = ('select distinct node_uuid from attributes where ' + ' OR '.join(value_list)) rows = (db.model_query(db.Attribute.node_uuid).from_statement( text(stmt)).all()) found.update(row.node_uuid for row in rows) if not found: raise utils.NotFoundInCacheError(_( 'Could not find a node for attributes %s') % attributes) most_common = found.most_common() LOG.debug('The following nodes match the attributes: %(attributes)s, ' 'scoring: %(most_common)s', {'most_common': ', '.join('%s: %d' % tpl for tpl in most_common), 'attributes': ', '.join('%s=%s' % tpl for tpl in attributes.items())}) # NOTE(milan) most_common is sorted, higher scores first highest_score = most_common[0][1] found = [item[0] for item in most_common if highest_score == item[1]] if len(found) > 1: raise utils.Error(_( 'Multiple nodes match the same number of attributes ' '%(attr)s: %(found)s') % {'attr': attributes, 'found': found}, code=404) uuid = found.pop() node_info = NodeInfo(uuid=uuid, ironic=ironic) node_info.acquire_lock() try: row = (db.model_query(db.Node.started_at, db.Node.finished_at). filter_by(uuid=uuid).first()) if not row: raise utils.Error(_( 'Could not find node %s in introspection cache, ' 'probably it\'s not on introspection now') % uuid, code=404) if row.finished_at: raise utils.Error(_( 'Introspection for node %(node)s already finished on ' '%(finish)s') % {'node': uuid, 'finish': row.finished_at}) node_info.started_at = row.started_at return node_info except Exception: with excutils.save_and_reraise_exception(): node_info.release_lock() def clean_up(): """Clean up the cache. * Finish introspection for timed out nodes. * Drop outdated node status information. 
    :return: list of timed out node UUIDs
    """
    if CONF.node_status_keep_time > 0:
        status_keep_threshold = (timeutils.utcnow() - datetime.timedelta(
            seconds=CONF.node_status_keep_time))
        with db.ensure_transaction() as session:
            db.model_query(db.Node, session=session).filter(
                db.Node.finished_at.isnot(None),
                db.Node.finished_at < status_keep_threshold).delete()

    timeout = CONF.timeout
    if timeout <= 0:
        return []

    threshold = timeutils.utcnow() - datetime.timedelta(seconds=timeout)
    uuids = [row.uuid for row in
             db.model_query(db.Node.uuid).filter(
                 db.Node.started_at < threshold,
                 db.Node.finished_at.is_(None)).all()]

    if not uuids:
        return []

    LOG.error('Introspection for nodes %s has timed out', uuids)
    for u in uuids:
        node_info = get_node(u, locked=True)
        try:
            if node_info.finished_at or node_info.started_at > threshold:
                continue
            if node_info.state != istate.States.waiting:
                LOG.error('Something went wrong, timeout occurred '
                          'while introspection in "%s" state',
                          node_info.state,
                          node_info=node_info)
            node_info.finished(
                istate.Events.timeout, error='Introspection timeout')
        finally:
            node_info.release_lock()

    return uuids


def create_node(driver, ironic=None, **attributes):
    """Create ironic node and cache it.

    * Create new node in ironic.
    * Cache it in inspector.
    * Sets node_info state to enrolling.

    :param driver: driver for Ironic node.
    :param ironic: ironic client instance.
    :param attributes: dict, additional keyword arguments to pass
                       to the ironic client on node creation.
    :return: NodeInfo, or None if an error occurred.
    """
    if ironic is None:
        ironic = ir_utils.get_client()
    try:
        node = ironic.node.create(driver=driver, **attributes)
    except exceptions.InvalidAttribute as e:
        LOG.error('Failed to create new node: %s', e)
    else:
        LOG.info('Node %s was created successfully', node.uuid)
        return add_node(node.uuid, istate.States.enrolling, ironic=ironic)


def get_node_list(ironic=None, marker=None, limit=None):
    """Get node list from the cache.

    The list of the nodes is ordered based on the (started_at, uuid)
    attribute pair, newer items first.

    :param ironic: optional ironic client instance
    :param marker: pagination marker (a UUID or None)
    :param limit: pagination limit; None for default CONF.api_max_limit
    :returns: a list of NodeInfo instances.
    """
    if marker is not None:
        # uuid marker -> row marker for pagination
        marker = db.model_query(db.Node).get(marker)
        if marker is None:
            raise utils.Error(_('Node not found for marker: %s') % marker,
                              code=404)

    rows = db.model_query(db.Node)
    # ordered based on (started_at, uuid); newer first
    rows = db_utils.paginate_query(rows, db.Node, limit,
                                   ('started_at', 'uuid'),
                                   marker=marker, sort_dir='desc')
    return [NodeInfo.from_row(row, ironic=ironic) for row in rows]


# ironic-inspector-7.2.0/ironic_inspector/common/context.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
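# Illustrative usage sketch (not part of the original module): main.py
# builds a RequestContext per request via RequestContext.from_environ(
# flask.request.environ, is_public_api=...); the dict returned by
# to_policy_values() is what the oslo.policy rules evaluate, e.g.:
#
#   ctx = RequestContext.from_environ(environ, is_public_api=True)
#   assert ctx.to_policy_values()['is_public_api'] is True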
from oslo_context import context class RequestContext(context.RequestContext): """Extends security contexts from the oslo.context library.""" def __init__(self, is_public_api=False, **kwargs): """Initialize the RequestContext :param is_public_api: Specifies whether the request should be processed without authentication. :param kwargs: additional arguments passed to oslo.context. """ super(RequestContext, self).__init__(**kwargs) self.is_public_api = is_public_api def to_policy_values(self): policy_values = super(RequestContext, self).to_policy_values() policy_values.update({'is_public_api': self.is_public_api}) return policy_values @classmethod def from_dict(cls, values, **kwargs): kwargs.setdefault('is_public_api', values.get('is_public_api', False)) return super(RequestContext, RequestContext).from_dict(values, **kwargs) @classmethod def from_environ(cls, environ, **kwargs): kwargs.setdefault('is_public_api', environ.get('is_public_api', False)) return super(RequestContext, RequestContext).from_environ(environ, **kwargs) ironic-inspector-7.2.0/ironic_inspector/common/lldp_tlvs.py0000666000175100017510000002440313241323457024262 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Link Layer Discovery Protocol TLVs """ import functools # See http://construct.readthedocs.io/en/latest/index.html import construct from construct import core import netaddr from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) # Constants defined according to 802.1AB-2016 LLDP spec # https://standards.ieee.org/findstds/standard/802.1AB-2016.html # TLV types LLDP_TLV_END_LLDPPDU = 0 LLDP_TLV_CHASSIS_ID = 1 LLDP_TLV_PORT_ID = 2 LLDP_TLV_TTL = 3 LLDP_TLV_PORT_DESCRIPTION = 4 LLDP_TLV_SYS_NAME = 5 LLDP_TLV_SYS_DESCRIPTION = 6 LLDP_TLV_SYS_CAPABILITIES = 7 LLDP_TLV_MGMT_ADDRESS = 8 LLDP_TLV_ORG_SPECIFIC = 127 # 802.1Q defines from http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D LLDP_802dot1_OUI = "0080c2" # subtypes dot1_PORT_VLANID = 1 dot1_PORT_PROTOCOL_VLANID = 2 dot1_VLAN_NAME = 3 dot1_PROTOCOL_IDENTITY = 4 dot1_MANAGEMENT_VID = 6 dot1_LINK_AGGREGATION = 7 # 802.3 defines from http://standards.ieee.org/about/get/802/802.3.html, # section 79 LLDP_802dot3_OUI = "00120f" # Subtypes dot3_MACPHY_CONFIG_STATUS = 1 dot3_LINK_AGGREGATION = 3 # Deprecated, but still in use dot3_MTU = 4 def bytes_to_int(obj): """Convert bytes to an integer :param: obj - array of bytes """ return functools.reduce(lambda x, y: x << 8 | y, obj) def mapping_for_enum(mapping): """Return tuple used for keys as a dict :param: mapping - dict with tuple as keys """ return dict(mapping.keys()) def mapping_for_switch(mapping): """Return dict from values :param: mapping - dict with tuple as keys """ return {key[0]: value for key, value in mapping.items()} IPv4Address = core.ExprAdapter( core.Byte[4], encoder=lambda obj, ctx: netaddr.IPAddress(obj).words, decoder=lambda obj, ctx: str(netaddr.IPAddress(bytes_to_int(obj))) ) IPv6Address = core.ExprAdapter( core.Byte[16], encoder=lambda obj, ctx: 
netaddr.IPAddress(obj).words, decoder=lambda obj, ctx: str(netaddr.IPAddress(bytes_to_int(obj))) ) MACAddress = core.ExprAdapter( core.Byte[6], encoder=lambda obj, ctx: netaddr.EUI(obj).words, decoder=lambda obj, ctx: str(netaddr.EUI(bytes_to_int(obj), dialect=netaddr.mac_unix_expanded)) ) IANA_ADDRESS_FAMILY_ID_MAPPING = { ('ipv4', 1): IPv4Address, ('ipv6', 2): IPv6Address, ('mac', 6): MACAddress, } IANAAddress = core.Embedded(core.Struct( 'family' / core.Enum(core.Int8ub, **mapping_for_enum( IANA_ADDRESS_FAMILY_ID_MAPPING)), 'value' / core.Switch(construct.this.family, mapping_for_switch( IANA_ADDRESS_FAMILY_ID_MAPPING)))) # Note that 'GreedyString()' is used in cases where string len is not defined CHASSIS_ID_MAPPING = { ('entPhysAlias_c', 1): core.Struct('value' / core.GreedyString("utf8")), ('ifAlias', 2): core.Struct('value' / core.GreedyString("utf8")), ('entPhysAlias_p', 3): core.Struct('value' / core.GreedyString("utf8")), ('mac_address', 4): core.Struct('value' / MACAddress), ('IANA_address', 5): IANAAddress, ('ifName', 6): core.Struct('value' / core.GreedyString("utf8")), ('local', 7): core.Struct('value' / core.GreedyString("utf8")) } # # Basic Management Set TLV field definitions # # Chassis ID value is based on the subtype ChassisId = core.Struct( 'subtype' / core.Enum(core.Byte, **mapping_for_enum( CHASSIS_ID_MAPPING)), 'value' / core.Embedded(core.Switch(construct.this.subtype, mapping_for_switch(CHASSIS_ID_MAPPING))) ) PORT_ID_MAPPING = { ('ifAlias', 1): core.Struct('value' / core.GreedyString("utf8")), ('entPhysicalAlias', 2): core.Struct('value' / core.GreedyString("utf8")), ('mac_address', 3): core.Struct('value' / MACAddress), ('IANA_address', 4): IANAAddress, ('ifName', 5): core.Struct('value' / core.GreedyString("utf8")), ('local', 7): core.Struct('value' / core.GreedyString("utf8")) } # Port ID value is based on the subtype PortId = core.Struct( 'subtype' / core.Enum(core.Byte, **mapping_for_enum( PORT_ID_MAPPING)), 'value' / core.Embedded(core.Switch(construct.this.subtype, mapping_for_switch(PORT_ID_MAPPING))) ) PortDesc = core.Struct('value' / core.GreedyString("utf8")) SysName = core.Struct('value' / core.GreedyString("utf8")) SysDesc = core.Struct('value' / core.GreedyString("utf8")) MgmtAddress = core.Struct( 'len' / core.Int8ub, 'family' / core.Enum(core.Int8ub, **mapping_for_enum( IANA_ADDRESS_FAMILY_ID_MAPPING)), 'address' / core.Switch(construct.this.family, mapping_for_switch( IANA_ADDRESS_FAMILY_ID_MAPPING)) ) Capabilities = core.BitStruct( core.Padding(5), 'tpmr' / core.Bit, 'svlan' / core.Bit, 'cvlan' / core.Bit, 'station' / core.Bit, 'docsis' / core.Bit, 'telephone' / core.Bit, 'router' / core.Bit, 'wlan' / core.Bit, 'bridge' / core.Bit, 'repeater' / core.Bit, core.Padding(1) ) SysCapabilities = core.Struct( 'system' / Capabilities, 'enabled' / Capabilities ) OrgSpecific = core.Struct( 'oui' / core.Bytes(3), 'subtype' / core.Int8ub ) # # 802.1Q TLV field definitions # See http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D # Dot1_UntaggedVlanId = core.Struct('value' / core.Int16ub) Dot1_PortProtocolVlan = core.Struct( 'flags' / core.BitStruct( core.Padding(5), 'enabled' / core.Flag, 'supported' / core.Flag, core.Padding(1), ), 'vlanid' / core.Int16ub ) Dot1_VlanName = core.Struct( 'vlanid' / core.Int16ub, 'name_len' / core.Rebuild(core.Int8ub, construct.len_(construct.this.value)), 'vlan_name' / core.String(construct.this.name_len, "utf8") ) Dot1_ProtocolIdentity = core.Struct( 'len' / core.Rebuild(core.Int8ub, 
        construct.len_(construct.this.value)),
    'protocol' / core.Bytes(construct.this.len)
)

Dot1_MgmtVlanId = core.Struct('value' / core.Int16ub)

Dot1_LinkAggregationId = core.Struct(
    'status' / core.BitStruct(
        core.Padding(6),
        'enabled' / core.Flag,
        'supported' / core.Flag
    ),
    'portid' / core.Int32ub
)

#
# 802.3 TLV field definitions
# See http://standards.ieee.org/about/get/802/802.3.html,
# section 79
#


def get_autoneg_cap(pmd):
    """Get autonegotiated capability strings

    This returns a list of capability strings from the
    Physical Media Dependent (PMD) capability bits.

    :param pmd: PMD bits
    :return: Sorted list containing capability strings
    """
    caps_set = set()

    pmd_map = [
        (pmd._10base_t_hdx, '10BASE-T hdx'),
        (pmd._10base_t_fdx, '10BASE-T fdx'),
        (pmd._10base_t4, '10BASE-T4'),
        (pmd._100base_tx_hdx, '100BASE-TX hdx'),
        (pmd._100base_tx_fdx, '100BASE-TX fdx'),
        (pmd._100base_t2_hdx, '100BASE-T2 hdx'),
        (pmd._100base_t2_fdx, '100BASE-T2 fdx'),
        (pmd.pause_fdx, 'PAUSE fdx'),
        (pmd.asym_pause, 'Asym PAUSE fdx'),
        (pmd.sym_pause, 'Sym PAUSE fdx'),
        (pmd.asym_sym_pause, 'Asym and Sym PAUSE fdx'),
        (pmd._1000base_x_hdx, '1000BASE-X hdx'),
        (pmd._1000base_x_fdx, '1000BASE-X fdx'),
        (pmd._1000base_t_hdx, '1000BASE-T hdx'),
        (pmd._1000base_t_fdx, '1000BASE-T fdx')]

    for bit, cap in pmd_map:
        if bit:
            caps_set.add(cap)

    return sorted(caps_set)


Dot3_MACPhy_Config_Status = core.Struct(
    'autoneg' / core.BitStruct(
        core.Padding(6),
        'enabled' / core.Flag,
        'supported' / core.Flag,
    ),
    # See IANAifMauAutoNegCapBits
    # RFC 4836, Definitions of Managed Objects for IEEE 802.3
    'pmd_autoneg' / core.BitStruct(
        core.Padding(1),
        '_10base_t_hdx' / core.Bit,
        '_10base_t_fdx' / core.Bit,
        '_10base_t4' / core.Bit,
        '_100base_tx_hdx' / core.Bit,
        '_100base_tx_fdx' / core.Bit,
        '_100base_t2_hdx' / core.Bit,
        '_100base_t2_fdx' / core.Bit,
        'pause_fdx' / core.Bit,
        'asym_pause' / core.Bit,
        'sym_pause' / core.Bit,
        'asym_sym_pause' / core.Bit,
        '_1000base_x_hdx' / core.Bit,
        '_1000base_x_fdx' / core.Bit,
        '_1000base_t_hdx' / core.Bit,
        '_1000base_t_fdx' / core.Bit
    ),
    'mau_type' / core.Int16ub
)

# See ifMauTypeList in
# RFC 4836, Definitions of Managed Objects for IEEE 802.3
OPER_MAU_TYPES = {
    0: "Unknown",
    1: "AUI",
    2: "10BASE-5",
    3: "FOIRL",
    4: "10BASE-2",
    5: "10BASE-T duplex mode unknown",
    6: "10BASE-FP",
    7: "10BASE-FB",
    8: "10BASE-FL duplex mode unknown",
    9: "10BROAD36",
    10: "10BASE-T half duplex",
    11: "10BASE-T full duplex",
    12: "10BASE-FL half duplex",
    13: "10BASE-FL full duplex",
    14: "100 BASE-T4",
    15: "100BASE-TX half duplex",
    16: "100BASE-TX full duplex",
    17: "100BASE-FX half duplex",
    18: "100BASE-FX full duplex",
    19: "100BASE-T2 half duplex",
    20: "100BASE-T2 full duplex",
    21: "1000BASE-X half duplex",
    22: "1000BASE-X full duplex",
    23: "1000BASE-LX half duplex",
    24: "1000BASE-LX full duplex",
    25: "1000BASE-SX half duplex",
    26: "1000BASE-SX full duplex",
    27: "1000BASE-CX half duplex",
    28: "1000BASE-CX full duplex",
    29: "1000BASE-T half duplex",
    30: "1000BASE-T full duplex",
    31: "10GBASE-X",
    32: "10GBASE-LX4",
    33: "10GBASE-R",
    34: "10GBASE-ER",
    35: "10GBASE-LR",
    36: "10GBASE-SR",
    37: "10GBASE-W",
    38: "10GBASE-EW",
    39: "10GBASE-LW",
    40: "10GBASE-SW",
    41: "10GBASE-CX4",
    42: "2BASE-TL",
    43: "10PASS-TS",
    44: "100BASE-BX10D",
    45: "100BASE-BX10U",
    46: "100BASE-LX10",
    47: "1000BASE-BX10D",
    48: "1000BASE-BX10U",
    49: "1000BASE-LX10",
    50: "1000BASE-PX10D",
    51: "1000BASE-PX10U",
    52: "1000BASE-PX20D",
    53: "1000BASE-PX20U",
}

Dot3_MTU = core.Struct('value' / core.Int16ub)
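

# Illustrative usage sketch (not part of the original module): decoding a raw
# Chassis ID TLV value with the constructs above. The byte string is made up:
# subtype 4 ('mac_address') followed by six MAC octets.
def _example_parse_chassis_id():
    raw = bytes(bytearray([4, 0x52, 0x54, 0x00, 0x12, 0x34, 0x56]))
    parsed = ChassisId.parse(raw)
    # Expected (sketch): parsed.subtype == 'mac_address' and
    # parsed.value == '52:54:00:12:34:56'
    return parsed.subtype, parsed.value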
# ironic-inspector-7.2.0/ironic_inspector/common/swift.py

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Mostly copied from ironic/common/swift.py

import json

from oslo_config import cfg
from swiftclient import client as swift_client
from swiftclient import exceptions as swift_exceptions

from ironic_inspector.common.i18n import _
from ironic_inspector.common import keystone
from ironic_inspector import utils

CONF = cfg.CONF

OBJECT_NAME_PREFIX = 'inspector_data'
SWIFT_SESSION = None


def reset_swift_session():
    """Reset the global session variable.

    Mostly useful for unit tests.
    """
    global SWIFT_SESSION
    SWIFT_SESSION = None


class SwiftAPI(object):
    """API for communicating with Swift."""

    def __init__(self):
        """Constructor for creating a SwiftAPI object.

        Authentication is loaded from config file.
        """
        global SWIFT_SESSION
        if not SWIFT_SESSION:
            SWIFT_SESSION = keystone.get_session('swift')

        adapter_opts = dict()
        # TODO(pas-ha): remove handling deprecated options in Rocky
        if CONF.swift.os_region and not CONF.swift.region_name:
            adapter_opts['region_name'] = CONF.swift.os_region

        adapter = keystone.get_adapter('swift', session=SWIFT_SESSION,
                                       **adapter_opts)
        # TODO(pas-ha) reverse-construct SSL-related session options here
        params = {
            'os_options': {
                'object_storage_url': adapter.get_endpoint()}}

        self.connection = swift_client.Connection(session=SWIFT_SESSION,
                                                  **params)

    def create_object(self, object, data, container=CONF.swift.container,
                      headers=None):
        """Uploads a given string to Swift.

        :param object: The name of the object in Swift
        :param data: string data to put in the object
        :param container: The name of the container for the object.
        :param headers: the headers for the object to pass to Swift
        :returns: The Swift UUID of the object
        :raises: utils.Error, if any operation with Swift fails.
        """
        try:
            self.connection.put_container(container)
        except swift_exceptions.ClientException as e:
            err_msg = (_('Swift failed to create container %(container)s. '
                         'Error was: %(error)s') %
                       {'container': container, 'error': e})
            raise utils.Error(err_msg)

        if CONF.swift.delete_after > 0:
            headers = headers or {}
            headers['X-Delete-After'] = CONF.swift.delete_after

        try:
            obj_uuid = self.connection.put_object(container, object,
                                                  data, headers=headers)
        except swift_exceptions.ClientException as e:
            err_msg = (_('Swift failed to create object %(object)s in '
                         'container %(container)s. Error was: %(error)s') %
                       {'object': object, 'container': container,
                        'error': e})
            raise utils.Error(err_msg)

        return obj_uuid
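
    # Illustrative usage sketch (not part of the original class): the helper
    # functions at the bottom of this module wrap this API, e.g. (assuming
    # Swift credentials are configured; the names below are made up):
    #
    #   name = store_introspection_data({'inventory': {}}, node_uuid)
    #   blob = get_introspection_data(node_uuid)
    #   assert name == 'inspector_data-%s' % node_uuid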
""" try: headers, obj = self.connection.get_object(container, object) except swift_exceptions.ClientException as e: err_msg = (_('Swift failed to get object %(object)s in ' 'container %(container)s. Error was: %(error)s') % {'object': object, 'container': container, 'error': e}) raise utils.Error(err_msg) return obj def store_introspection_data(data, uuid, suffix=None): """Uploads introspection data to Swift. :param data: data to store in Swift :param uuid: UUID of the Ironic node that the data came from :param suffix: optional suffix to add to the underlying swift object name :returns: name of the Swift object that the data is stored in """ swift_api = SwiftAPI() swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid) if suffix is not None: swift_object_name = '%s-%s' % (swift_object_name, suffix) swift_api.create_object(swift_object_name, json.dumps(data)) return swift_object_name def get_introspection_data(uuid, suffix=None): """Downloads introspection data from Swift. :param uuid: UUID of the Ironic node that the data came from :param suffix: optional suffix to add to the underlying swift object name :returns: Swift object with the introspection data """ swift_api = SwiftAPI() swift_object_name = '%s-%s' % (OBJECT_NAME_PREFIX, uuid) if suffix is not None: swift_object_name = '%s-%s' % (swift_object_name, suffix) return swift_api.get_object(swift_object_name) ironic-inspector-7.2.0/ironic_inspector/common/__init__.py0000666000175100017510000000000013241323457024001 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/common/i18n.py0000666000175100017510000000141713241323457023036 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='ironic_inspector') # The primary translation function using the well-known name "_" _ = _translators.primary ironic-inspector-7.2.0/ironic_inspector/common/lldp_parsers.py0000666000175100017510000003407013241323457024752 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Names and mapping functions used to map LLDP TLVs to name/value pairs """ import binascii from construct import core import netaddr from ironic_inspector.common.i18n import _ from ironic_inspector.common import lldp_tlvs as tlv from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) # Names used in name/value pair from parsed TLVs LLDP_CHASSIS_ID_NM = 'switch_chassis_id' LLDP_PORT_ID_NM = 'switch_port_id' LLDP_PORT_DESC_NM = 'switch_port_description' LLDP_SYS_NAME_NM = 'switch_system_name' LLDP_SYS_DESC_NM = 'switch_system_description' LLDP_SWITCH_CAP_NM = 'switch_capabilities' LLDP_CAP_SUPPORT_NM = 'switch_capabilities_support' LLDP_CAP_ENABLED_NM = 'switch_capabilities_enabled' LLDP_MGMT_ADDRESSES_NM = 'switch_mgmt_addresses' LLDP_PORT_VLANID_NM = 'switch_port_untagged_vlan_id' LLDP_PORT_PROT_NM = 'switch_port_protocol' LLDP_PORT_PROT_VLAN_ENABLED_NM = 'switch_port_protocol_vlan_enabled' LLDP_PORT_PROT_VLAN_SUPPORT_NM = 'switch_port_protocol_vlan_support' LLDP_PORT_PROT_VLANIDS_NM = 'switch_port_protocol_vlan_ids' LLDP_PORT_VLANS_NM = 'switch_port_vlans' LLDP_PROTOCOL_IDENTITIES_NM = 'switch_protocol_identities' LLDP_PORT_MGMT_VLANID_NM = 'switch_port_management_vlan_id' LLDP_PORT_LINK_AGG_NM = 'switch_port_link_aggregation' LLDP_PORT_LINK_AGG_ENABLED_NM = 'switch_port_link_aggregation_enabled' LLDP_PORT_LINK_AGG_SUPPORT_NM = 'switch_port_link_aggregation_support' LLDP_PORT_LINK_AGG_ID_NM = 'switch_port_link_aggregation_id' LLDP_PORT_MAC_PHY_NM = 'switch_port_mac_phy_config' LLDP_PORT_LINK_AUTONEG_ENABLED_NM = 'switch_port_autonegotiation_enabled' LLDP_PORT_LINK_AUTONEG_SUPPORT_NM = 'switch_port_autonegotiation_support' LLDP_PORT_CAPABILITIES_NM = 'switch_port_physical_capabilities' LLDP_PORT_MAU_TYPE_NM = 'switch_port_mau_type' LLDP_MTU_NM = 'switch_port_mtu' class LLDPParser(object): """Base class to handle parsing of LLDP TLVs Each class that inherits from this base class must provide a parser map. Parser maps are used to associate a LLDP TLV with a function handler and arguments necessary to parse the TLV and generate one or more name/value pairs. Each LLDP TLV maps to a tuple with the following fields: function - handler function to generate name/value pairs construct - name of construct definition for TLV name - user-friendly name of TLV. For TLVs that generate only one name/value pair this is the name used len_check - boolean indicating if length check should be done on construct It's valid to have a function handler of None, this is for TLVs that are not mapped to a name/value pair(e.g.LLDP_TLV_TTL). """ def __init__(self, node_info, nv=None): """Create LLDPParser :param node_info - node being introspected :param nv - dictionary of name/value pairs to use """ self.nv_dict = nv or {} self.node_info = node_info self.parser_map = {} def set_value(self, name, value): """Set name value pair in dictionary The value for a name should not be changed if it exists. """ self.nv_dict.setdefault(name, value) def append_value(self, name, value): """Add value to a list mapped to name""" self.nv_dict.setdefault(name, []).append(value) def add_single_value(self, struct, name, data): """Add a single name/value pair to the nv dict""" self.set_value(name, struct.value) def parse_tlv(self, tlv_type, data): """Parse TLVs from mapping table This functions takes the TLV type and the raw data for this TLV and gets a tuple from the parser_map. The construct field in the tuple contains the construct lib definition of the TLV which can be parsed to access individual fields. 
        Once the TLV is parsed, the handler function for each TLV will
        store the individual fields as name/value pairs in nv_dict.

        If the handler function does not exist, then no name/value pairs
        will be added to nv_dict, but since the TLV was handled, True will
        be returned.

        :param tlv_type: type identifier for TLV
        :param data: raw TLV value
        :returns: True if the TLV is in parser_map and the data is valid,
                  otherwise False.
        """
        s = self.parser_map.get(tlv_type)
        if not s:
            return False

        func = s[0]  # handler
        if not func:
            return True  # TLV is handled

        try:
            tlv_parser = s[1]
            name = s[2]
            check_len = s[3]
        except IndexError as e:
            # NOTE: indexing a malformed (too short) tuple raises
            # IndexError, not KeyError
            LOG.warning("Incomplete entry in TLV table: %s", e,
                        node_info=self.node_info)
            return False

        # Some constructs require a length validation to ensure the
        # proper number of bytes has been provided, for example
        # when a BitStruct is used.
        if check_len and (tlv_parser.sizeof() != len(data)):
            LOG.warning("Invalid data for %(name)s expected len %(expect)d, "
                        "got %(actual)d",
                        {'name': name,
                         'expect': tlv_parser.sizeof(),
                         'actual': len(data)},
                        node_info=self.node_info)
            return False

        # Use the construct parser to parse the TLV so that its
        # individual fields can be accessed
        try:
            struct = tlv_parser.parse(data)
        except (core.ConstructError, netaddr.AddrFormatError) as e:
            LOG.warning("TLV parse error: %s", e,
                        node_info=self.node_info)
            return False

        # Call the handler with the parsed structure
        try:
            func(struct, name, data)
        except ValueError as e:
            LOG.warning("TLV value error: %s", e,
                        node_info=self.node_info)
            return False

        return True

    def add_dot1_link_aggregation(self, struct, name, data):
        """Add name/value pairs for TLV Dot1_LinkAggregationId

        This is in the base class since it can be used by both dot1
        and dot3.
        """
        self.set_value(LLDP_PORT_LINK_AGG_ENABLED_NM,
                       struct.status.enabled)
        self.set_value(LLDP_PORT_LINK_AGG_SUPPORT_NM,
                       struct.status.supported)
        self.set_value(LLDP_PORT_LINK_AGG_ID_NM, struct.portid)


class LLDPBasicMgmtParser(LLDPParser):
    """Class to handle parsing of the 802.1AB Basic Management set

    This class will also handle 802.1Q and 802.3 OUI TLVs.
    """
    def __init__(self, node_info, nv=None):
        # NOTE: the first positional argument is the node being
        # introspected, so name it accordingly and pass it through.
        super(LLDPBasicMgmtParser, self).__init__(node_info, nv)

        self.parser_map = {
            tlv.LLDP_TLV_CHASSIS_ID:
                (self.add_single_value, tlv.ChassisId,
                 LLDP_CHASSIS_ID_NM, False),
            tlv.LLDP_TLV_PORT_ID:
                (self.add_single_value, tlv.PortId, LLDP_PORT_ID_NM, False),
            tlv.LLDP_TLV_TTL: (None, None, None, False),
            tlv.LLDP_TLV_PORT_DESCRIPTION:
                (self.add_single_value, tlv.PortDesc, LLDP_PORT_DESC_NM,
                 False),
            tlv.LLDP_TLV_SYS_NAME:
                (self.add_single_value, tlv.SysName, LLDP_SYS_NAME_NM, False),
            tlv.LLDP_TLV_SYS_DESCRIPTION:
                (self.add_single_value, tlv.SysDesc, LLDP_SYS_DESC_NM, False),
            tlv.LLDP_TLV_SYS_CAPABILITIES:
                (self.add_capabilities, tlv.SysCapabilities,
                 LLDP_SWITCH_CAP_NM, True),
            tlv.LLDP_TLV_MGMT_ADDRESS:
                (self.add_mgmt_address, tlv.MgmtAddress,
                 LLDP_MGMT_ADDRESSES_NM, False),
            tlv.LLDP_TLV_ORG_SPECIFIC:
                (self.handle_org_specific_tlv, tlv.OrgSpecific, None, False),
            tlv.LLDP_TLV_END_LLDPPDU: (None, None, None, False)
        }

    def add_mgmt_address(self, struct, name, data):
        """Handle LLDP_TLV_MGMT_ADDRESS

        There can be multiple Mgmt Address TLVs; store them in a list.
""" self.append_value(name, struct.address) def _get_capabilities_list(self, caps): """Get capabilities from bit map""" cap_map = [ (caps.repeater, 'Repeater'), (caps.bridge, 'Bridge'), (caps.wlan, 'WLAN'), (caps.router, 'Router'), (caps.telephone, 'Telephone'), (caps.docsis, 'DOCSIS cable device'), (caps.station, 'Station only'), (caps.cvlan, 'C-Vlan'), (caps.svlan, 'S-Vlan'), (caps.tpmr, 'TPMR')] return [cap for (bit, cap) in cap_map if bit] def add_capabilities(self, struct, name, data): """Handle LLDP_TLV_SYS_CAPABILITIES""" self.set_value(LLDP_CAP_SUPPORT_NM, self._get_capabilities_list(struct.system)) self.set_value(LLDP_CAP_ENABLED_NM, self._get_capabilities_list(struct.enabled)) def handle_org_specific_tlv(self, struct, name, data): """Handle Organizationally Unique ID TLVs This class supports 802.1Q and 802.3 OUI TLVs. See http://www.ieee802.org/1/pages/802.1Q-2014.html, Annex D and http://standards.ieee.org/about/get/802/802.3.html """ oui = binascii.hexlify(struct.oui).decode() subtype = struct.subtype oui_data = data[4:] if oui == tlv.LLDP_802dot1_OUI: parser = LLDPdot1Parser(self.node_info, self.nv_dict) if parser.parse_tlv(subtype, oui_data): LOG.debug("Handled 802.1 subtype %d", subtype) else: LOG.debug("Subtype %d not found for 802.1", subtype) elif oui == tlv.LLDP_802dot3_OUI: parser = LLDPdot3Parser(self.node_info, self.nv_dict) if parser.parse_tlv(subtype, oui_data): LOG.debug("Handled 802.3 subtype %d", subtype) else: LOG.debug("Subtype %d not found for 802.3", subtype) else: LOG.warning("Organizationally Unique ID %s not " "recognized", oui, node_info=self.node_info) class LLDPdot1Parser(LLDPParser): """Class to handle parsing of 802.1Q TLVs""" def __init__(self, node_info, nv=None): super(LLDPdot1Parser, self).__init__(node_info, nv) self.parser_map = { tlv.dot1_PORT_VLANID: (self.add_single_value, tlv.Dot1_UntaggedVlanId, LLDP_PORT_VLANID_NM, False), tlv.dot1_PORT_PROTOCOL_VLANID: (self.add_dot1_port_protocol_vlan, tlv.Dot1_PortProtocolVlan, LLDP_PORT_PROT_NM, True), tlv.dot1_VLAN_NAME: (self.add_dot1_vlans, tlv.Dot1_VlanName, None, False), tlv.dot1_PROTOCOL_IDENTITY: (self.add_dot1_protocol_identities, tlv.Dot1_ProtocolIdentity, LLDP_PROTOCOL_IDENTITIES_NM, False), tlv.dot1_MANAGEMENT_VID: (self.add_single_value, tlv.Dot1_MgmtVlanId, LLDP_PORT_MGMT_VLANID_NM, False), tlv.dot1_LINK_AGGREGATION: (self.add_dot1_link_aggregation, tlv.Dot1_LinkAggregationId, LLDP_PORT_LINK_AGG_NM, True) } def add_dot1_port_protocol_vlan(self, struct, name, data): """Handle dot1_PORT_PROTOCOL_VLANID""" self.set_value(LLDP_PORT_PROT_VLAN_ENABLED_NM, struct.flags.enabled) self.set_value(LLDP_PORT_PROT_VLAN_SUPPORT_NM, struct.flags.supported) # There can be multiple port/protocol vlans TLVs, store in list self.append_value(LLDP_PORT_PROT_VLANIDS_NM, struct.vlanid) def add_dot1_vlans(self, struct, name, data): """Handle dot1_VLAN_NAME There can be multiple vlan TLVs, add dictionary entry with id/vlan to list. 
""" vlan_dict = {} vlan_dict['name'] = struct.vlan_name vlan_dict['id'] = struct.vlanid self.append_value(LLDP_PORT_VLANS_NM, vlan_dict) def add_dot1_protocol_identities(self, struct, name, data): """Handle dot1_PROTOCOL_IDENTITY There can be multiple protocol ids TLVs, store in list """ self.append_value(LLDP_PROTOCOL_IDENTITIES_NM, binascii.b2a_hex(struct.protocol).decode()) class LLDPdot3Parser(LLDPParser): """Class to handle parsing of 802.3 TLVs""" def __init__(self, node_info, nv=None): super(LLDPdot3Parser, self).__init__(node_info, nv) # Note that 802.3 link Aggregation has been deprecated and moved to # 802.1 spec, but it is in the same format. Use the same function as # dot1 handler. self.parser_map = { tlv.dot3_MACPHY_CONFIG_STATUS: (self.add_dot3_macphy_config, tlv.Dot3_MACPhy_Config_Status, LLDP_PORT_MAC_PHY_NM, True), tlv.dot3_LINK_AGGREGATION: (self.add_dot1_link_aggregation, tlv.Dot1_LinkAggregationId, LLDP_PORT_LINK_AGG_NM, True), tlv.dot3_MTU: (self.add_single_value, tlv.Dot3_MTU, LLDP_MTU_NM, False) } def add_dot3_macphy_config(self, struct, name, data): """Handle dot3_MACPHY_CONFIG_STATUS""" try: mau_type = tlv.OPER_MAU_TYPES[struct.mau_type] except KeyError: raise ValueError(_('Invalid index for mau type')) self.set_value(LLDP_PORT_LINK_AUTONEG_ENABLED_NM, struct.autoneg.enabled) self.set_value(LLDP_PORT_LINK_AUTONEG_SUPPORT_NM, struct.autoneg.supported) self.set_value(LLDP_PORT_CAPABILITIES_NM, tlv.get_autoneg_cap(struct.pmd_autoneg)) self.set_value(LLDP_PORT_MAU_TYPE_NM, mau_type) ironic-inspector-7.2.0/ironic_inspector/common/keystone.py0000666000175100017510000000515613241323457024124 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy from keystoneauth1 import loading from oslo_config import cfg CONF = cfg.CONF DEFAULT_VALID_INTERFACES = ['internal', 'public'] # TODO(pas-ha) set default values in conf.opts.set_defaults() def register_auth_opts(group, service_type): loading.register_session_conf_options(CONF, group) loading.register_auth_conf_options(CONF, group) CONF.set_default('auth_type', default='password', group=group) loading.register_adapter_conf_options(CONF, group) CONF.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group) CONF.set_default('service_type', service_type, group=group) def get_session(group): auth = loading.load_auth_from_conf_options(CONF, group) session = loading.load_session_from_conf_options( CONF, group, auth=auth) return session def get_adapter(group, **adapter_kwargs): return loading.load_adapter_from_conf_options(CONF, group, **adapter_kwargs) # TODO(pas-ha) set default values in conf.opts.set_defaults() def add_auth_options(options, service_type): def add_options(opts, opts_to_add): for new_opt in opts_to_add: for opt in opts: if opt.name == new_opt.name: break else: opts.append(new_opt) opts = copy.deepcopy(options) opts.insert(0, loading.get_auth_common_conf_options()[0]) # NOTE(dims): There are a lot of auth plugins, we just generate # the config options for a few common ones plugins = ['password', 'v2password', 'v3password'] for name in plugins: plugin = loading.get_plugin_loader(name) add_options(opts, loading.get_auth_plugin_conf_options(plugin)) add_options(opts, loading.get_session_conf_options()) adapter_opts = loading.get_adapter_conf_options( include_deprecated=False) cfg.set_defaults(adapter_opts, service_type=service_type, valid_interfaces=DEFAULT_VALID_INTERFACES) add_options(opts, adapter_opts) opts.sort(key=lambda x: x.name) return opts ironic-inspector-7.2.0/ironic_inspector/common/service_utils.py0000666000175100017510000000170113241323457025133 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log from ironic_inspector.conf import opts LOG = log.getLogger(__name__) CONF = cfg.CONF def prepare_service(args=None): args = [] if args is None else args log.register_options(CONF) opts.set_config_defaults() opts.parse_args(args) log.setup(CONF, 'ironic_inspector') LOG.debug("Configuration:") CONF.log_opt_values(LOG, log.DEBUG) ironic-inspector-7.2.0/ironic_inspector/common/ironic.py0000666000175100017510000001371413241323457023545 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
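# Typical usage of this module by the rest of ironic-inspector (a sketch,
# not executed here):
#
#     client = get_client()                    # session-based Ironic client
#     node = get_node(node_id, ironic=client)  # raises NotFound if missing
#     check_provision_state(node)              # must be in VALID_STATES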
import socket from ironicclient import client from ironicclient import exceptions as ironic_exc import netaddr from oslo_config import cfg import retrying from ironic_inspector.common.i18n import _ from ironic_inspector.common import keystone from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger(__name__) # See http://specs.openstack.org/openstack/ironic-specs/specs/kilo/new-ironic-state-machine.html # noqa VALID_STATES = {'enroll', 'manageable', 'inspecting', 'inspect failed'} # 1.19 is API version, which supports port.pxe_enabled DEFAULT_IRONIC_API_VERSION = '1.19' IRONIC_SESSION = None class NotFound(utils.Error): """Node not found in Ironic.""" def __init__(self, node_ident, code=404, *args, **kwargs): msg = _('Node %s was not found in Ironic') % node_ident super(NotFound, self).__init__(msg, code, *args, **kwargs) def reset_ironic_session(): """Reset the global session variable. Mostly useful for unit tests. """ global IRONIC_SESSION IRONIC_SESSION = None def get_ipmi_address(node): ipmi_fields = ['ipmi_address'] + CONF.ipmi_address_fields # NOTE(sambetts): IPMI Address is useless to us if bridging is enabled so # just ignore it and return None if node.driver_info.get("ipmi_bridging", "no") != "no": return for name in ipmi_fields: value = node.driver_info.get(name) if not value: continue try: ip = socket.gethostbyname(value) except socket.gaierror: msg = _('Failed to resolve the hostname (%(value)s)' ' for node %(uuid)s') raise utils.Error(msg % {'value': value, 'uuid': node.uuid}, node_info=node) if netaddr.IPAddress(ip).is_loopback(): LOG.warning('Ignoring loopback BMC address %s', ip, node_info=node) ip = None return ip def get_client(token=None, api_version=DEFAULT_IRONIC_API_VERSION): # pragma: no cover """Get Ironic client instance.""" global IRONIC_SESSION # NOTE: To support standalone ironic without keystone # TODO(pas-ha) remove handling of deprecated opts in Rocky # TODO(pas-ha) rewrite when ironicclient natively supports 'none' auth # via sessions https://review.openstack.org/#/c/359061/ if CONF.ironic.auth_strategy == 'noauth': CONF.set_override('auth_type', 'none', group='ironic') if not IRONIC_SESSION: IRONIC_SESSION = keystone.get_session('ironic') args = { 'os_ironic_api_version': api_version, 'max_retries': CONF.ironic.max_retries, 'retry_interval': CONF.ironic.retry_interval} adapter_opts = dict() # TODO(pas-ha) use service auth with incoming token if CONF.ironic.auth_type != 'none': if token is None: args['session'] = IRONIC_SESSION else: args['token'] = token # TODO(pas-ha): remove handling of deprecated options in Rocky if CONF.ironic.os_region and not CONF.ironic.region_name: adapter_opts['region_name'] = CONF.ironic.os_region if (CONF.ironic.auth_type == 'none' and not CONF.ironic.endpoint_override and CONF.ironic.ironic_url): adapter_opts['endpoint_override'] = CONF.ironic.ironic_url adapter = keystone.get_adapter('ironic', session=IRONIC_SESSION, **adapter_opts) endpoint = adapter.get_endpoint() return client.Client(1, endpoint, **args) def check_provision_state(node): state = node.provision_state.lower() if state not in VALID_STATES: msg = _('Invalid provision state for introspection: ' '"%(state)s", valid states are "%(valid)s"') raise utils.Error(msg % {'state': state, 'valid': list(VALID_STATES)}, node_info=node) def capabilities_to_dict(caps): """Convert the Node's capabilities into a dictionary.""" if not caps: return {} return dict([key.split(':', 1) for key in caps.split(',')]) def dict_to_capabilities(caps_dict): 
"""Convert a dictionary into a string with the capabilities syntax.""" return ','.join(["%s:%s" % (key, value) for key, value in caps_dict.items() if value is not None]) def get_node(node_id, ironic=None, **kwargs): """Get a node from Ironic. :param node_id: node UUID or name. :param ironic: ironic client instance. :param kwargs: arguments to pass to Ironic client. :raises: Error on failure """ ironic = ironic if ironic is not None else get_client() try: return ironic.node.get(node_id, **kwargs) except ironic_exc.NotFound: raise NotFound(node_id) except ironic_exc.HttpError as exc: raise utils.Error(_("Cannot get node %(node)s: %(exc)s") % {'node': node_id, 'exc': exc}) @retrying.retry( retry_on_exception=lambda exc: isinstance(exc, ironic_exc.ClientException), stop_max_attempt_number=5, wait_fixed=1000) def call_with_retries(func, *args, **kwargs): """Call an ironic client function retrying all errors. If an ironic client exception is raised, try calling the func again, at most 5 times, waiting 1 sec between each call. If on the 5th attempt the func raises again, the exception is propagated to the caller. """ return func(*args, **kwargs) ironic-inspector-7.2.0/ironic_inspector/utils.py0000666000175100017510000001616613241323457022136 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import logging as pylog import futurist from ironicclient.v1 import node from keystonemiddleware import auth_token from oslo_config import cfg from oslo_log import log from oslo_middleware import cors as cors_middleware import pytz from ironic_inspector.common.i18n import _ from ironic_inspector import conf # noqa from ironic_inspector import policy CONF = cfg.CONF _EXECUTOR = None def get_ipmi_address_from_data(introspection_data): try: result = introspection_data['inventory']['bmc_address'] except KeyError: result = introspection_data.get('ipmi_address') if result in ('', '0.0.0.0'): # ipmitool can return these values, if it does not know the address return None else: return result def get_pxe_mac(introspection_data): pxe_mac = introspection_data.get('boot_interface') if pxe_mac and '-' in pxe_mac: # pxelinux format: 01-aa-bb-cc-dd-ee-ff pxe_mac = pxe_mac.split('-', 1)[1] pxe_mac = pxe_mac.replace('-', ':').lower() return pxe_mac def processing_logger_prefix(data=None, node_info=None): """Calculate prefix for logging. 
Tries to use: * node UUID, node._state * node PXE MAC, * node BMC address :param data: introspection data :param node_info: NodeInfo or ironic node object :return: logging prefix as a string """ # TODO(dtantsur): try to get MAC and BMC address for node_info as well parts = [] data = data or {} if node_info is not None: if isinstance(node_info, node.Node): parts.append(str(node_info.uuid)) else: parts.append(str(node_info)) pxe_mac = get_pxe_mac(data) if pxe_mac: parts.append('MAC %s' % pxe_mac) bmc_address = get_ipmi_address_from_data(data) if data else None if bmc_address: parts.append('BMC %s' % bmc_address) if parts: return _('[node: %s]') % ' '.join(parts) else: return _('[unidentified node]') class ProcessingLoggerAdapter(log.KeywordArgumentAdapter): def process(self, msg, kwargs): if 'data' not in kwargs and 'node_info' not in kwargs: return super(ProcessingLoggerAdapter, self).process(msg, kwargs) data = kwargs.get('data', {}) node_info = kwargs.get('node_info') prefix = processing_logger_prefix(data, node_info) msg, kwargs = super(ProcessingLoggerAdapter, self).process(msg, kwargs) return ('%s %s' % (prefix, msg)), kwargs def getProcessingLogger(name): # We can't use getLogger from oslo_log, as it's an adapter itself logger = pylog.getLogger(name) return ProcessingLoggerAdapter(logger, {}) LOG = getProcessingLogger(__name__) class Error(Exception): """Inspector exception.""" def __init__(self, msg, code=400, log_level='error', **kwargs): super(Error, self).__init__(msg) getattr(LOG, log_level)(msg, **kwargs) self.http_code = code class NotFoundInCacheError(Error): """Exception when node was not found in cache during processing.""" def __init__(self, msg, code=404, **kwargs): super(NotFoundInCacheError, self).__init__(msg, code, log_level='info', **kwargs) class NodeStateRaceCondition(Error): """State mismatch between the DB and a node_info.""" def __init__(self, *args, **kwargs): message = _('Node state mismatch detected between the DB and the ' 'cached node_info object') kwargs.setdefault('code', 500) super(NodeStateRaceCondition, self).__init__(message, *args, **kwargs) class NodeStateInvalidEvent(Error): """Invalid event attempted.""" def executor(): """Return the current futures executor.""" global _EXECUTOR if _EXECUTOR is None: _EXECUTOR = futurist.GreenThreadPoolExecutor( max_workers=CONF.max_concurrency) return _EXECUTOR def add_auth_middleware(app): """Add authentication middleware to Flask application. :param app: application. """ auth_conf = dict(CONF.keystone_authtoken) auth_conf['delay_auth_decision'] = True app.wsgi_app = auth_token.AuthProtocol(app.wsgi_app, auth_conf) def add_cors_middleware(app): """Create a CORS wrapper Attach ironic-inspector-specific defaults that must be included in all CORS responses. :param app: application """ app.wsgi_app = cors_middleware.CORS(app.wsgi_app, CONF) def check_auth(request, rule=None, target=None): """Check authentication on request. 
:param request: Flask request :param rule: policy rule to check the request against :raises: utils.Error if access is denied """ if CONF.auth_strategy == 'noauth': return if not request.context.is_public_api: if request.headers.get('X-Identity-Status', '').lower() == 'invalid': raise Error(_('Authentication required'), code=401) target = {} if target is None else target if not policy.authorize(rule, target, request.context.to_policy_values()): raise Error(_("Access denied by policy"), code=403) def get_valid_macs(data): """Get a list of valid MAC's from the introspection data.""" return [m['mac'] for m in data.get('all_interfaces', {}).values() if m.get('mac')] _INVENTORY_MANDATORY_KEYS = ('memory', 'cpu', 'interfaces') def get_inventory(data, node_info=None): """Get and validate the hardware inventory from introspection data.""" inventory = data.get('inventory') # TODO(dtantsur): validate inventory using JSON schema if not inventory: raise Error(_('Hardware inventory is empty or missing'), data=data, node_info=node_info) for key in _INVENTORY_MANDATORY_KEYS: if not inventory.get(key): raise Error(_('Invalid hardware inventory: %s key is missing ' 'or empty') % key, data=data, node_info=node_info) if not inventory.get('disks'): LOG.info('No disks were detected in the inventory, assuming this ' 'is a disk-less node', data=data, node_info=node_info) # Make sure the code iterating over it does not fail with a TypeError inventory['disks'] = [] return inventory def iso_timestamp(timestamp=None, tz=pytz.timezone('utc')): """Return an ISO8601-formatted timestamp (tz: UTC) or None. :param timestamp: such as time.time() or None :param tz: timezone :returns: an ISO8601-formatted timestamp, or None """ if timestamp is None: return None date = datetime.datetime.fromtimestamp(timestamp, tz=tz) return date.isoformat() ironic-inspector-7.2.0/ironic_inspector/plugins/0000775000175100017510000000000013241324014022057 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/plugins/discovery.py0000666000175100017510000000662313241323457024463 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
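# This module provides the node-not-found hook that auto-enrolls nodes.
# A sketch of how it is enabled in inspector.conf (the 'enroll' entry point
# name and 'fake' driver are assumptions based on common deployments; check
# the setup.cfg of your installation):
#
#     [processing]
#     node_not_found_hook = enroll
#
#     [discovery]
#     enroll_node_driver = fake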
"""Enroll node not found hook hook.""" from oslo_config import cfg from ironic_inspector.common.i18n import _ from ironic_inspector.common import ironic as ir_utils from ironic_inspector import node_cache from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger(__name__) def _extract_node_driver_info(introspection_data): node_driver_info = {} ipmi_address = utils.get_ipmi_address_from_data(introspection_data) if ipmi_address: node_driver_info['ipmi_address'] = ipmi_address else: LOG.warning('No BMC address provided, discovered node will be ' 'created without ipmi address') return node_driver_info def _check_existing_nodes(introspection_data, node_driver_info, ironic): macs = utils.get_valid_macs(introspection_data) if macs: # verify existing ports for mac in macs: ports = ironic.port.list(address=mac) if not ports: continue raise utils.Error( _('Port %(mac)s already exists, uuid: %(uuid)s') % {'mac': mac, 'uuid': ports[0].uuid}, data=introspection_data) else: LOG.warning('No suitable interfaces found for discovered node. ' 'Check that validate_interfaces hook is listed in ' '[processing]default_processing_hooks config option') # verify existing node with discovered ipmi address ipmi_address = node_driver_info.get('ipmi_address') if ipmi_address: # FIXME(aarefiev): it's not effective to fetch all nodes, and may # impact on performance on big clusters nodes = ironic.node.list(fields=('uuid', 'driver_info'), limit=0) for node in nodes: if ipmi_address == ir_utils.get_ipmi_address(node): raise utils.Error( _('Node %(uuid)s already has BMC address ' '%(ipmi_address)s, not enrolling') % {'ipmi_address': ipmi_address, 'uuid': node.uuid}, data=introspection_data) def enroll_node_not_found_hook(introspection_data, **kwargs): node_attr = {} ironic = ir_utils.get_client() node_driver_info = _extract_node_driver_info(introspection_data) node_attr['driver_info'] = node_driver_info node_driver = CONF.discovery.enroll_node_driver _check_existing_nodes(introspection_data, node_driver_info, ironic) LOG.debug('Creating discovered node with driver %(driver)s and ' 'attributes: %(attr)s', {'driver': node_driver, 'attr': node_attr}, data=introspection_data) # NOTE(aarefiev): This flag allows to distinguish enrolled manually # and auto-discovered nodes in the introspection rules. introspection_data['auto_discovered'] = True return node_cache.create_node(node_driver, ironic=ironic, **node_attr) ironic-inspector-7.2.0/ironic_inspector/plugins/raid_device.py0000666000175100017510000001062413241323457024706 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Gather root device hint from recognized block devices.""" from ironic_inspector.plugins import base from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) class RaidDeviceDetection(base.ProcessingHook): """Processing hook for learning the root device after RAID creation. The plugin can figure out the root device in 2 runs. First, it saves the discovered block device serials in node.extra. 
    The second run will check the difference between the recently
    discovered block devices and the previously saved ones. After saving
    the root device in node.properties, it will delete the temporarily
    saved block device serials in node.extra.

    This way, it helps to figure out the root device hint in cases when
    Ironic doesn't otherwise have enough information to do so. Such a use
    case is DRAC RAID configuration, where the BMC doesn't provide any
    useful information about the created RAID disks. Using this plugin
    immediately before and after creating the root RAID device will solve
    the issue of root device hints.

    In cases where there's no RAID volume on the node, the standard plugin
    will fail due to the missing local_gb value. This plugin fakes the
    missing value until it's corrected during later runs. Note that for
    this to work, the plugin needs to take precedence over the standard
    plugin.
    """

    def _get_serials(self, data):
        if 'inventory' in data:
            return [x['serial'] for x in data['inventory'].get('disks', ())
                    if x.get('serial')]
        elif 'block_devices' in data:
            return data['block_devices'].get('serials', ())

    def before_processing(self, introspection_data, **kwargs):
        """Add a fake local_gb value if it's missing from introspection_data."""
        if not introspection_data.get('local_gb'):
            LOG.info('No volume is found on the node. Adding a fake '
                     'value for "local_gb"', data=introspection_data)
            introspection_data['local_gb'] = 1

    def before_update(self, introspection_data, node_info, **kwargs):
        current_devices = self._get_serials(introspection_data)
        if not current_devices:
            LOG.warning('No block device was received from ramdisk',
                        node_info=node_info, data=introspection_data)
            return

        node = node_info.node()

        if 'root_device' in node.properties:
            LOG.info('Root device is already known for the node',
                     node_info=node_info, data=introspection_data)
            return

        if 'block_devices' in node.extra:
            # Compare previously discovered devices with the current ones
            previous_devices = node.extra['block_devices']['serials']
            new_devices = [device for device in current_devices
                           if device not in previous_devices]

            if len(new_devices) > 1:
                LOG.warning('Root device cannot be identified because '
                            'multiple new devices were found',
                            node_info=node_info, data=introspection_data)
                return
            elif len(new_devices) == 0:
                LOG.warning('No new devices were found',
                            node_info=node_info, data=introspection_data)
                return

            node_info.patch([
                {'op': 'remove', 'path': '/extra/block_devices'},
                {'op': 'add', 'path': '/properties/root_device',
                 'value': {'serial': new_devices[0]}}
            ])
        else:
            # No previously discovered devices - save the inspector block
            # devices in node.extra
            node_info.patch([{'op': 'add', 'path': '/extra/block_devices',
                              'value': {'serials': current_devices}}])
ironic-inspector-7.2.0/ironic_inspector/plugins/__init__.py0000666000175100017510000000000013241323457024172 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/plugins/base.py0000666000175100017510000001702013241323457023357 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
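# A minimal sketch of a custom processing hook built on the classes below
# (illustrative only; it would be registered under the
# 'ironic_inspector.hooks.processing' entry point namespace):
#
#     class MyHook(ProcessingHook):
#         def before_update(self, introspection_data, node_info, **kwargs):
#             # e.g. copy a value discovered by the ramdisk to the node
#             node_info.update_properties(
#                 cpus=str(introspection_data.get('cpus', 0)))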
"""Base code for plugins support.""" import abc from oslo_config import cfg from oslo_log import log import six import stevedore from ironic_inspector.common.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class ProcessingHook(object): # pragma: no cover """Abstract base class for introspection data processing hooks.""" dependencies = [] """An ordered list of hooks that must be enabled before this one. The items here should be entry point names, not classes. """ def before_processing(self, introspection_data, **kwargs): """Hook to run before any other data processing. This hook is run even before sanity checks. :param introspection_data: raw information sent by the ramdisk, may be modified by the hook. :param kwargs: used for extensibility without breaking existing hooks :returns: nothing. """ def before_update(self, introspection_data, node_info, **kwargs): """Hook to run before Ironic node update. This hook is run after node is found and ports are created, just before the node is updated with the data. :param introspection_data: processed data from the ramdisk. :param node_info: NodeInfo instance. :param kwargs: used for extensibility without breaking existing hooks. :returns: nothing. [RFC 6902] - http://tools.ietf.org/html/rfc6902 """ class WithValidation(object): REQUIRED_PARAMS = set() """Set with names of required parameters.""" OPTIONAL_PARAMS = set() """Set with names of optional parameters.""" def validate(self, params, **kwargs): """Validate params passed during creation. Default implementation checks for presence of fields from REQUIRED_PARAMS and fails for unexpected fields (not from REQUIRED_PARAMS + OPTIONAL_PARAMS). :param params: params as a dictionary :param kwargs: used for extensibility without breaking existing plugins :raises: ValueError on validation failure """ passed = {k for k, v in params.items() if v is not None} missing = self.REQUIRED_PARAMS - passed unexpected = passed - self.REQUIRED_PARAMS - self.OPTIONAL_PARAMS msg = [] if missing: msg.append(_('missing required parameter(s): %s') % ', '.join(missing)) if unexpected: msg.append(_('unexpected parameter(s): %s') % ', '.join(unexpected)) if msg: raise ValueError('; '.join(msg)) @six.add_metaclass(abc.ABCMeta) class RuleConditionPlugin(WithValidation): # pragma: no cover """Abstract base class for rule condition plugins.""" REQUIRED_PARAMS = {'value'} ALLOW_NONE = False """Whether this condition accepts None when field is not found.""" @abc.abstractmethod def check(self, node_info, field, params, **kwargs): """Check if condition holds for a given field. :param node_info: NodeInfo object :param field: field value :param params: parameters as a dictionary, changing it here will change what will be stored in database :param kwargs: used for extensibility without breaking existing plugins :raises ValueError: on unacceptable field value :returns: True if check succeeded, otherwise False """ @six.add_metaclass(abc.ABCMeta) class RuleActionPlugin(WithValidation): # pragma: no cover """Abstract base class for rule action plugins.""" FORMATTED_PARAMS = [] """List of params will be formatted with python format.""" @abc.abstractmethod def apply(self, node_info, params, **kwargs): """Run action on successful rule match. 
        :param node_info: NodeInfo object
        :param params: parameters as a dictionary
        :param kwargs: used for extensibility without breaking existing
                       plugins
        :raises: utils.Error on failure
        """


_HOOKS_MGR = None
_NOT_FOUND_HOOK_MGR = None
_CONDITIONS_MGR = None
_ACTIONS_MGR = None


def missing_entrypoints_callback(names):
    """Raise RuntimeError with a comma-separated list of missing hooks"""
    error = _('The following hook(s) are missing or failed to load: %s')
    raise RuntimeError(error % ', '.join(names))


def processing_hooks_manager(*args):
    """Create a Stevedore extension manager for processing hooks.

    :param args: arguments to pass to the hooks constructor.
    """
    global _HOOKS_MGR
    if _HOOKS_MGR is None:
        names = [x.strip()
                 for x in CONF.processing.processing_hooks.split(',')
                 if x.strip()]
        _HOOKS_MGR = stevedore.NamedExtensionManager(
            'ironic_inspector.hooks.processing',
            names=names,
            invoke_on_load=True,
            invoke_args=args,
            on_missing_entrypoints_callback=missing_entrypoints_callback,
            name_order=True)
    return _HOOKS_MGR


def validate_processing_hooks():
    """Validate the enabled processing hooks.

    :raises: RuntimeError on hooks that are missing or failed to load
    :raises: RuntimeError on dependency validation failure
    :returns: the list of hooks that passed validation
    """
    hooks = [ext for ext in processing_hooks_manager()]
    enabled = set()
    errors = []
    for hook in hooks:
        deps = getattr(hook.obj, 'dependencies', ())
        missing = [d for d in deps if d not in enabled]
        if missing:
            errors.append('Hook %(hook)s requires the following hooks to be '
                          'enabled before it: %(deps)s. The following hooks '
                          'are missing: %(missing)s.' %
                          {'hook': hook.name,
                           'deps': ', '.join(deps),
                           'missing': ', '.join(missing)})
        enabled.add(hook.name)

    if errors:
        raise RuntimeError("Some hooks failed to load due to dependency "
                           "problems:\n%s" % "\n".join(errors))

    return hooks


def node_not_found_hook_manager(*args):
    global _NOT_FOUND_HOOK_MGR
    if _NOT_FOUND_HOOK_MGR is None:
        name = CONF.processing.node_not_found_hook
        if name:
            _NOT_FOUND_HOOK_MGR = stevedore.DriverManager(
                'ironic_inspector.hooks.node_not_found',
                name=name)

    return _NOT_FOUND_HOOK_MGR


def rule_conditions_manager():
    """Create a Stevedore extension manager for conditions in rules."""
    global _CONDITIONS_MGR
    if _CONDITIONS_MGR is None:
        _CONDITIONS_MGR = stevedore.ExtensionManager(
            'ironic_inspector.rules.conditions',
            invoke_on_load=True)
    return _CONDITIONS_MGR


def rule_actions_manager():
    """Create a Stevedore extension manager for actions in rules."""
    global _ACTIONS_MGR
    if _ACTIONS_MGR is None:
        _ACTIONS_MGR = stevedore.ExtensionManager(
            'ironic_inspector.rules.actions',
            invoke_on_load=True)
    return _ACTIONS_MGR
ironic-inspector-7.2.0/ironic_inspector/plugins/example.py0000666000175100017510000000254513241323457024102 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example plugin.""" from oslo_log import log from ironic_inspector.plugins import base LOG = log.getLogger('ironic_inspector.plugins.example') class ExampleProcessingHook(base.ProcessingHook): # pragma: no cover def before_processing(self, introspection_data, **kwargs): LOG.debug('before_processing: %s', introspection_data) def before_update(self, introspection_data, node_info, **kwargs): LOG.debug('before_update: %s (node %s)', introspection_data, node_info.uuid) def example_not_found_hook(introspection_data, **kwargs): LOG.debug('Processing node not found %s', introspection_data) class ExampleRuleAction(base.RuleActionPlugin): # pragma: no cover def apply(self, node_info, params, **kwargs): LOG.debug('apply action to %s: %s', node_info.uuid, params) ironic-inspector-7.2.0/ironic_inspector/plugins/lldp_basic.py0000666000175100017510000000624413241323457024547 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """LLDP Processing Hook for basic TLVs""" import binascii from ironic_inspector.common import lldp_parsers from ironic_inspector.plugins import base from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) class LLDPBasicProcessingHook(base.ProcessingHook): """Process mandatory and optional LLDP packet fields Loop through raw LLDP TLVs and parse those from the basic management, 802.1, and 802.3 TLV sets. Store parsed data back to the ironic-inspector database. """ def _parse_lldp_tlvs(self, tlvs, node_info): """Parse LLDP TLVs into dictionary of name/value pairs :param tlvs: list of raw TLVs :param node_info: node being introspected :returns nv: dictionary of name/value pairs. The LLDP user-friendly names, e.g. "switch_port_id" are the keys """ # Generate name/value pairs for each TLV supported by this plugin. 
        parser = lldp_parsers.LLDPBasicMgmtParser(node_info)

        for tlv_type, tlv_value in tlvs:
            try:
                data = bytearray(binascii.a2b_hex(tlv_value))
            except (TypeError, binascii.Error) as e:
                # NOTE: Python 2 raises TypeError for invalid hex input,
                # while Python 3 raises binascii.Error
                LOG.warning(
                    "TLV value for TLV type %(tlv_type)d not in correct "
                    "format, value must be in hexadecimal: %(msg)s",
                    {'tlv_type': tlv_type, 'msg': e}, node_info=node_info)
                continue

            if parser.parse_tlv(tlv_type, data):
                LOG.debug("Handled TLV type %d", tlv_type,
                          node_info=node_info)
            else:
                LOG.debug("LLDP TLV type %d not handled", tlv_type,
                          node_info=node_info)
        return parser.nv_dict

    def before_update(self, introspection_data, node_info, **kwargs):
        """Process LLDP data and update all_interfaces with processed data"""
        inventory = utils.get_inventory(introspection_data)

        for iface in inventory['interfaces']:
            if_name = iface['name']

            tlvs = iface.get('lldp')
            if tlvs is None:
                LOG.warning("No LLDP Data found for interface %s", if_name,
                            node_info=node_info)
                continue

            LOG.debug("Processing LLDP Data for interface %s", if_name,
                      node_info=node_info)

            nv = self._parse_lldp_tlvs(tlvs, node_info)

            if nv:
                # Store lldp data per interface in "all_interfaces"
                iface_to_update = introspection_data[
                    'all_interfaces'][if_name]
                iface_to_update['lldp_processed'] = nv
ironic-inspector-7.2.0/ironic_inspector/plugins/pci_devices.py0000666000175100017510000000630413241323457024725 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Gather and distinguish PCI devices from inventory."""

import collections
import json

from oslo_config import cfg

from ironic_inspector.common.i18n import _
from ironic_inspector.plugins import base
from ironic_inspector import utils

PCI_DEVICES_OPTS = [
    cfg.MultiStrOpt('alias',
                    default=[],
                    help=_('An alias for PCI device identified by '
                           '\'vendor_id\' and \'product_id\' fields. Format: '
                           '{"vendor_id": "1234", "product_id": "5678", '
                           '"name": "pci_dev1"}')),
]


def list_opts():
    return [
        ('pci_devices', PCI_DEVICES_OPTS)
    ]

CONF = cfg.CONF
CONF.register_opts(PCI_DEVICES_OPTS, group='pci_devices')

LOG = utils.getProcessingLogger(__name__)


def _parse_pci_alias_entry():
    parsed_pci_devices = []
    for pci_alias_entry in CONF.pci_devices.alias:
        try:
            parsed_entry = json.loads(pci_alias_entry)
            if set(parsed_entry) != {'vendor_id', 'product_id', 'name'}:
                raise KeyError("The 'alias' entry should contain "
                               "exactly 'vendor_id', 'product_id' and "
                               "'name' keys")
            parsed_pci_devices.append(parsed_entry)
        except (ValueError, KeyError) as ex:
            LOG.error("Error parsing 'alias' option: %s", ex)
    return {(dev['vendor_id'], dev['product_id']): dev['name']
            for dev in parsed_pci_devices}


class PciDevicesHook(base.ProcessingHook):
    """Processing hook for counting and distinguishing various PCI devices.

    That information can be later used by nova for node scheduling.
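
    An illustrative alias configuration (the vendor/product IDs below are
    example values only)::

        [pci_devices]
        alias = {"vendor_id": "8086", "product_id": "1572", "name": "nic_10g"}

    Matching devices found in the inventory are counted per alias and
    stored as node capabilities, e.g. nic_10g:2.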
""" aliases = _parse_pci_alias_entry() def _found_pci_devices_count(self, found_pci_devices): return collections.Counter([(dev['vendor_id'], dev['product_id']) for dev in found_pci_devices if (dev['vendor_id'], dev['product_id']) in self.aliases]) def before_update(self, introspection_data, node_info, **kwargs): if 'pci_devices' not in introspection_data: if CONF.pci_devices.alias: LOG.warning('No PCI devices information was received from ' 'the ramdisk.') return alias_count = {self.aliases[id_pair]: count for id_pair, count in self._found_pci_devices_count( introspection_data['pci_devices']).items()} if alias_count: node_info.update_capabilities(**alias_count) LOG.info('Found the following PCI devices: %s', alias_count) ironic-inspector-7.2.0/ironic_inspector/plugins/local_link_connection.py0000666000175100017510000001402413241323457026774 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Generic LLDP Processing Hook""" import binascii from construct import core import netaddr from oslo_config import cfg from oslo_utils import netutils from ironic_inspector.common import lldp_parsers from ironic_inspector.common import lldp_tlvs as tlv from ironic_inspector.plugins import base from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) CONF = cfg.CONF PORT_ID_ITEM_NAME = "port_id" SWITCH_ID_ITEM_NAME = "switch_id" LLDP_PROC_DATA_MAPPING =\ {lldp_parsers.LLDP_CHASSIS_ID_NM: SWITCH_ID_ITEM_NAME, lldp_parsers.LLDP_PORT_ID_NM: PORT_ID_ITEM_NAME} class GenericLocalLinkConnectionHook(base.ProcessingHook): """Process mandatory LLDP packet fields Non-vendor specific LLDP packet fields processed for each NIC found for a baremetal node, port ID and chassis ID. These fields if found and if valid will be saved into the local link connection info port id and switch id fields on the Ironic port that represents that NIC. 
""" def _get_local_link_patch(self, tlv_type, tlv_value, port, node_info): try: data = bytearray(binascii.unhexlify(tlv_value)) except TypeError: LOG.warning("TLV value for TLV type %d not in correct" "format, ensure TLV value is in " "hexidecimal format when sent to " "inspector", tlv_type, node_info=node_info) return item = value = None if tlv_type == tlv.LLDP_TLV_PORT_ID: try: port_id = tlv.PortId.parse(data) except (core.MappingError, netaddr.AddrFormatError) as e: LOG.warning("TLV parse error for Port ID: %s", e, node_info=node_info) return item = PORT_ID_ITEM_NAME value = port_id.value elif tlv_type == tlv.LLDP_TLV_CHASSIS_ID: try: chassis_id = tlv.ChassisId.parse(data) except (core.MappingError, netaddr.AddrFormatError) as e: LOG.warning("TLV parse error for Chassis ID: %s", e, node_info=node_info) return # Only accept mac address for chassis ID if 'mac_address' in chassis_id.subtype: item = SWITCH_ID_ITEM_NAME value = chassis_id.value if item and value: if (not CONF.processing.overwrite_existing and item in port.local_link_connection): return return {'op': 'add', 'path': '/local_link_connection/%s' % item, 'value': value} def _get_lldp_processed_patch(self, name, item, lldp_proc_data, port, node_info): if 'lldp_processed' not in lldp_proc_data: return value = lldp_proc_data['lldp_processed'].get(name) if value: # Only accept mac address for chassis ID if (item == SWITCH_ID_ITEM_NAME and not netutils.is_valid_mac(value)): LOG.info("Skipping switch_id since it's not a MAC: %s", value, node_info=node_info) return if (not CONF.processing.overwrite_existing and item in port.local_link_connection): return return {'op': 'add', 'path': '/local_link_connection/%s' % item, 'value': value} def before_update(self, introspection_data, node_info, **kwargs): """Process LLDP data and patch Ironic port local link connection""" inventory = utils.get_inventory(introspection_data) ironic_ports = node_info.ports() for iface in inventory['interfaces']: if iface['name'] not in introspection_data['all_interfaces']: continue mac_address = iface['mac_address'] port = ironic_ports.get(mac_address) if not port: LOG.debug("Skipping LLC processing for interface %s, matching " "port not found in Ironic.", mac_address, node_info=node_info, data=introspection_data) continue lldp_data = iface.get('lldp') if lldp_data is None: LOG.warning("No LLDP Data found for interface %s", mac_address, node_info=node_info, data=introspection_data) continue patches = [] # First check if lldp data was already processed by lldp_basic # plugin which stores data in 'all_interfaces' proc_data = introspection_data['all_interfaces'][iface['name']] for name, item in LLDP_PROC_DATA_MAPPING.items(): patch = self._get_lldp_processed_patch(name, item, proc_data, port, node_info) if patch is not None: patches.append(patch) # If no processed lldp data was available then parse raw lldp data if not patches: for tlv_type, tlv_value in lldp_data: patch = self._get_local_link_patch(tlv_type, tlv_value, port, node_info) if patch is not None: patches.append(patch) node_info.patch_port(port, patches) ironic-inspector-7.2.0/ironic_inspector/plugins/rules.py0000666000175100017510000001016513241323457023602 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Standard plugins for rules API.""" import operator import re import netaddr from ironic_inspector.common.i18n import _ from ironic_inspector.plugins import base from ironic_inspector import utils def coerce(value, expected): if isinstance(expected, float): return float(value) elif isinstance(expected, int): return int(value) else: return value class SimpleCondition(base.RuleConditionPlugin): op = None def check(self, node_info, field, params, **kwargs): value = params['value'] return self.op(coerce(field, value), value) class EqCondition(SimpleCondition): op = operator.eq class LtCondition(SimpleCondition): op = operator.lt class GtCondition(SimpleCondition): op = operator.gt class LeCondition(SimpleCondition): op = operator.le class GeCondition(SimpleCondition): op = operator.ge class NeCondition(SimpleCondition): op = operator.ne class EmptyCondition(base.RuleConditionPlugin): REQUIRED_PARAMS = set() ALLOW_NONE = True def check(self, node_info, field, params, **kwargs): return field in ('', None, [], {}) class NetCondition(base.RuleConditionPlugin): def validate(self, params, **kwargs): super(NetCondition, self).validate(params, **kwargs) # Make sure it does not raise try: netaddr.IPNetwork(params['value']) except netaddr.AddrFormatError as exc: raise ValueError('invalid value: %s' % exc) def check(self, node_info, field, params, **kwargs): network = netaddr.IPNetwork(params['value']) return netaddr.IPAddress(field) in network class ReCondition(base.RuleConditionPlugin): def validate(self, params, **kwargs): try: re.compile(params['value']) except re.error as exc: raise ValueError(_('invalid regular expression: %s') % exc) class MatchesCondition(ReCondition): def check(self, node_info, field, params, **kwargs): regexp = params['value'] if regexp[-1] != '$': regexp += '$' return re.match(regexp, str(field)) is not None class ContainsCondition(ReCondition): def check(self, node_info, field, params, **kwargs): return re.search(params['value'], str(field)) is not None class FailAction(base.RuleActionPlugin): REQUIRED_PARAMS = {'message'} def apply(self, node_info, params, **kwargs): raise utils.Error(params['message'], node_info=node_info) class SetAttributeAction(base.RuleActionPlugin): REQUIRED_PARAMS = {'path', 'value'} # TODO(dtantsur): proper validation of path FORMATTED_PARAMS = ['value'] def apply(self, node_info, params, **kwargs): node_info.patch([{'op': 'add', 'path': params['path'], 'value': params['value']}]) class SetCapabilityAction(base.RuleActionPlugin): REQUIRED_PARAMS = {'name'} OPTIONAL_PARAMS = {'value'} FORMATTED_PARAMS = ['value'] def apply(self, node_info, params, **kwargs): node_info.update_capabilities( **{params['name']: params.get('value')}) class ExtendAttributeAction(base.RuleActionPlugin): REQUIRED_PARAMS = {'path', 'value'} OPTIONAL_PARAMS = {'unique'} # TODO(dtantsur): proper validation of path FORMATTED_PARAMS = ['value'] def apply(self, node_info, params, **kwargs): def _replace(values): value = params['value'] if not params.get('unique') or value not in values: values.append(value) return values node_info.replace_field(params['path'], _replace, 
default=[]) ironic-inspector-7.2.0/ironic_inspector/plugins/standard.py0000666000175100017510000003140713241323457024252 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Standard set of plugins.""" from ironic_lib import utils as il_utils import netaddr from oslo_config import cfg from oslo_utils import netutils from oslo_utils import units import six from ironic_inspector.common.i18n import _ from ironic_inspector.plugins import base from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard') class RootDiskSelectionHook(base.ProcessingHook): """Smarter root disk selection using Ironic root device hints. This hook must always go before SchedulerHook, otherwise root_disk field might not be updated. """ def _process_root_device_hints(self, introspection_data, node_info, inventory): """Detect root disk from root device hints and IPA inventory.""" hints = node_info.node().properties.get('root_device') if not hints: LOG.debug('Root device hints are not provided', node_info=node_info, data=introspection_data) return try: device = il_utils.match_root_device_hints(inventory['disks'], hints) except (TypeError, ValueError) as e: raise utils.Error( _('No disks could be found using the root device hints ' '%(hints)s because they failed to validate. 
' 'Error: %(error)s') % {'hints': hints, 'error': e}, node_info=node_info, data=introspection_data) if not device: raise utils.Error(_('No disks satisfied root device hints'), node_info=node_info, data=introspection_data) LOG.debug('Disk %(disk)s of size %(size)s satisfies ' 'root device hints', {'disk': device.get('name'), 'size': device['size']}, node_info=node_info, data=introspection_data) introspection_data['root_disk'] = device def before_update(self, introspection_data, node_info, **kwargs): """Process root disk information.""" inventory = utils.get_inventory(introspection_data, node_info=node_info) self._process_root_device_hints(introspection_data, node_info, inventory) root_disk = introspection_data.get('root_disk') if root_disk: local_gb = root_disk['size'] // units.Gi if CONF.processing.disk_partitioning_spacing: local_gb -= 1 LOG.info('Root disk %(disk)s, local_gb %(local_gb)s GiB', {'disk': root_disk, 'local_gb': local_gb}, node_info=node_info, data=introspection_data) else: local_gb = 0 LOG.info('No root device found, assuming a diskless node', node_info=node_info, data=introspection_data) introspection_data['local_gb'] = local_gb if (CONF.processing.overwrite_existing or not node_info.node().properties.get('local_gb')): node_info.update_properties(local_gb=str(local_gb)) class SchedulerHook(base.ProcessingHook): """Nova scheduler required properties.""" KEYS = ('cpus', 'cpu_arch', 'memory_mb') def before_update(self, introspection_data, node_info, **kwargs): """Update node with scheduler properties.""" inventory = utils.get_inventory(introspection_data, node_info=node_info) errors = [] try: introspection_data['cpus'] = int(inventory['cpu']['count']) introspection_data['cpu_arch'] = six.text_type( inventory['cpu']['architecture']) except (KeyError, ValueError, TypeError): errors.append(_('malformed or missing CPU information: %s') % inventory.get('cpu')) try: introspection_data['memory_mb'] = int( inventory['memory']['physical_mb']) except (KeyError, ValueError, TypeError): errors.append(_('malformed or missing memory information: %s; ' 'introspection requires physical memory size ' 'from dmidecode') % inventory.get('memory')) if errors: raise utils.Error(_('The following problems encountered: %s') % '; '.join(errors), node_info=node_info, data=introspection_data) LOG.info('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, ' 'memory %(memory_mb)s MiB', {key: introspection_data.get(key) for key in self.KEYS}, node_info=node_info, data=introspection_data) overwrite = CONF.processing.overwrite_existing properties = {key: str(introspection_data[key]) for key in self.KEYS if overwrite or not node_info.node().properties.get(key)} node_info.update_properties(**properties) class ValidateInterfacesHook(base.ProcessingHook): """Hook to validate network interfaces.""" def __init__(self): # Some configuration checks if (CONF.processing.add_ports == 'disabled' and CONF.processing.keep_ports == 'added'): msg = _("Configuration error: add_ports set to disabled " "and keep_ports set to added. Please change keep_ports " "to all.") raise utils.Error(msg) def _get_interfaces(self, data=None): """Convert inventory to a dict with interfaces. 
:return: dict interface name -> dict with keys 'mac' and 'ip' """ result = {} inventory = utils.get_inventory(data) pxe_mac = utils.get_pxe_mac(data) for iface in inventory['interfaces']: name = iface.get('name') mac = iface.get('mac_address') ip = iface.get('ipv4_address') client_id = iface.get('client_id') if not name: LOG.error('Malformed interface record: %s', iface, data=data) continue if not mac: LOG.debug('Skipping interface %s without link information', name, data=data) continue if not netutils.is_valid_mac(mac): LOG.warning('MAC %(mac)s for interface %(name)s is ' 'not valid, skipping', {'mac': mac, 'name': name}, data=data) continue mac = mac.lower() LOG.debug('Found interface %(name)s with MAC "%(mac)s", ' 'IP address "%(ip)s" and client_id "%(client_id)s"', {'name': name, 'mac': mac, 'ip': ip, 'client_id': client_id}, data=data) result[name] = {'ip': ip, 'mac': mac, 'client_id': client_id, 'pxe': (mac == pxe_mac)} return result def _validate_interfaces(self, interfaces, data=None): """Validate interfaces on correctness and suitability. :return: dict interface name -> dict with keys 'mac' and 'ip' """ if not interfaces: raise utils.Error(_('No interfaces supplied by the ramdisk'), data=data) pxe_mac = utils.get_pxe_mac(data) if not pxe_mac and CONF.processing.add_ports == 'pxe': LOG.warning('No boot interface provided in the introspection ' 'data, will add all ports with IP addresses') result = {} for name, iface in interfaces.items(): ip = iface.get('ip') pxe = iface.get('pxe', True) if name == 'lo' or (ip and netaddr.IPAddress(ip).is_loopback()): LOG.debug('Skipping local interface %s', name, data=data) continue if CONF.processing.add_ports == 'pxe' and pxe_mac and not pxe: LOG.debug('Skipping interface %s as it was not PXE booting', name, data=data) continue elif CONF.processing.add_ports != 'all' and not ip: LOG.debug('Skipping interface %s as it did not have ' 'an IP address assigned during the ramdisk run', name, data=data) continue result[name] = iface if not result: raise utils.Error(_('No suitable interfaces found in %s') % interfaces, data=data) return result def before_processing(self, introspection_data, **kwargs): """Validate information about network interfaces.""" bmc_address = utils.get_ipmi_address_from_data(introspection_data) # Overwrite the old ipmi_address field to avoid inconsistency introspection_data['ipmi_address'] = bmc_address if not bmc_address: LOG.debug('No BMC address provided in introspection data, ' 'assuming virtual environment', data=introspection_data) all_interfaces = self._get_interfaces(introspection_data) interfaces = self._validate_interfaces(all_interfaces, introspection_data) LOG.info('Using network interface(s): %s', ', '.join('%s %s' % (name, items) for (name, items) in interfaces.items()), data=introspection_data) introspection_data['all_interfaces'] = all_interfaces introspection_data['interfaces'] = interfaces valid_macs = [iface['mac'] for iface in interfaces.values()] introspection_data['macs'] = valid_macs def before_update(self, introspection_data, node_info, **kwargs): """Create new ports and drop ports that are not present in the data.""" interfaces = introspection_data.get('interfaces') if CONF.processing.add_ports != 'disabled': node_info.create_ports(list(interfaces.values())) if CONF.processing.keep_ports == 'present': expected_macs = { iface['mac'] for iface in introspection_data['all_interfaces'].values() } elif CONF.processing.keep_ports == 'added': expected_macs = set(introspection_data['macs']) if 
CONF.processing.keep_ports != 'all': # list is required as we modify underlying dict for port in list(node_info.ports().values()): if port.address not in expected_macs: LOG.info("Deleting port %(port)s as its MAC %(mac)s is " "not in expected MAC list %(expected)s", {'port': port.uuid, 'mac': port.address, 'expected': list(sorted(expected_macs))}, node_info=node_info, data=introspection_data) node_info.delete_port(port) if CONF.processing.overwrite_existing: # Make sure pxe_enabled is up-to-date ports = node_info.ports() for iface in introspection_data['interfaces'].values(): try: port = ports[iface['mac']] except KeyError: continue real_pxe = iface.get('pxe', True) if port.pxe_enabled != real_pxe: LOG.info('Fixing pxe_enabled=%(val)s on port %(port)s ' 'to match introspected data', {'port': port.address, 'val': real_pxe}, node_info=node_info, data=introspection_data) node_info.patch_port(port, [{'op': 'replace', 'path': '/pxe_enabled', 'value': real_pxe}]) class RamdiskErrorHook(base.ProcessingHook): """Hook to process errors sent from the ramdisk.""" def before_processing(self, introspection_data, **kwargs): error = introspection_data.get('error') if error: raise utils.Error(_('Ramdisk reported error: %s') % error, data=introspection_data) ironic-inspector-7.2.0/ironic_inspector/plugins/extra_hardware.py0000666000175100017510000000750413241323457025443 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Plugin to store extra hardware information in Swift. Stores the value of the 'data' key returned by the ramdisk as a JSON encoded string in a Swift object. The object is named 'extra_hardware-<node uuid>' and is stored in the 'inspector' container. """ import json from ironic_inspector.common import swift from ironic_inspector.plugins import base from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) EDEPLOY_ITEM_SIZE = 4 class ExtraHardwareHook(base.ProcessingHook): """Processing hook for saving extra hardware information in Swift.""" def _store_extra_hardware(self, name, data): """Handles storing the extra hardware data from the ramdisk""" swift_api = swift.SwiftAPI() swift_api.create_object(name, data) def before_update(self, introspection_data, node_info, **kwargs): """Stores the 'data' key from introspection_data in Swift. If the 'data' key exists, updates Ironic extra column 'hardware_swift_object' key to the name of the Swift object, and stores the data in the 'inspector' container in Swift. Otherwise, it does nothing. """ if 'data' not in introspection_data: LOG.warning('No extra hardware information was received from ' 'the ramdisk', node_info=node_info, data=introspection_data) return data = introspection_data['data'] name = 'extra_hardware-%s' % node_info.uuid self._store_extra_hardware(name, json.dumps(data)) # NOTE(sambetts) If data is edeploy format, convert to dicts for rules # processing, store converted data in introspection_data['extra']. # Delete introspection_data['data'], it is assumed unusable # by rules.
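        # An illustrative sketch of the eDeploy format handled below; the
        # example values are assumptions, not taken from this tree. eDeploy
        # data is a flat list of 4-item lists, e.g.:
        #
        #   data = [['disk', 'sda', 'size', '100'],
        #           ['cpu', 'logical', 'number', '8']]
        #
        # which _convert_edeploy_data() turns into nested dicts usable by
        # introspection rules:
        #
        #   {'disk': {'sda': {'size': 100}},
        #    'cpu': {'logical': {'number': 8}}}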
if self._is_edeploy_data(data): LOG.debug('Extra hardware data is in eDeploy format, ' 'converting to usable format', node_info=node_info, data=introspection_data) introspection_data['extra'] = self._convert_edeploy_data(data) else: LOG.warning('Extra hardware data was not in a recognised ' 'format (eDeploy), and will not be forwarded to ' 'introspection rules', node_info=node_info, data=introspection_data) LOG.debug('Deleting \"data\" key from introspection data as it is ' 'assumed unusable by introspection rules. Raw data is ' 'stored in swift', node_info=node_info, data=introspection_data) del introspection_data['data'] node_info.patch([{'op': 'add', 'path': '/extra/hardware_swift_object', 'value': name}]) def _is_edeploy_data(self, data): return all(isinstance(item, list) and len(item) == EDEPLOY_ITEM_SIZE for item in data) def _convert_edeploy_data(self, data): converted = {} for item in data: converted_0 = converted.setdefault(item[0], {}) converted_1 = converted_0.setdefault(item[1], {}) try: item[3] = int(item[3]) except (ValueError, TypeError): pass converted_1[item[2]] = item[3] return converted ironic-inspector-7.2.0/ironic_inspector/plugins/capabilities.py0000666000175100017510000000526513241323457025106 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
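# A hedged configuration sketch for the hook below. The option names are
# taken from the code; the flag-to-capability pairs are illustrative
# assumptions, not necessarily this tree's defaults:
#
#   [capabilities]
#   boot_mode = True
#   cpu_flags = vmx:cpu_vt,svm:cpu_vt,aes:cpu_aes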
"""Gather capabilities from inventory.""" from oslo_config import cfg from ironic_inspector.plugins import base from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger(__name__) class CapabilitiesHook(base.ProcessingHook): """Processing hook for detecting capabilities.""" def _detect_boot_mode(self, inventory, node_info, data=None): boot_mode = inventory.get('boot', {}).get('current_boot_mode') if boot_mode is not None: LOG.info('Boot mode was %s', boot_mode, data=data, node_info=node_info) return {'boot_mode': boot_mode} else: LOG.warning('No boot mode information available', data=data, node_info=node_info) return {} def _detect_cpu_flags(self, inventory, node_info, data=None): flags = inventory['cpu'].get('flags') if not flags: LOG.warning('No CPU flags available, please update your ' 'introspection ramdisk', data=data, node_info=node_info) return {} flags = set(flags) caps = {} for flag, name in CONF.capabilities.cpu_flags.items(): if flag in flags: caps[name] = 'true' LOG.info('CPU capabilities: %s', list(caps), data=data, node_info=node_info) return caps def before_update(self, introspection_data, node_info, **kwargs): inventory = utils.get_inventory(introspection_data) caps = {} if CONF.capabilities.boot_mode: caps.update(self._detect_boot_mode(inventory, node_info, introspection_data)) caps.update(self._detect_cpu_flags(inventory, node_info, introspection_data)) if caps: LOG.debug('New capabilities: %s', caps, node_info=node_info, data=introspection_data) node_info.update_capabilities(**caps) else: LOG.debug('No new capabilities detected', node_info=node_info, data=introspection_data) ironic-inspector-7.2.0/ironic_inspector/wsgi_service.py0000666000175100017510000001617713241323457023471 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ssl import sys import traceback as traceback_mod import eventlet from eventlet import semaphore from futurist import periodics from oslo_config import cfg from oslo_log import log from oslo_utils import reflection from ironic_inspector.common import ironic as ir_utils from ironic_inspector import db from ironic_inspector import main as app from ironic_inspector import node_cache from ironic_inspector.plugins import base as plugins_base from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector import utils LOG = log.getLogger(__name__) CONF = cfg.CONF class WSGIService(object): """Provides ability to launch API from wsgi app.""" def __init__(self): self.app = app.app self._periodics_worker = None self._shutting_down = semaphore.Semaphore() def _init_middleware(self): """Initialize WSGI middleware. :returns: None """ if CONF.auth_strategy != 'noauth': utils.add_auth_middleware(self.app) else: LOG.warning('Starting unauthenticated, please check' ' configuration') # TODO(aarefiev): move to WorkerService once we split service if CONF.processing.store_data == 'none': LOG.warning('Introspection data will not be stored. 
Change ' '"[processing] store_data" option if this is not ' 'the desired behavior') elif CONF.processing.store_data == 'swift': LOG.info('Introspection data will be stored in Swift in the ' 'container %s', CONF.swift.container) utils.add_cors_middleware(self.app) def _create_ssl_context(self): if not CONF.use_ssl: return MIN_VERSION = (2, 7, 9) if sys.version_info < MIN_VERSION: LOG.warning(('Unable to use SSL in this version of Python: ' '%(current)s, please ensure your version of Python ' 'is greater than %(min)s to enable this feature.'), {'current': '.'.join(map(str, sys.version_info[:3])), 'min': '.'.join(map(str, MIN_VERSION))}) return context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH) if CONF.ssl_cert_path and CONF.ssl_key_path: try: context.load_cert_chain(CONF.ssl_cert_path, CONF.ssl_key_path) except IOError as exc: LOG.warning('Failed to load certificate or key from defined ' 'locations: %(cert)s and %(key)s, will continue ' 'to run with the default settings: %(exc)s', {'cert': CONF.ssl_cert_path, 'key': CONF.ssl_key_path, 'exc': exc}) except ssl.SSLError as exc: LOG.warning('There was a problem with the loaded certificate ' 'and key, will continue to run with the default ' 'settings: %s', exc) return context # TODO(aarefiev): move init code to WorkerService def _init_host(self): """Initialize Worker host Init db connection, load and validate processing hooks, runs periodic tasks. :returns None """ db.init() try: hooks = plugins_base.validate_processing_hooks() except Exception as exc: LOG.critical(str(exc)) sys.exit(1) LOG.info('Enabled processing hooks: %s', [h.name for h in hooks]) driver = pxe_filter.driver() driver.init_filter() periodic_clean_up_ = periodics.periodic( spacing=CONF.clean_up_period )(periodic_clean_up) self._periodics_worker = periodics.PeriodicWorker( callables=[(driver.get_periodic_sync_task(), None, None), (periodic_clean_up_, None, None)], executor_factory=periodics.ExistingExecutor(utils.executor()), on_failure=self._periodics_watchdog) utils.executor().submit(self._periodics_worker.start) def _periodics_watchdog(self, callable_, activity, spacing, exc_info, traceback=None): LOG.exception("The periodic %(callable)s failed with: %(exception)s", { 'exception': ''.join(traceback_mod.format_exception(*exc_info)), 'callable': reflection.get_callable_name(callable_)}) # NOTE(milan): spawn new thread otherwise waiting would block eventlet.spawn(self.shutdown, error=str(exc_info[1])) def shutdown(self, error=None): """Stop serving API, clean up. :returns: None """ # TODO(aarefiev): move shutdown code to WorkerService if not self._shutting_down.acquire(blocking=False): LOG.warning('Attempted to shut down while already shutting down') return LOG.debug('Shutting down') if self._periodics_worker is not None: try: self._periodics_worker.stop() self._periodics_worker.wait() except Exception as e: LOG.exception('Service error occurred when stopping ' 'periodic workers. Error: %s', e) self._periodics_worker = None if utils.executor().alive: utils.executor().shutdown(wait=True) pxe_filter.driver().tear_down_filter() self._shutting_down.release() LOG.info('Shut down successfully') sys.exit(error) def run(self): """Start serving this service using loaded application. 
:returns: None """ app_kwargs = {'host': CONF.listen_address, 'port': CONF.listen_port} context = self._create_ssl_context() if context: app_kwargs['ssl_context'] = context self._init_middleware() self._init_host() try: self.app.run(**app_kwargs) except Exception as e: self.shutdown(error=str(e)) else: self.shutdown() def periodic_clean_up(): # pragma: no cover try: if node_cache.clean_up(): pxe_filter.driver().sync(ir_utils.get_client()) sync_with_ironic() except Exception: LOG.exception('Periodic clean up of node cache failed') def sync_with_ironic(): ironic = ir_utils.get_client() # TODO(yuikotakada): pagination ironic_nodes = ironic.node.list(limit=0) ironic_node_uuids = {node.uuid for node in ironic_nodes} node_cache.delete_nodes_not_in_list(ironic_node_uuids) ironic-inspector-7.2.0/ironic_inspector/introspection_state.py0000666000175100017510000001102713241323457025065 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Introspection state.""" from automaton import machines class States(object): """States of an introspection.""" # received a request to abort the introspection aborting = 'aborting' # received introspection data from a nonexistent node # active - the inspector performs an operation on the node enrolling = 'enrolling' # an error appeared in a previous introspection state # passive - the inspector doesn't perform any operation on the node error = 'error' # introspection finished successfully # passive finished = 'finished' # processing introspection data from the node # active processing = 'processing' # processing stored introspection data from the node # active reapplying = 'reapplying' # received a request to start node introspection # active starting = 'starting' # waiting for node introspection data # passive waiting = 'waiting' @classmethod def all(cls): """Return a list of all states.""" return [cls.starting, cls.waiting, cls.processing, cls.finished, cls.error, cls.reapplying, cls.enrolling, cls.aborting] class Events(object): """Events that change introspection state.""" # cancel a waiting node introspection # API, user abort = 'abort' # finish the abort request # internal abort_end = 'abort_end' # mark an introspection failed # internal error = 'error' # mark an introspection finished # internal finish = 'finish' # process node introspection data # API, introspection image process = 'process' # process stored node introspection data # API, user reapply = 'reapply' # initialize node introspection # API, user start = 'start' # mark an introspection timed-out waiting for data # internal timeout = 'timeout' # mark an introspection waiting for image data # internal wait = 'wait' @classmethod def all(cls): """Return a list of all events.""" return [cls.process, cls.reapply, cls.timeout, cls.wait, cls.abort, cls.error, cls.finish] # Error transition is allowed in any state. 
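# A minimal usage sketch, not part of this module: the automaton FSM built
# from the table below can be driven with the States and Events above, e.g.
#
#   fsm = machines.FiniteMachine.build(State_space)
#   fsm.initialize(States.starting)
#   fsm.process_event(Events.wait)     # -> States.waiting
#   fsm.process_event(Events.process)  # -> States.processing
#   fsm.process_event(Events.finish)   # -> States.finished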
State_space = [ { 'name': States.aborting, 'next_states': { Events.abort_end: States.error, Events.timeout: States.error, } }, { 'name': States.enrolling, 'next_states': { Events.error: States.error, Events.process: States.processing, Events.timeout: States.error, }, }, { 'name': States.error, 'next_states': { Events.abort: States.error, Events.error: States.error, Events.reapply: States.reapplying, Events.start: States.starting, }, }, { 'name': States.finished, 'next_states': { Events.finish: States.finished, Events.reapply: States.reapplying, Events.start: States.starting }, }, { 'name': States.processing, 'next_states': { Events.error: States.error, Events.finish: States.finished, Events.timeout: States.error, }, }, { 'name': States.reapplying, 'next_states': { Events.error: States.error, Events.finish: States.finished, Events.reapply: States.reapplying, Events.timeout: States.error, }, }, { 'name': States.starting, 'next_states': { Events.error: States.error, Events.wait: States.waiting, Events.timeout: States.error }, }, { 'name': States.waiting, 'next_states': { Events.abort: States.aborting, Events.process: States.processing, Events.start: States.starting, Events.timeout: States.error, }, }, ] FSM = machines.FiniteMachine.build(State_space) FSM.default_start_state = States.finished ironic-inspector-7.2.0/ironic_inspector/locale/0000775000175100017510000000000013241324014021635 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/locale/en_GB/0000775000175100017510000000000013241324014022607 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000013241324014024374 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/locale/en_GB/LC_MESSAGES/ironic_inspector.po0000666000175100017510000006060113241323457030324 0ustar zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: ironic-inspector VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-02-03 16:44+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-01 09:56+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en-GB\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "A (shell) command line to start the dnsmasq service upon filter " "initialization. Default: don't start." msgstr "" "A (shell) command line to start the dnsmasq service upon filter " "initialisation. Default: don't start." msgid "" "A (shell) command line to stop the dnsmasq service upon inspector (error) " "exit. Default: don't stop." msgstr "" "A (shell) command line to stop the dnsmasq service upon inspector (error) " "exit. Default: don't stop." msgid "Access denied by policy" msgstr "Access denied by policy" msgid "" "Amount of time in seconds, after which repeat clean up of timed out nodes " "and old nodes status information." msgstr "" "Amount of time in seconds, after which repeat clean up of timed out nodes " "and old nodes status information." msgid "" "Amount of time in seconds, after which repeat periodic update of the filter." msgstr "" "Amount of time in seconds, after which repeat periodic update of the filter." msgid "" "An alias for PCI device identified by 'vendor_id' and 'product_id' fields. 
" "Format: {\"vendor_id\": \"1234\", \"product_id\": \"5678\", \"name\": " "\"pci_dev1\"}" msgstr "" "An alias for PCI device identified by 'vendor_id' and 'product_id' fields. " "Format: {\"vendor_id\": \"1234\", \"product_id\": \"5678\", \"name\": " "\"pci_dev1\"}" msgid "" "Authentication method used on the ironic-inspector API. Either \"noauth\" or " "\"keystone\" are currently valid options. \"noauth\" will disable all " "authentication." msgstr "" "Authentication method used on the ironic-inspector API. Either \"noauth\" or " "\"keystone\" are currently valid options. \"noauth\" will disable all " "authentication." msgid "Authentication required" msgstr "Authentication required" #, python-format msgid "Bad request: %s" msgstr "Bad request: %s" msgid "Canceled by operator" msgstr "Cancelled by operator" #, python-format msgid "Cannot get node %(node)s: %(exc)s" msgstr "Cannot get node %(node)s: %(exc)s" msgid "" "Comma-separated list of default hooks for processing pipeline. Hook " "'scheduler' updates the node with the minimum properties required by the " "Nova scheduler. Hook 'validate_interfaces' ensures that valid NIC data was " "provided by the ramdisk. Do not exclude these two unless you really know " "what you're doing." msgstr "" "Comma-separated list of default hooks for processing pipeline. Hook " "'scheduler' updates the node with the minimum properties required by the " "Nova scheduler. Hook 'validate_interfaces' ensures that valid NIC data was " "provided by the ramdisk. Do not exclude these two unless you really know " "what you're doing." msgid "" "Comma-separated list of enabled hooks for processing pipeline. The default " "for this is $default_processing_hooks, hooks can be added before or after " "the defaults like this: \"prehook,$default_processing_hooks,posthook\"." msgstr "" "Comma-separated list of enabled hooks for processing pipeline. The default " "for this is $default_processing_hooks, hooks can be added before or after " "the defaults like this: \"prehook,$default_processing_hooks,posthook\"." msgid "" "Configuration error: add_ports set to disabled and keep_ports set to added. " "Please change keep_ports to all." msgstr "" "Configuration error: add_ports set to disabled and keep_ports set to added. " "Please change keep_ports to all." #, python-format msgid "Could not find a node for attributes %s" msgstr "Could not find a node for attributes %s" #, python-format msgid "Could not find node %s in cache" msgstr "Could not find node %s in cache" #, python-format msgid "" "Could not find node %s in introspection cache, probably it's not on " "introspection now" msgstr "" "Could not find node %s in introspection cache, probably it's not on " "introspection now" msgid "Default Swift container to use when creating objects." msgstr "Default Swift container to use when creating objects." msgid "Delay (in seconds) between two introspections." msgstr "Delay (in seconds) between two introspections." 
#, python-format msgid "Encountered an exception while getting the Ironic client: %s" msgstr "Encountered an exception while getting the Ironic client: %s" #, python-format msgid "" "Failed to power off node %(node)s, check its power management configuration: " "%(exc)s" msgstr "" "Failed to power off node %(node)s, check its power management configuration: " "%(exc)s" #, python-format msgid "" "Failed to power on the node, check it's power management configuration: %s" msgstr "" "Failed to power on the node, check it's power management configuration: %s" #, python-format msgid "Failed to resolve the hostname (%(value)s) for node %(uuid)s" msgstr "Failed to resolve the hostname (%(value)s) for node %(uuid)s" #, python-format msgid "Failed validation of power interface, reason: %s" msgstr "Failed validation of power interface, reason: %s" msgid "" "File name template for storing ramdisk logs. The following replacements can " "be used: {uuid} - node UUID or \"unknown\", {bmc} - node BMC address or " "\"unknown\", {dt} - current UTC date and time, {mac} - PXE booting MAC or " "\"unknown\"." msgstr "" "File name template for storing ramdisk logs. The following replacements can " "be used: {uuid} - node UUID or \"unknown\", {bmc} - node BMC address or " "\"unknown\", {dt} - current UTC date and time, {mac} - PXE booting MAC or " "\"unknown\"." msgid "" "For how much time (in seconds) to keep status information about nodes after " "introspection was finished for them. Set to 0 (the default) to disable the " "timeout." msgstr "" "For how much time (in seconds) to keep status information about nodes after " "introspection was finished for them. Set to 0 (the default) to disable the " "timeout." msgid "Hardware inventory is empty or missing" msgstr "Hardware inventory is empty or missing" msgid "IP to listen on." msgstr "IP to listen on." msgid "If set, logs from ramdisk will be stored in this directory." msgstr "If set, logs from ramdisk will be stored in this directory." msgid "" "Inspector is not configured to store data. Set the [processing] store_data " "configuration option to change this." msgstr "" "Inspector is not configured to store data. Set the [processing] store_data " "configuration option to change this." msgid "Interface on which dnsmasq listens, the default is for VM's." msgstr "Interface on which dnsmasq listens, the default is for VM's." msgid "Internal server error" msgstr "Internal server error" msgid "Interval between retries in case of conflict error (HTTP 409)." msgstr "Interval between retries in case of conflict error (HTTP 409)." 
#, python-format msgid "Introspection for node %(node)s already finished on %(finish)s" msgstr "Introspection for node %(node)s already finished on %(finish)s" msgid "Invalid UUID value" msgstr "Invalid UUID value" #, python-format msgid "Invalid data: expected a JSON object, got %s" msgstr "Invalid data: expected a JSON object, got %s" #, python-format msgid "Invalid event: %s" msgstr "Invalid event: %s" #, python-format msgid "Invalid formatting variable key provided: %s" msgstr "Invalid formatting variable key provided: %s" #, python-format msgid "Invalid hardware inventory: %s key is missing or empty" msgstr "Invalid hardware inventory: %s key is missing or empty" msgid "Invalid index for mau type" msgstr "Invalid index for mau type" #, python-format msgid "Invalid parameters for action %(act)s: %(error)s" msgstr "Invalid parameters for action %(act)s: %(error)s" #, python-format msgid "Invalid parameters for operator %(op)s: %(error)s" msgstr "Invalid parameters for operator %(op)s: %(error)s" #, python-format msgid "" "Invalid provision state for introspection: \"%(state)s\", valid states are " "\"%(valid)s\"" msgstr "" "Invalid provision state for introspection: \"%(state)s\", valid states are " "\"%(valid)s\"" msgid "" "Ironic API URL, used to set Ironic API URL when auth_strategy option is " "noauth or auth_type is \"none\" to work with standalone Ironic without " "keystone." msgstr "" "Ironic API URL, used to set Ironic API URL when auth_strategy option is " "noauth or auth_type is \"none\" to work with standalone Ironic without " "Keystone." msgid "Ironic driver_info fields that are equivalent to ipmi_address." msgstr "Ironic driver_info fields that are equivalent to ipmi_address." msgid "Ironic endpoint type." msgstr "Ironic endpoint type." msgid "Ironic service type." msgstr "Ironic service type." msgid "Keystone region to get endpoint for." msgstr "Keystone region to get endpoint for." msgid "Keystone region used to get Ironic endpoints." msgstr "Keystone region used to get Ironic endpoints." msgid "Limit cannot be negative" msgstr "Limit cannot be negative" #, python-format msgid "Limit over %s" msgstr "Limit over %s" msgid "Limit the number of elements an API list-call returns" msgstr "Limit the number of elements an API list-call returns" msgid "" "List of Etherent Over InfiniBand interfaces on the Inspector host which are " "used for physical access to the DHCP network. Multiple interfaces would be " "attached to a bond or bridge specified in dnsmasq_interface. The MACs of the " "InfiniBand nodes which are not in desired state are going to be blacklisted " "based on the list of neighbor MACs on these interfaces." msgstr "" "List of Ethernet Over InfiniBand interfaces on the Inspector host which are " "used for physical access to the DHCP network. Multiple interfaces would be " "attached to a bond or bridge specified in dnsmasq_interface. The MACs of the " "InfiniBand nodes which are not in desired state are going to be blacklisted " "based on the list of neighbour MACs on these interfaces." #, python-format msgid "Look up error: %s" msgstr "Look up error: %s" msgid "Malformed API version: expected string in form of X.Y" msgstr "Malformed API version: expected string in form of X.Y" msgid "" "Mapping between a CPU flag and a capability to set if this flag is present." msgstr "" "Mapping between a CPU flag and a capability to set if this flag is present." 
msgid "Marker not UUID-like" msgstr "Marker not UUID-like" msgid "Maximum number of retries in case of conflict error (HTTP 409)." msgstr "Maximum number of retries in case of conflict error (HTTP 409)." msgid "Maximum number of times to retry a Swift request, before failing." msgstr "Maximum number of times to retry a Swift request, before failing." msgid "" "Method for storing introspection data. If set to 'none', introspection data " "will not be stored." msgstr "" "Method for storing introspection data. If set to 'none', introspection data " "will not be stored." msgid "Method to use for authentication: noauth or keystone." msgstr "Method to use for authentication: noauth or Keystone." #, python-format msgid "Multiple nodes match the same number of attributes %(attr)s: %(found)s" msgstr "Multiple nodes match the same number of attributes %(attr)s: %(found)s" msgid "" "Name of the key to store the location of stored data in the extra column of " "the Ironic database." msgstr "" "Name of the key to store the location of stored data in the extra column of " "the Ironic database." #, python-format msgid "" "No disks could be found using the root device hints %(hints)s because they " "failed to validate. Error: %(error)s" msgstr "" "No disks could be found using the root device hints %(hints)s because they " "failed to validate. Error: %(error)s" msgid "No disks satisfied root device hints" msgstr "No disks satisfied root device hints" msgid "No interfaces supplied by the ramdisk" msgstr "No interfaces supplied by the ramdisk" msgid "" "No lookup attributes were found, inspector won't be able to find it after " "introspection, consider creating ironic ports or providing an IPMI address" msgstr "" "No lookup attributes were found, inspector won't be able to find it after " "introspection, consider creating Ironic ports or providing an IPMI address" #, python-format msgid "No suitable interfaces found in %s" msgstr "No suitable interfaces found in %s" #, python-format msgid "Node %(uuid)s already has BMC address %(ipmi_address)s, not enrolling" msgstr "Node %(uuid)s already has BMC address %(ipmi_address)s, not enrolling" #, python-format msgid "Node %s was not found in Ironic" msgstr "Node %s was not found in Ironic" msgid "Node is locked, please, retry later" msgstr "Node is locked, please, retry later" msgid "Node locked, please, try again later" msgstr "Node locked, please, try again later" #, python-format msgid "Node not found for marker: %s" msgstr "Node not found for marker: %s" #, python-format msgid "Node not found hook failed: %s" msgstr "Node not found hook failed: %s" msgid "Node not found hook returned nothing" msgstr "Node not found hook returned nothing" msgid "Node not found in the cache" msgstr "Node not found in the cache" #, python-format msgid "Node processing already finished with error: %s" msgstr "Node processing already finished with error: %s" msgid "" "Node state mismatch detected between the DB and the cached node_info object" msgstr "" "Node state mismatch detected between the DB and the cached node_info object" msgid "" "Number of seconds that the Swift object will last before being deleted. (set " "to 0 to never delete the object)." msgstr "" "Number of seconds that the Swift object will last before being deleted. (set " "to 0 to never delete the object)." 
msgid "PXE boot filter driver to use, such as iptables" msgstr "PXE boot filter driver to use, such as iptables" msgid "Path to SSL certificate" msgstr "Path to SSL certificate" msgid "Path to SSL key" msgstr "Path to SSL key" msgid "" "Path to the rootwrap configuration file to use for running commands as root" msgstr "" "Path to the rootwrap configuration file to use for running commands as root" #, python-format msgid "Port %(mac)s already exists, uuid: %(uuid)s" msgstr "Port %(mac)s already exists, uuid: %(uuid)s" msgid "Port to listen on." msgstr "Port to listen on." #, python-format msgid "" "Pre-processing failures detected reapplying introspection on stored data:\n" "%s" msgstr "" "Pre-processing failures detected reapplying introspection on stored data:\n" "%s" msgid "" "Purge the hostsdir upon driver initialization. Setting to false should only " "be performed when the deployment of inspector is such that there are " "multiple processes executing inside of the same host and namespace. In this " "case, the Operator is responsible for setting up a custom cleaning facility." msgstr "" "Purge the hostsdir upon driver initialisation. Setting to false should only " "be performed when the deployment of inspector is such that there are " "multiple processes executing inside of the same host and namespace. In this " "case, the Operator is responsible for setting up a custom cleaning facility." #, python-format msgid "Ramdisk reported error: %s" msgstr "Ramdisk reported error: %s" #, python-format msgid "Rule %s was not found" msgstr "Rule %s was not found" #, python-format msgid "Rule with UUID %s already exists" msgstr "Rule with UUID %s already exists" msgid "SSL Enabled/Disabled" msgstr "SSL Enabled/Disabled" msgid "Swift endpoint type." msgstr "Swift endpoint type." #, python-format msgid "Swift failed to create container %(container)s. Error was: %(error)s" msgstr "Swift failed to create container %(container)s. Error was: %(error)s" #, python-format msgid "" "Swift failed to create object %(object)s in container %(container)s. Error " "was: %(error)s" msgstr "" "Swift failed to create object %(object)s in container %(container)s. Error " "was: %(error)s" #, python-format msgid "" "Swift failed to get object %(object)s in container %(container)s. Error was: " "%(error)s" msgstr "" "Swift failed to get object %(object)s in container %(container)s. Error was: " "%(error)s" msgid "Swift service type." msgstr "Swift service type." msgid "Swift support is disabled" msgstr "Swift support is disabled" msgid "" "The MAC address cache directory, exposed to dnsmasq.This directory is " "expected to be in exclusive control of the driver." msgstr "" "The MAC address cache directory, exposed to dnsmasq.This directory is " "expected to be in the exclusive control of the driver." #, python-format msgid "" "The PXE filter driver %(driver)s: my fsm encountered an exception: %(error)s" msgstr "" "The PXE filter driver %(driver)s: my fsm encountered an exception: %(error)s" #, python-format msgid "" "The following failures happened during running pre-processing hooks:\n" "%s" msgstr "" "The following failures happened during running pre-processing hooks:\n" "%s" #, python-format msgid "The following hook(s) are missing or failed to load: %s" msgstr "The following hook(s) are missing or failed to load: %s" #, python-format msgid "The following problems encountered: %s" msgstr "The following problems encountered: %s" msgid "The green thread pool size." msgstr "The green thread pool size." 
msgid "" "The name of the Ironic driver used by the enroll hook when creating a new " "node in Ironic." msgstr "" "The name of the Ironic driver used by the enrol hook when creating a new " "node in Ironic." msgid "" "The name of the hook to run when inspector receives inspection information " "from a node it isn't already aware of. This hook is ignored by default." msgstr "" "The name of the hook to run when inspector receives inspection information " "from a node it isn't already aware of. This hook is ignored by default." msgid "" "Timeout after which introspection is considered failed, set to 0 to disable." msgstr "" "Timeout after which introspection is considered failed, set to 0 to disable." #, python-format msgid "Unable to parse field JSON path %(field)s: %(error)s" msgstr "Unable to parse field JSON path %(field)s: %(error)s" #, python-format msgid "" "Unexpected exception %(exc_class)s during preprocessing in hook %(hook)s: " "%(error)s" msgstr "" "Unexpected exception %(exc_class)s during preprocessing in hook %(hook)s: " "%(error)s" #, python-format msgid "Unexpected exception %(exc_class)s during processing: %(error)s" msgstr "Unexpected exception %(exc_class)s during processing: %(error)s" #, python-format msgid "" "Unexpected exception %(exc_class)s while fetching unprocessed introspection " "data from Swift: %(error)s" msgstr "" "Unexpected exception %(exc_class)s while fetching unprocessed introspection " "data from Swift: %(error)s" #, python-format msgid "" "Unsupported API version %(requested)s, supported range is %(min)s to %(max)s" msgstr "" "Unsupported API version %(requested)s, supported range is %(min)s to %(max)s" #, python-format msgid "Unsupported scheme for field: %s, valid values are node:// or data://" msgstr "Unsupported scheme for field: %s, valid values are node:// or data://" msgid "" "Use [ironic]/auth_type, for noauth case set [ironic]/auth_type to `none` and " "specify ironic API URL via [ironic]/endpoint_override option." msgstr "" "Use [ironic]/auth_type, for noauth case set [ironic]/auth_type to `none` and " "specify ironic API URL via [ironic]/endpoint_override option." msgid "Use [ironic]/endpoint_override option to set a specific ironic API url." msgstr "" "Use [ironic]/endpoint_override option to set a specific ironic API URL." msgid "Use [ironic]/region_name option instead to configure region." msgstr "Use [ironic]/region_name option instead to configure region." msgid "Use [ironic]/service_type option to set a specific type." msgstr "Use [ironic]/service_type option to set a specific type." msgid "Use [ironic]/valid_interfaces option to specify endpoint interfaces." msgstr "Use [ironic]/valid_interfaces option to specify endpoint interfaces." msgid "Use [swift]/region_name option to configure region." msgstr "Use [swift]/region_name option to configure region." msgid "Use [swift]/service_type option to set specific service type" msgstr "Use [swift]/service_type option to set specific service type" msgid "Use [swift]/valid_interfaces option to specify endpoint interfaces." msgstr "Use [swift]/valid_interfaces option to specify endpoint interfaces." msgid "User data processing is not supported yet" msgstr "User data processing is not supported yet" #, python-format msgid "Validation failed for actions: %s" msgstr "Validation failed for actions: %s" #, python-format msgid "Validation failed for conditions: %s" msgstr "Validation failed for conditions: %s" msgid "Version not found." msgstr "Version not found." 
msgid "" "Whether to leave 1 GiB of disk size untouched for partitioning. Only has " "effect when used with the IPA as a ramdisk, for older ramdisk local_gb is " "calculated on the ramdisk side." msgstr "" "Whether to leave 1 GiB of disk size untouched for partitioning. Only has " "effect when used with the IPA as a ramdisk, for older ramdisk local_gb is " "calculated on the ramdisk side." msgid "" "Whether to manage firewall rules for PXE port. This configuration option was " "deprecated in favor of the ``driver`` option in the ``pxe_filter`` section. " "Please, use the ``noop`` filter driver to disable the firewall filtering or " "the ``iptables`` filter driver to enable it." msgstr "" "Whether to manage firewall rules for PXE port. This configuration option was " "deprecated in favour of the ``driver`` option in the ``pxe_filter`` section. " "Please, use the ``noop`` filter driver to disable the firewall filtering or " "the ``iptables`` filter driver to enable it." msgid "" "Whether to overwrite existing values in node database. Disable this option " "to make introspection a non-destructive operation." msgstr "" "Whether to overwrite existing values in node database. Disable this option " "to make introspection a non-destructive operation." msgid "Whether to power off a node after introspection." msgstr "Whether to power off a node after introspection." msgid "" "Whether to store ramdisk logs even if it did not return an error message " "(dependent upon \"ramdisk_logs_dir\" option being set)." msgstr "" "Whether to store ramdisk logs even if it did not return an error message " "(dependent upon \"ramdisk_logs_dir\" option being set)." msgid "Whether to store the boot mode (BIOS or UEFI)." msgstr "Whether to store the boot mode (BIOS or UEFI)." msgid "" "Which MAC addresses to add as ports during introspection. Possible values: " "all (all MAC addresses), active (MAC addresses of NIC with IP addresses), " "pxe (only MAC address of NIC node PXE booted from, falls back to \"active\" " "if PXE MAC is not supplied by the ramdisk)." msgstr "" "Which MAC addresses to add as ports during introspection. Possible values: " "all (all MAC addresses), active (MAC addresses of NIC with IP addresses), " "PXE (only MAC address of NIC node PXE booted from, falls back to \"active\" " "if PXE MAC is not supplied by the ramdisk)." msgid "" "Which ports (already present on a node) to keep after introspection. " "Possible values: all (do not delete anything), present (keep ports which " "MACs were present in introspection data), added (keep only MACs that we " "added during introspection)." msgstr "" "Which ports (already present on a node) to keep after introspection. " "Possible values: all (do not delete anything), present (keep ports which " "MACs were present in introspection data), added (keep only MACs that we " "added during introspection)." #, python-format msgid "[node: %s]" msgstr "[node: %s]" msgid "[unidentified node]" msgstr "[unidentified node]" #, python-format msgid "invalid regular expression: %s" msgstr "invalid regular expression: %s" msgid "iptables chain name to use." msgstr "iptables chain name to use." 
#, python-format msgid "malformed or missing CPU information: %s" msgstr "malformed or missing CPU information: %s" #, python-format msgid "" "malformed or missing memory information: %s; introspection requires physical " "memory size from dmidecode" msgstr "" "malformed or missing memory information: %s; introspection requires physical " "memory size from dmidecode" #, python-format msgid "missing required parameter(s): %s" msgstr "missing required parameter(s): %s" msgid "state" msgstr "state" #, python-format msgid "unexpected parameter(s): %s" msgstr "unexpected parameter(s): %s" ironic-inspector-7.2.0/ironic_inspector/api_tools.py0000666000175100017510000000471213241323457022761 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Generic Rest Api tools.""" import flask from oslo_config import cfg from oslo_utils import uuidutils import six from ironic_inspector.common.i18n import _ from ironic_inspector import utils CONF = cfg.CONF def raises_coercion_exceptions(fn): """Convert coercion function exceptions to utils.Error. :raises: utils.Error when the coercion function raises an AssertionError or a ValueError """ @six.wraps(fn) def inner(*args, **kwargs): try: ret = fn(*args, **kwargs) except (AssertionError, ValueError) as exc: raise utils.Error(_('Bad request: %s') % exc, code=400) return ret return inner def request_field(field_name): """Decorate a function that coerces the specified field. :param field_name: name of the field to fetch :returns: a decorator """ def outer(fn): @six.wraps(fn) def inner(*args, **kwargs): default = kwargs.pop('default', None) field = flask.request.args.get(field_name, default=default) if field == default: # field not found or the same as the default, just return return default return fn(field, *args, **kwargs) return inner return outer @request_field('marker') @raises_coercion_exceptions def marker_field(value): """Fetch the pagination marker field from flask.request.args. :returns: an uuid """ assert uuidutils.is_uuid_like(value), _('Marker not UUID-like') return value @request_field('limit') @raises_coercion_exceptions def limit_field(value): """Fetch the pagination limit field from flask.request.args. :returns: the limit """ # limit of zero means the default limit value = int(value) or CONF.api_max_limit assert value >= 0, _('Limit cannot be negative') assert value <= CONF.api_max_limit, _('Limit over %s') % CONF.api_max_limit return value ironic-inspector-7.2.0/ironic_inspector/__init__.py0000666000175100017510000000000013241323457022511 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/process.py0000666000175100017510000003543713241323457022456 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Handling introspection data from the ramdisk.""" import copy import datetime import json import os from oslo_config import cfg from oslo_serialization import base64 from oslo_utils import excutils from oslo_utils import timeutils from ironic_inspector.common.i18n import _ from ironic_inspector.common import ironic as ir_utils from ironic_inspector.common import swift from ironic_inspector import introspection_state as istate from ironic_inspector import node_cache from ironic_inspector.plugins import base as plugins_base from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector import rules from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger(__name__) _STORAGE_EXCLUDED_KEYS = {'logs'} _UNPROCESSED_DATA_STORE_SUFFIX = 'UNPROCESSED' def _store_logs(introspection_data, node_info): logs = introspection_data.get('logs') if not logs: LOG.warning('No logs were passed by the ramdisk', data=introspection_data, node_info=node_info) return if not CONF.processing.ramdisk_logs_dir: LOG.warning('Failed to store logs received from the ramdisk ' 'because ramdisk_logs_dir configuration option ' 'is not set', data=introspection_data, node_info=node_info) return fmt_args = { 'uuid': node_info.uuid if node_info is not None else 'unknown', 'mac': (utils.get_pxe_mac(introspection_data) or 'unknown').replace(':', ''), 'dt': datetime.datetime.utcnow(), 'bmc': (utils.get_ipmi_address_from_data(introspection_data) or 'unknown') } file_name = CONF.processing.ramdisk_logs_filename_format.format(**fmt_args) try: if not os.path.exists(CONF.processing.ramdisk_logs_dir): os.makedirs(CONF.processing.ramdisk_logs_dir) with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name), 'wb') as fp: fp.write(base64.decode_as_bytes(logs)) except EnvironmentError: LOG.exception('Could not store the ramdisk logs', data=introspection_data, node_info=node_info) else: LOG.info('Ramdisk logs were stored in file %s', file_name, data=introspection_data, node_info=node_info) def _find_node_info(introspection_data, failures): try: return node_cache.find_node( bmc_address=utils.get_ipmi_address_from_data(introspection_data), mac=utils.get_valid_macs(introspection_data)) except utils.NotFoundInCacheError as exc: not_found_hook = plugins_base.node_not_found_hook_manager() if not_found_hook is None: failures.append(_('Look up error: %s') % exc) return LOG.debug('Running node_not_found_hook %s', CONF.processing.node_not_found_hook, data=introspection_data) # NOTE(sambetts): If not_found_hook is not none it means that we were # unable to find the node in the node cache and there is a node not # found hook defined so we should try to send the introspection data # to that hook to generate the node info before bubbling up the error. 
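    # A hedged illustration of the hook contract; the signature is assumed
    # from the driver call below. The hook driver is a callable that takes
    # the introspection data and returns a NodeInfo, or None on failure:
    #
    #   def example_node_not_found_hook(introspection_data, **kwargs):
    #       # enroll the node here, or return None to report a failure
    #       return None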
try: node_info = not_found_hook.driver(introspection_data) if node_info: return node_info failures.append(_("Node not found hook returned nothing")) except Exception as exc: failures.append(_("Node not found hook failed: %s") % exc) except utils.Error as exc: failures.append(_('Look up error: %s') % exc) def _run_pre_hooks(introspection_data, failures): hooks = plugins_base.processing_hooks_manager() for hook_ext in hooks: LOG.debug('Running pre-processing hook %s', hook_ext.name, data=introspection_data) # NOTE(dtantsur): catch exceptions, so that we have a chance to update # node introspection status after look up try: hook_ext.obj.before_processing(introspection_data) except utils.Error as exc: LOG.error('Hook %(hook)s failed, delaying error report ' 'until node look up: %(error)s', {'hook': hook_ext.name, 'error': exc}, data=introspection_data) failures.append('Preprocessing hook %(hook)s: %(error)s' % {'hook': hook_ext.name, 'error': exc}) except Exception as exc: LOG.exception('Hook %(hook)s failed, delaying error report ' 'until node look up: %(error)s', {'hook': hook_ext.name, 'error': exc}, data=introspection_data) failures.append(_('Unexpected exception %(exc_class)s during ' 'preprocessing in hook %(hook)s: %(error)s') % {'hook': hook_ext.name, 'exc_class': exc.__class__.__name__, 'error': exc}) def _filter_data_excluded_keys(data): return {k: v for k, v in data.items() if k not in _STORAGE_EXCLUDED_KEYS} def _store_data(node_info, data, suffix=None): if CONF.processing.store_data != 'swift': LOG.debug("Swift support is disabled, introspection data " "won't be stored", node_info=node_info) return swift_object_name = swift.store_introspection_data( _filter_data_excluded_keys(data), node_info.uuid, suffix=suffix ) LOG.info('Introspection data was stored in Swift in object ' '%s', swift_object_name, node_info=node_info) if CONF.processing.store_data_location: node_info.patch([{'op': 'add', 'path': '/extra/%s' % CONF.processing.store_data_location, 'value': swift_object_name}]) def _store_unprocessed_data(node_info, data): # runs in background try: _store_data(node_info, data, suffix=_UNPROCESSED_DATA_STORE_SUFFIX) except Exception: LOG.exception('Encountered exception saving unprocessed ' 'introspection data', node_info=node_info, data=data) def _get_unprocessed_data(uuid): if CONF.processing.store_data == 'swift': LOG.debug('Fetching unprocessed introspection data from ' 'Swift for %s', uuid) return json.loads( swift.get_introspection_data( uuid, suffix=_UNPROCESSED_DATA_STORE_SUFFIX ) ) else: raise utils.Error(_('Swift support is disabled'), code=400) def process(introspection_data): """Process data from the ramdisk. This function heavily relies on the hooks to do the actual data processing.
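    :param introspection_data: raw data sent by the ramdisk
    :returns: dict with the UUID of the matched node (see _process_node)
    :raises: utils.Error on pre-processing failures or when the node cannot
        be found or is in an unexpected state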
""" unprocessed_data = copy.deepcopy(introspection_data) failures = [] _run_pre_hooks(introspection_data, failures) node_info = _find_node_info(introspection_data, failures) if node_info: # Locking is already done in find_node() but may be not done in a # node_not_found hook node_info.acquire_lock() if failures or node_info is None: msg = _('The following failures happened during running ' 'pre-processing hooks:\n%s') % '\n'.join(failures) if node_info is not None: node_info.finished(istate.Events.error, error='\n'.join(failures)) _store_logs(introspection_data, node_info) raise utils.Error(msg, node_info=node_info, data=introspection_data) LOG.info('Matching node is %s', node_info.uuid, node_info=node_info, data=introspection_data) if node_info.finished_at is not None: # race condition or introspection canceled raise utils.Error(_('Node processing already finished with ' 'error: %s') % node_info.error, node_info=node_info, code=400) # Note(mkovacik): store data now when we're sure that a background # thread won't race with other process() or introspect.abort() # call utils.executor().submit(_store_unprocessed_data, node_info, unprocessed_data) try: node = node_info.node() except ir_utils.NotFound as exc: with excutils.save_and_reraise_exception(): node_info.finished(istate.Events.error, error=str(exc)) _store_logs(introspection_data, node_info) try: result = _process_node(node_info, node, introspection_data) except utils.Error as exc: node_info.finished(istate.Events.error, error=str(exc)) with excutils.save_and_reraise_exception(): _store_logs(introspection_data, node_info) except Exception as exc: LOG.exception('Unexpected exception during processing') msg = _('Unexpected exception %(exc_class)s during processing: ' '%(error)s') % {'exc_class': exc.__class__.__name__, 'error': exc} node_info.finished(istate.Events.error, error=msg) _store_logs(introspection_data, node_info) raise utils.Error(msg, node_info=node_info, data=introspection_data, code=500) if CONF.processing.always_store_ramdisk_logs: _store_logs(introspection_data, node_info) return result def _run_post_hooks(node_info, introspection_data): hooks = plugins_base.processing_hooks_manager() for hook_ext in hooks: LOG.debug('Running post-processing hook %s', hook_ext.name, node_info=node_info, data=introspection_data) hook_ext.obj.before_update(introspection_data, node_info) @node_cache.fsm_transition(istate.Events.process, reentrant=False) def _process_node(node_info, node, introspection_data): # NOTE(dtantsur): repeat the check in case something changed ir_utils.check_provision_state(node) _run_post_hooks(node_info, introspection_data) _store_data(node_info, introspection_data) ironic = ir_utils.get_client() pxe_filter.driver().sync(ironic) node_info.invalidate_cache() rules.apply(node_info, introspection_data) resp = {'uuid': node.uuid} utils.executor().submit(_finish, node_info, ironic, introspection_data, power_off=CONF.processing.power_off) return resp @node_cache.triggers_fsm_error_transition() def _finish(node_info, ironic, introspection_data, power_off=True): if power_off: LOG.debug('Forcing power off of node %s', node_info.uuid) try: ironic.node.set_power_state(node_info.uuid, 'off') except Exception as exc: if node_info.node().provision_state == 'enroll': LOG.info("Failed to power off the node in" "'enroll' state, ignoring; error was " "%s", exc, node_info=node_info, data=introspection_data) else: msg = (_('Failed to power off node %(node)s, check ' 'its power management configuration: ' '%(exc)s') % {'node': 
node_info.uuid, 'exc': exc}) raise utils.Error(msg, node_info=node_info, data=introspection_data) LOG.info('Node powered-off', node_info=node_info, data=introspection_data) node_info.finished(istate.Events.finish) LOG.info('Introspection finished successfully', node_info=node_info, data=introspection_data) def reapply(node_ident): """Re-apply introspection steps. Re-apply preprocessing, postprocessing and introspection rules on stored data. :param node_ident: node UUID or name :raises: utils.Error """ LOG.debug('Processing re-apply introspection request for node ' 'UUID: %s', node_ident) node_info = node_cache.get_node(node_ident, locked=False) if not node_info.acquire_lock(blocking=False): # Note (mkovacik): it should be sufficient to check data # presence & locking. If either introspection didn't start # yet, was in waiting state or didn't finish yet, either data # won't be available or locking would fail raise utils.Error(_('Node locked, please, try again later'), node_info=node_info, code=409) utils.executor().submit(_reapply, node_info) def _reapply(node_info): # runs in background try: node_info.started_at = timeutils.utcnow() node_info.commit() introspection_data = _get_unprocessed_data(node_info.uuid) except Exception as exc: LOG.exception('Encountered exception while fetching ' 'stored introspection data', node_info=node_info) msg = (_('Unexpected exception %(exc_class)s while fetching ' 'unprocessed introspection data from Swift: %(error)s') % {'exc_class': exc.__class__.__name__, 'error': exc}) node_info.finished(istate.Events.error, error=msg) return try: ironic = ir_utils.get_client() except Exception as exc: msg = _('Encountered an exception while getting the Ironic client: ' '%s') % exc LOG.error(msg, node_info=node_info, data=introspection_data) node_info.finished(istate.Events.error, error=msg) return try: _reapply_with_data(node_info, introspection_data) except Exception as exc: return _finish(node_info, ironic, introspection_data, power_off=False) LOG.info('Successfully reapplied introspection on stored ' 'data', node_info=node_info, data=introspection_data) @node_cache.fsm_event_before(istate.Events.reapply) @node_cache.triggers_fsm_error_transition() def _reapply_with_data(node_info, introspection_data): failures = [] _run_pre_hooks(introspection_data, failures) if failures: raise utils.Error(_('Pre-processing failures detected reapplying ' 'introspection on stored data:\n%s') % '\n'.join(failures), node_info=node_info) _run_post_hooks(node_info, introspection_data) _store_data(node_info, introspection_data) node_info.invalidate_cache() rules.apply(node_info, introspection_data) ironic-inspector-7.2.0/ironic_inspector/cmd/0000775000175100017510000000000013241324014021141 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/cmd/all.py0000666000175100017510000000170113241323457022276 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
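# A startup sketch, assuming this module is wired up as the
# 'ironic-inspector' console script so that main() below is the entry
# point:
#
#     ironic-inspector --config-file /etc/ironic-inspector/inspector.conf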
"""The Ironic Inspector service.""" import sys from ironic_inspector.common import service_utils from ironic_inspector import wsgi_service def main(args=sys.argv[1:]): # Parse config file and command line options, then start logging service_utils.prepare_service(args) server = wsgi_service.WSGIService() server.run() if __name__ == '__main__': sys.exit(main()) ironic-inspector-7.2.0/ironic_inspector/cmd/__init__.py0000666000175100017510000000006013241323457023262 0ustar zuulzuul00000000000000import eventlet # noqa eventlet.monkey_patch() ironic-inspector-7.2.0/ironic_inspector/db.py0000666000175100017510000001420213241323457021350 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy models for inspection data and shared database code.""" import contextlib from oslo_concurrency import lockutils from oslo_config import cfg from oslo_db import options as db_opts from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import models from oslo_db.sqlalchemy import types as db_types from sqlalchemy import (Boolean, Column, DateTime, Enum, ForeignKey, Integer, String, Text) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import orm from ironic_inspector import conf # noqa from ironic_inspector import introspection_state as istate class ModelBase(models.ModelBase): __table_args__ = {'mysql_engine': "InnoDB", 'mysql_charset': "utf8"} Base = declarative_base(cls=ModelBase) CONF = cfg.CONF _DEFAULT_SQL_CONNECTION = 'sqlite:///ironic_inspector.sqlite' _CTX_MANAGER = None db_opts.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION) _synchronized = lockutils.synchronized_with_prefix("ironic-inspector-") class Node(Base): __tablename__ = 'nodes' uuid = Column(String(36), primary_key=True) version_id = Column(String(36), server_default='') state = Column(Enum(*istate.States.all()), nullable=False, default=istate.States.finished, server_default=istate.States.finished) started_at = Column(DateTime, nullable=True) finished_at = Column(DateTime, nullable=True) error = Column(Text, nullable=True) # version_id is being tracked in the NodeInfo object # for the sake of consistency. 
See also SQLAlchemy docs: # http://docs.sqlalchemy.org/en/latest/orm/versioning.html __mapper_args__ = { 'version_id_col': version_id, 'version_id_generator': False, } class Attribute(Base): __tablename__ = 'attributes' uuid = Column(String(36), primary_key=True) node_uuid = Column(String(36), ForeignKey('nodes.uuid', name='fk_node_attribute')) name = Column(String(255), nullable=False) value = Column(String(255), nullable=True) class Option(Base): __tablename__ = 'options' uuid = Column(String(36), ForeignKey('nodes.uuid'), primary_key=True) name = Column(String(255), primary_key=True) value = Column(Text) class Rule(Base): __tablename__ = 'rules' uuid = Column(String(36), primary_key=True) created_at = Column(DateTime, nullable=False) description = Column(Text) # NOTE(dtantsur): in the future we might need to temporary disable a rule disabled = Column(Boolean, default=False) conditions = orm.relationship('RuleCondition', lazy='joined', order_by='RuleCondition.id', cascade="all, delete-orphan") actions = orm.relationship('RuleAction', lazy='joined', order_by='RuleAction.id', cascade="all, delete-orphan") class RuleCondition(Base): __tablename__ = 'rule_conditions' id = Column(Integer, primary_key=True) rule = Column(String(36), ForeignKey('rules.uuid')) op = Column(String(255), nullable=False) multiple = Column(String(255), nullable=False) invert = Column(Boolean, default=False) # NOTE(dtantsur): while all operations now require a field, I can also # imagine user-defined operations that do not, thus it's nullable. field = Column(Text) params = Column(db_types.JsonEncodedDict) def as_dict(self): res = self.params.copy() res['op'] = self.op res['field'] = self.field res['multiple'] = self.multiple res['invert'] = self.invert return res class RuleAction(Base): __tablename__ = 'rule_actions' id = Column(Integer, primary_key=True) rule = Column(String(36), ForeignKey('rules.uuid')) action = Column(String(255), nullable=False) params = Column(db_types.JsonEncodedDict) def as_dict(self): res = self.params.copy() res['action'] = self.action return res def init(): """Initialize the database. Method called on service start up, initialize transaction context manager and try to create db session. """ get_writer_session() def model_query(model, *args, **kwargs): """Query helper for simpler session usage. :param session: if present, the session to use """ session = kwargs.get('session') or get_reader_session() query = session.query(model, *args) return query @contextlib.contextmanager def ensure_transaction(session=None): session = session or get_writer_session() with session.begin(subtransactions=True): yield session @_synchronized("transaction-context-manager") def _create_context_manager(): _ctx_mgr = enginefacade.transaction_context() # TODO(aarefiev): enable foreign keys for SQLite once all unit # tests with failed constraint will be fixed. _ctx_mgr.configure(sqlite_fk=False) return _ctx_mgr def get_context_manager(): """Create transaction context manager lazily. :returns: The transaction context manager. """ global _CTX_MANAGER if _CTX_MANAGER is None: _CTX_MANAGER = _create_context_manager() return _CTX_MANAGER def get_reader_session(): """Help method to get reader session. :returns: The reader session. """ return get_context_manager().reader.get_sessionmaker()() def get_writer_session(): """Help method to get writer session. :returns: The writer session. 
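    A combined usage sketch for the helpers in this module (node_uuid
    is an illustrative variable):

        session = get_reader_session()
        unfinished = model_query(Node, session=session).filter(
            Node.finished_at.is_(None)).count()

        with ensure_transaction() as session:
            model_query(Option, session=session).filter_by(
                uuid=node_uuid).delete()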
""" return get_context_manager().writer.get_sessionmaker()() ironic-inspector-7.2.0/ironic_inspector/version.py0000666000175100017510000000121413241323457022447 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('ironic-inspector') ironic-inspector-7.2.0/ironic_inspector/pxe_filter/0000775000175100017510000000000013241324014022537 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/pxe_filter/interface.py0000666000175100017510000000370713241323457025074 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """The code of the PXE boot filtering interface.""" import abc import six @six.add_metaclass(abc.ABCMeta) class FilterDriver(object): """The PXE boot filtering interface.""" @abc.abstractmethod def init_filter(self): """Initialize the internal driver state. This method should be idempotent and may perform system-wide filter state changes. Can be synchronous. :returns: nothing. """ @abc.abstractmethod def sync(self, ironic): """Synchronize the filter with ironic and inspector. To be called both periodically and as needed by inspector. The filter should tear down its internal state if the sync method raises in order to "propagate" filtering exception between periodic and on-demand sync call. To this end, a driver should raise from the sync call if its internal state isn't properly initialized. :param ironic: an ironic client instance. :returns: nothing. """ @abc.abstractmethod def tear_down_filter(self): """Reset the filter. This method should be idempotent and may perform system-wide filter state changes. Can be synchronous. :returns: nothing. """ @abc.abstractmethod def get_periodic_sync_task(self): """Get periodic sync task for the filter. :returns: a periodic task to be run in the background. """ ironic-inspector-7.2.0/ironic_inspector/pxe_filter/__init__.py0000666000175100017510000000000013241323457024652 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/pxe_filter/iptables.py0000666000175100017510000002176613241323457024744 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import os import re from eventlet.green import subprocess from oslo_config import cfg from oslo_log import log from ironic_inspector.common import ironic as ir_utils from ironic_inspector import node_cache from ironic_inspector.pxe_filter import base as pxe_filter CONF = cfg.CONF LOG = log.getLogger(__name__) _EMAC_REGEX = 'EMAC=([0-9a-f]{2}(:[0-9a-f]{2}){5}) IMAC=.*' def _should_enable_dhcp(): """Check whether we should enable DHCP at all. We won't even open our DHCP if no nodes are on introspection and node_not_found_hook is not set. """ return (node_cache.introspection_active() or CONF.processing.node_not_found_hook is not None) class IptablesFilter(pxe_filter.BaseFilter): """A PXE boot filtering interface implementation.""" def __init__(self): super(IptablesFilter, self).__init__() self.blacklist_cache = None self.enabled = True self.interface = CONF.iptables.dnsmasq_interface self.chain = CONF.iptables.firewall_chain self.new_chain = self.chain + '_temp' self.base_command = ('sudo', 'ironic-inspector-rootwrap', CONF.rootwrap_config, 'iptables') def reset(self): self.enabled = True self.blacklist_cache = None for chain in (self.chain, self.new_chain): try: self._clean_up(chain) except Exception as e: LOG.exception('Encountered exception resetting filter: %s', e) super(IptablesFilter, self).reset() @pxe_filter.locked_driver_event(pxe_filter.Events.initialize) def init_filter(self): # -w flag makes iptables wait for xtables lock, but it's not supported # everywhere yet try: with open(os.devnull, 'wb') as null: subprocess.check_call(self.base_command + ('-w', '-h'), stderr=null, stdout=null) except subprocess.CalledProcessError: LOG.warning('iptables does not support -w flag, please update ' 'it to at least version 1.4.21') else: self.base_command += ('-w',) self._clean_up(self.chain) # Not really needed, but helps to validate that we have access to # iptables self._iptables('-N', self.chain) LOG.debug('The iptables filter was initialized') @pxe_filter.locked_driver_event(pxe_filter.Events.sync) def sync(self, ironic): """Sync firewall filter rules for introspection. Gives access to PXE boot port for any machine, except for those, whose MAC is registered in Ironic and is not on introspection right now. This function is called from both introspection initialization code and from periodic task. This function is supposed to be resistant to unexpected iptables state. ``init()`` function must be called once before any call to this function. This function is using ``eventlet`` semaphore to serialize access from different green threads. :param ironic: an ironic client instance. :returns: nothing. 
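    The net effect, rendered as iptables arguments (the interface value
    'br-ctlplane' and the MAC are illustrative; the chain name comes
    from CONF.iptables.firewall_chain):

        -I INPUT -i br-ctlplane -p udp --dport 67 -j <chain>
        -A <chain> -m mac --mac-source 52:54:00:aa:bb:cc -j DROP
        -A <chain> -j ACCEPT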
""" if not _should_enable_dhcp(): self._disable_dhcp() return to_blacklist = _get_blacklist(ironic) if to_blacklist == self.blacklist_cache: LOG.debug('Not updating iptables - no changes in MAC list %s', to_blacklist) return LOG.debug('Blacklisting active MAC\'s %s', to_blacklist) with self._temporary_chain(self.new_chain, self.chain): # Force update on the next iteration if this attempt fails self.blacklist_cache = None # - Blacklist active macs, so that nova can boot them for mac in to_blacklist: self._iptables('-A', self.new_chain, '-m', 'mac', '--mac-source', mac, '-j', 'DROP') # - Whitelist everything else self._iptables('-A', self.new_chain, '-j', 'ACCEPT') # Cache result of successful iptables update self.enabled = True self.blacklist_cache = to_blacklist LOG.debug('The iptables filter was synchronized') @contextlib.contextmanager def _temporary_chain(self, chain, main_chain): """Context manager to operate on a temporary chain.""" # Clean up a bit to account for possible troubles on previous run self._clean_up(chain) self._iptables('-N', chain) yield # Swap chains self._iptables('-I', 'INPUT', '-i', self.interface, '-p', 'udp', '--dport', '67', '-j', chain) self._iptables('-D', 'INPUT', '-i', self.interface, '-p', 'udp', '--dport', '67', '-j', main_chain, ignore=True) self._iptables('-F', main_chain, ignore=True) self._iptables('-X', main_chain, ignore=True) self._iptables('-E', chain, main_chain) def _iptables(self, *args, **kwargs): # NOTE(dtantsur): -w flag makes it wait for xtables lock cmd = self.base_command + args ignore = kwargs.pop('ignore', False) LOG.debug('Running iptables %s', args) kwargs['stderr'] = subprocess.STDOUT try: subprocess.check_output(cmd, **kwargs) except subprocess.CalledProcessError as exc: decoded_output = exc.output.decode("utf-8") output = decoded_output.replace('\n', '. ') if ignore: LOG.debug('Ignoring failed iptables %(args)s: %(output)s', {'args': args, 'output': output}) else: LOG.error('iptables %(iptables)s failed: %(exc)s', {'iptables': args, 'exc': output}) raise def _clean_up(self, chain): self._iptables('-D', 'INPUT', '-i', self.interface, '-p', 'udp', '--dport', '67', '-j', chain, ignore=True) self._iptables('-F', chain, ignore=True) self._iptables('-X', chain, ignore=True) def _disable_dhcp(self): """Disable DHCP completely.""" if not self.enabled: LOG.debug('DHCP is already disabled, not updating') return LOG.debug('No nodes on introspection and node_not_found_hook is ' 'not set - disabling DHCP') self.blacklist_cache = None with self._temporary_chain(self.new_chain, self.chain): # Blacklist everything self._iptables('-A', self.new_chain, '-j', 'REJECT') self.enabled = False def _ib_mac_to_rmac_mapping(ports): """Update port InfiniBand MAC address to EthernetOverInfiniBand MAC On InfiniBand deployment we need to map between the baremetal host InfiniBand MAC to the EoIB MAC. The EoIB MAC addresses are learned automatically by the EoIB interfaces and those MACs are recorded to the /sys/class/net//eth/neighs file. The InfiniBand GUID is taken from the ironic port client-id extra attribute. The InfiniBand GUID is the last 8 bytes of the client-id. The file format allows to map the GUID to EoIB MAC. The filter rules based on those MACs get applied during a driver.update() call :param ports: list of ironic ports :returns: Nothing. 
""" ethoib_interfaces = CONF.iptables.ethoib_interfaces for interface in ethoib_interfaces: neighs_file = ( os.path.join('/sys/class/net', interface, 'eth/neighs')) try: with open(neighs_file, 'r') as fd: data = fd.read() except IOError: LOG.error('Interface %s is not Ethernet Over InfiniBand; ' 'Skipping ...', interface) continue for port in ports: client_id = port.extra.get('client-id') if client_id: # Note(moshele): The last 8 bytes in the client-id is # the baremetal node InfiniBand GUID guid = client_id[-23:] p = re.compile(_EMAC_REGEX + guid) match = p.search(data) if match: port.address = match.group(1) def _get_blacklist(ironic): ports = [port.address for port in ir_utils.call_with_retries(ironic.port.list, limit=0, fields=['address', 'extra']) if port.address not in node_cache.active_macs()] _ib_mac_to_rmac_mapping(ports) return ports ironic-inspector-7.2.0/ironic_inspector/pxe_filter/base.py0000666000175100017510000001662713241323457024053 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Base code for PXE boot filtering.""" import contextlib from automaton import exceptions as automaton_errors from automaton import machines from eventlet import semaphore from futurist import periodics from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log import six import stevedore from ironic_inspector.common.i18n import _ from ironic_inspector.common import ironic as ir_utils from ironic_inspector.pxe_filter import interface CONF = cfg.CONF LOG = log.getLogger(__name__) _STEVEDORE_DRIVER_NAMESPACE = 'ironic_inspector.pxe_filter' class InvalidFilterDriverState(RuntimeError): """The fsm of the filter driver raised an error.""" class States(object): """PXE filter driver states.""" uninitialized = 'uninitialized' initialized = 'initialized' class Events(object): """PXE filter driver transitions.""" initialize = 'initialize' sync = 'sync' reset = 'reset' # a reset is always possible State_space = [ { 'name': States.uninitialized, 'next_states': { Events.initialize: States.initialized, Events.reset: States.uninitialized, }, }, { 'name': States.initialized, 'next_states': { Events.sync: States.initialized, Events.reset: States.uninitialized, }, }, ] def locked_driver_event(event): """Call driver method having processed the fsm event.""" def outer(method): @six.wraps(method) def inner(self, *args, **kwargs): with self.lock, self.fsm_reset_on_error() as fsm: fsm.process_event(event) return method(self, *args, **kwargs) return inner return outer class BaseFilter(interface.FilterDriver): """The generic PXE boot filtering interface implementation. This driver doesn't do anything but provides a basic synchronization and initialization logic for some drivers to reuse. Subclasses have to provide a custom sync() method. 
""" fsm = machines.FiniteMachine.build(State_space) fsm.default_start_state = States.uninitialized def __init__(self): super(BaseFilter, self).__init__() self.lock = semaphore.BoundedSemaphore() self.fsm.initialize(start_state=States.uninitialized) def __str__(self): return '%(driver)s, state=%(state)s' % { 'driver': type(self).__name__, 'state': self.state} @property def state(self): """Current driver state.""" return self.fsm.current_state def reset(self): """Reset internal driver state. This method is called by the fsm_context manager upon exception as well as by the tear_down_filter method. A subclass might wish to override as necessary, though must not lock the driver. The overriding subclass should up-call. :returns: nothing. """ LOG.debug('Resetting the PXE filter driver %s', self) # a reset event is always possible self.fsm.process_event(Events.reset) @contextlib.contextmanager def fsm_reset_on_error(self): """Reset the filter driver upon generic exception. The context is self.fsm. The automaton.exceptions.NotFound error is cast to the InvalidFilterDriverState error. Other exceptions trigger self.reset() :raises: InvalidFilterDriverState :returns: nothing. """ LOG.debug('The PXE filter driver %s enters the fsm_reset_on_error ' 'context', self) try: yield self.fsm except automaton_errors.NotFound as e: raise InvalidFilterDriverState(_('The PXE filter driver %(driver)s' ': my fsm encountered an ' 'exception: %(error)s') % { 'driver': self, 'error': e}) except Exception as e: LOG.exception('The PXE filter %(filter)s encountered an ' 'exception: %(error)s; resetting the filter', {'filter': self, 'error': e}) self.reset() raise finally: LOG.debug('The PXE filter driver %s left the fsm_reset_on_error ' 'context', self) @locked_driver_event(Events.initialize) def init_filter(self): """Base driver initialization logic. Locked. :raises: InvalidFilterDriverState :returns: nothing. """ LOG.debug('Initializing the PXE filter driver %s', self) def tear_down_filter(self): """Base driver tear down logic. Locked. :returns: nothing. """ LOG.debug('Tearing down the PXE filter driver %s', self) with self.lock: self.reset() @locked_driver_event(Events.sync) def sync(self, ironic): """Base driver sync logic. Locked. :param ironic: obligatory ironic client instance :returns: nothing. """ LOG.debug('Syncing the PXE filter driver %s', self) def get_periodic_sync_task(self): """Get periodic sync task for the filter. The periodic task returned is casting the InvalidFilterDriverState to the periodics.NeverAgain exception to quit looping. :raises: periodics.NeverAgain :returns: a periodic task to be run in the background. """ ironic = ir_utils.get_client() def periodic_sync_task(): try: self.sync(ironic) except InvalidFilterDriverState as e: LOG.warning('Filter driver %s disabling periodic sync ' 'task because of an invalid state.', self) raise periodics.NeverAgain(e) return periodics.periodic( # NOTE(milan): the periodic decorator doesn't support 0 as # a spacing value of (a switched off) periodic spacing=CONF.pxe_filter.sync_period or float('inf'), enabled=bool(CONF.pxe_filter.sync_period))(periodic_sync_task) class NoopFilter(BaseFilter): """A trivial PXE boot filter.""" _DRIVER_MANAGER = None @lockutils.synchronized(__name__) def _driver_manager(): """Create a Stevedore driver manager for filtering drivers. 
Locked.""" global _DRIVER_MANAGER name = CONF.pxe_filter.driver # FIXME(milan): to be removed after the transition period of deprecating # the firewall option group if name == 'iptables' and not CONF.iptables.manage_firewall: name = 'noop' if _DRIVER_MANAGER is None: _DRIVER_MANAGER = stevedore.driver.DriverManager( _STEVEDORE_DRIVER_NAMESPACE, name=name, invoke_on_load=True ) return _DRIVER_MANAGER def driver(): """Get the driver for the PXE filter. :returns: the singleton PXE filter driver object. """ return _driver_manager().driver ironic-inspector-7.2.0/ironic_inspector/pxe_filter/dnsmasq.py0000666000175100017510000002040013241323457024567 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE(milan) the filter design relies on the hostdir[1] being in exclusive # inspector control. The hostdir should be considered a private cache directory # of inspector that dnsmasq has read access to and polls updates from, through # the inotify facility. # # [1] see the --dhcp-hostsdir option description in # http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html import fcntl import os import time from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ironic_inspector.common import ironic as ir_utils from ironic_inspector import node_cache from ironic_inspector.pxe_filter import base as pxe_filter CONF = cfg.CONF LOG = log.getLogger(__name__) _EXCLUSIVE_WRITE_ATTEMPTS = 10 _EXCLUSIVE_WRITE_ATTEMPTS_DELAY = 0.01 _ROOTWRAP_COMMAND = 'sudo ironic-inspector-rootwrap {rootwrap_config!s}' _MACBL_LEN = len('ff:ff:ff:ff:ff:ff,ignore\n') class DnsmasqFilter(pxe_filter.BaseFilter): """The dnsmasq PXE filter driver. A pxe filter driver implementation that controls access to dnsmasq through amending its configuration. """ def reset(self): """Stop dnsmasq and upcall reset.""" _execute(CONF.dnsmasq_pxe_filter.dnsmasq_stop_command, ignore_errors=True) super(DnsmasqFilter, self).reset() def _sync(self, ironic): """Sync the inspector, ironic and dnsmasq state. Locked. :raises: IOError, OSError. :returns: None. 
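    Resulting hostsdir layout sketch (the directory is an assumed
    dhcp_hostsdir value; one file per MAC, named after it):

        /var/lib/dnsmasq/hostsdir/52:54:00:aa:bb:cc
            contains '52:54:00:aa:bb:cc,ignore\n'  -> blacklisted
        /var/lib/dnsmasq/hostsdir/52:54:00:dd:ee:ff
            contains '52:54:00:dd:ee:ff\n'         -> whitelisted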
""" LOG.debug('Syncing the driver') timestamp_start = timeutils.utcnow() active_macs = node_cache.active_macs() ironic_macs = set(port.address for port in ir_utils.call_with_retries(ironic.port.list, limit=0, fields=['address'])) blacklist_macs = _get_blacklist() # NOTE(milan) whitelist MACs of ports not kept in ironic anymore # also whitelist active MACs that are still blacklisted in the # dnsmasq configuration but have just been asked to be introspected for mac in ((blacklist_macs - ironic_macs) | (blacklist_macs & active_macs)): _whitelist_mac(mac) # blacklist new ports that aren't being inspected for mac in ironic_macs - (blacklist_macs | active_macs): _blacklist_mac(mac) timestamp_end = timeutils.utcnow() LOG.debug('The dnsmasq PXE filter was synchronized (took %s)', timestamp_end - timestamp_start) @pxe_filter.locked_driver_event(pxe_filter.Events.sync) def sync(self, ironic): """Sync dnsmasq configuration with current Ironic&Inspector state. Polls all ironic ports. Those being inspected, the active ones, are whitelisted while the rest are blacklisted in the dnsmasq configuration. :param ironic: an ironic client instance. :raises: OSError, IOError. :returns: None. """ self._sync(ironic) @pxe_filter.locked_driver_event(pxe_filter.Events.initialize) def init_filter(self): """Performs an initial sync with ironic and starts dnsmasq. The initial _sync() call reduces the chances dnsmasq might lose some inotify blacklist events by prefetching the blacklist before the dnsmasq is started. :raises: OSError, IOError. :returns: None. """ _purge_dhcp_hostsdir() ironic = ir_utils.get_client() self._sync(ironic) _execute(CONF.dnsmasq_pxe_filter.dnsmasq_start_command) LOG.info('The dnsmasq PXE filter was initialized') def _purge_dhcp_hostsdir(): """Remove all the DHCP hosts files. :raises: FileNotFoundError in case the dhcp_hostsdir is invalid. IOError in case of non-writable file or a record not being a file. :returns: None. """ dhcp_hostsdir = CONF.dnsmasq_pxe_filter.dhcp_hostsdir if not CONF.dnsmasq_pxe_filter.purge_dhcp_hostsdir: LOG.debug('Not purging %s; disabled in configuration.', dhcp_hostsdir) return LOG.debug('Purging %s', dhcp_hostsdir) for mac in os.listdir(dhcp_hostsdir): path = os.path.join(dhcp_hostsdir, mac) # NOTE(milan) relying on a failure here aborting the init_filter() call os.remove(path) LOG.debug('Removed %s', path) def _get_blacklist(): """Get addresses currently blacklisted in dnsmasq. :raises: FileNotFoundError in case the dhcp_hostsdir is invalid. :returns: a set of MACs currently blacklisted in dnsmasq. """ hostsdir = CONF.dnsmasq_pxe_filter.dhcp_hostsdir # whitelisted MACs lack the ,ignore directive return set(address for address in os.listdir(hostsdir) if os.stat(os.path.join(hostsdir, address)).st_size == _MACBL_LEN) def _exclusive_write_or_pass(path, buf): """Write exclusively or pass if path locked. The intention is to be able to run multiple instances of the filter on the same node in multiple inspector processes. :param path: where to write to :param buf: the content to write :raises: FileNotFoundError, IOError :returns: True if the write was successful. """ # NOTE(milan) line-buffering enforced to ensure dnsmasq record update # through inotify, which reacts on f.close() attempts = _EXCLUSIVE_WRITE_ATTEMPTS with open(path, 'w', 1) as f: while attempts: try: fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB) f.write(buf) # Go ahead and flush the data now instead of waiting until # after the automatic flush with the file close after the # file lock is released. 
f.flush() return True except IOError as e: if e.errno == os.errno.EWOULDBLOCK: LOG.debug('%s locked; will try again (later)', path) attempts -= 1 time.sleep(_EXCLUSIVE_WRITE_ATTEMPTS_DELAY) continue raise finally: fcntl.flock(f, fcntl.LOCK_UN) LOG.debug('Failed to write the exclusively-locked path: %(path)s for ' '%(attempts)s times', {'attempts': _EXCLUSIVE_WRITE_ATTEMPTS, 'path': path}) return False def _blacklist_mac(mac): """Creates a dhcp_hostsdir ignore record for the MAC. :raises: FileNotFoundError in case the dhcp_hostsdir is invalid, IOError in case the dhcp host MAC file isn't writable. :returns: None. """ path = os.path.join(CONF.dnsmasq_pxe_filter.dhcp_hostsdir, mac) if _exclusive_write_or_pass(path, '%s,ignore\n' % mac): LOG.debug('Blacklisted %s', mac) else: LOG.warning('Failed to blacklist %s; retrying next periodic sync ' 'time', mac) def _whitelist_mac(mac): """Un-ignores the dhcp_hostsdir record for the MAC. :raises: FileNotFoundError in case the dhcp_hostsdir is invalid, IOError in case the dhcp host MAC file isn't writable. :returns: None. """ path = os.path.join(CONF.dnsmasq_pxe_filter.dhcp_hostsdir, mac) # remove the ,ignore directive if _exclusive_write_or_pass(path, '%s\n' % mac): LOG.debug('Whitelisted %s', mac) else: LOG.warning('Failed to whitelist %s; retrying next periodic sync ' 'time', mac) def _execute(cmd=None, ignore_errors=False): # e.g: '/bin/kill $(cat /var/run/dnsmasq.pid)' if not cmd: return helper = _ROOTWRAP_COMMAND.format(rootwrap_config=CONF.rootwrap_config) processutils.execute(cmd, run_as_root=True, root_helper=helper, shell=True, check_exit_code=not ignore_errors) ironic-inspector-7.2.0/ironic_inspector/migrations/0000775000175100017510000000000013241324014022552 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/0000775000175100017510000000000013241324014024422 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py0000666000175100017510000000351413241323457031672 0ustar zuulzuul00000000000000# Copyright 2015 Cisco Systems, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """inital_db_schema Revision ID: 578f84f38d Revises: Create Date: 2015-09-15 14:52:22.448944 """ # revision identifiers, used by Alembic. 
revision = '578f84f38d' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.create_table( 'nodes', sa.Column('uuid', sa.String(36), primary_key=True), sa.Column('started_at', sa.Float, nullable=True), sa.Column('finished_at', sa.Float, nullable=True), sa.Column('error', sa.Text, nullable=True), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'attributes', sa.Column('name', sa.String(255), primary_key=True), sa.Column('value', sa.String(255), primary_key=True), sa.Column('uuid', sa.String(36), sa.ForeignKey('nodes.uuid')), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'options', sa.Column('uuid', sa.String(36), sa.ForeignKey('nodes.uuid'), primary_key=True), sa.Column('name', sa.String(255), primary_key=True), sa.Column('value', sa.Text), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.pyironic-inspector-7.2.0/ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_co0000666000175100017510000000177713241323457033720 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add invert field to rule condition Revision ID: e169a4a81d88 Revises: d588418040d Create Date: 2016-02-16 11:19:29.715615 """ # revision identifiers, used by Alembic. revision = 'e169a4a81d88' down_revision = 'd588418040d' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('rule_conditions', sa.Column('invert', sa.Boolean(), nullable=True, default=False)) ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attribute.pyironic-inspector-7.2.0/ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attr0000666000175100017510000000341613241323457034056 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Introducing Node.state attribute Revision ID: d2e48801c8ef Revises: e169a4a81d88 Create Date: 2016-07-29 10:10:32.351661 """ # revision identifiers, used by Alembic. 
revision = 'd2e48801c8ef' down_revision = 'e169a4a81d88' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy import sql from ironic_inspector import introspection_state as istate Node = sql.table('nodes', sql.column('error', sa.String), sql.column('state', sa.Enum(*istate.States.all()))) def upgrade(): state_enum = sa.Enum(*istate.States.all(), name='node_state') state_enum.create(op.get_bind()) op.add_column('nodes', sa.Column('version_id', sa.String(36), server_default='')) op.add_column('nodes', sa.Column('state', state_enum, nullable=False, default=istate.States.finished, server_default=istate.States.finished)) # correct the state: finished -> error if Node.error is not null stmt = Node.update().where(Node.c.error != sql.null()).values( {'state': op.inline_literal(istate.States.error)}) op.execute(stmt) ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relaxing.pyironic-inspector-7.2.0/ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relax0000666000175100017510000000607713241323457034115 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """attribute_constraints_relaxing Revision ID: 882b2d84cb1b Revises: d00d6e3f38c4 Create Date: 2017-01-13 11:27:00.053286 """ # revision identifiers, used by Alembic. revision = '882b2d84cb1b' down_revision = 'd00d6e3f38c4' branch_labels = None depends_on = None from alembic import op from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.engine.reflection import Inspector as insp ATTRIBUTES = 'attributes' NODES = 'nodes' NAME = 'name' VALUE = 'value' UUID = 'uuid' NODE_UUID = 'node_uuid' naming_convention = { "pk": 'pk_%(table_name)s', "fk": 'fk_%(table_name)s' } def upgrade(): connection = op.get_bind() inspector = insp.from_engine(connection) pk_constraint = (inspector.get_pk_constraint(ATTRIBUTES).get('name') or naming_convention['pk'] % {'table_name': ATTRIBUTES}) fk_constraint = (inspector.get_foreign_keys(ATTRIBUTES)[0].get('name') or naming_convention['fk'] % {'table_name': ATTRIBUTES}) columns_meta = inspector.get_columns(ATTRIBUTES) name_type = {meta.get('type') for meta in columns_meta if meta['name'] == NAME}.pop() value_type = {meta.get('type') for meta in columns_meta if meta['name'] == VALUE}.pop() node_uuid_column = sa.Column(NODE_UUID, sa.String(36)) op.add_column(ATTRIBUTES, node_uuid_column) attributes = sa.table(ATTRIBUTES, node_uuid_column, sa.Column(UUID, sa.String(36))) with op.batch_alter_table(ATTRIBUTES, naming_convention=naming_convention) as batch_op: batch_op.drop_constraint(fk_constraint, type_='foreignkey') rows = connection.execute(sa.select([attributes.c.uuid, attributes.c.node_uuid])) for row in rows: # move uuid to node_uuid, reuse uuid as a new primary key connection.execute( attributes.update().where(attributes.c.uuid == row.uuid). 
values(node_uuid=row.uuid, uuid=uuidutils.generate_uuid()) ) with op.batch_alter_table(ATTRIBUTES, naming_convention=naming_convention) as batch_op: batch_op.drop_constraint(pk_constraint, type_='primary') batch_op.create_primary_key(pk_constraint, [UUID]) batch_op.create_foreign_key('fk_node_attribute', NODES, [NODE_UUID], [UUID]) batch_op.alter_column('name', nullable=False, type_=name_type) batch_op.alter_column('value', nullable=True, type_=value_type) ironic-inspector-7.2.0/ironic_inspector/migrations/versions/d588418040d_add_rules.py0000666000175100017510000000376213241323457030357 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add Rules Revision ID: d588418040d Revises: 578f84f38d Create Date: 2015-09-21 14:31:03.048455 """ # revision identifiers, used by Alembic. revision = 'd588418040d' down_revision = '578f84f38d' branch_labels = None depends_on = None from alembic import op from oslo_db.sqlalchemy import types import sqlalchemy as sa def upgrade(): op.create_table( 'rules', sa.Column('uuid', sa.String(36), primary_key=True), sa.Column('created_at', sa.DateTime, nullable=False), sa.Column('description', sa.Text), sa.Column('disabled', sa.Boolean, default=False), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'rule_conditions', sa.Column('id', sa.Integer, primary_key=True), sa.Column('rule', sa.String(36), sa.ForeignKey('rules.uuid')), sa.Column('op', sa.String(255), nullable=False), sa.Column('multiple', sa.String(255), nullable=False), sa.Column('field', sa.Text), sa.Column('params', types.JsonEncodedDict), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) op.create_table( 'rule_actions', sa.Column('id', sa.Integer, primary_key=True), sa.Column('rule', sa.String(36), sa.ForeignKey('rules.uuid')), sa.Column('action', sa.String(255), nullable=False), sa.Column('params', types.JsonEncodedDict), mysql_ENGINE='InnoDB', mysql_DEFAULT_CHARSET='UTF8' ) ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/18440d0834af_introducing_the_aborting_state.pyironic-inspector-7.2.0/ironic_inspector/migrations/versions/18440d0834af_introducing_the_aborting_st0000666000175100017510000000251613241323457033677 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Introducing the aborting state Revision ID: 18440d0834af Revises: 882b2d84cb1b Create Date: 2017-12-11 15:40:13.905554 """ # revision identifiers, used by Alembic. 
revision = '18440d0834af' down_revision = '882b2d84cb1b' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy import sql from ironic_inspector import introspection_state as istate old_state = sa.Enum(*(set(istate.States.all()) - {istate.States.aborting}), name='node_state') new_state = sa.Enum(*istate.States.all(), name='node_state') Node = sql.table('nodes', sql.column('state', old_state)) def upgrade(): with op.batch_alter_table('nodes') as batch_op: batch_op.alter_column('state', existing_type=old_state, type_=new_state) ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000ironic-inspector-7.2.0/ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_type.pyironic-inspector-7.2.0/ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_0000666000175100017510000000522413241323457033671 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Change created|finished_at type to DateTime Revision ID: d00d6e3f38c4 Revises: d2e48801c8ef Create Date: 2016-12-15 17:18:10.728695 """ # revision identifiers, used by Alembic. revision = 'd00d6e3f38c4' down_revision = 'd2e48801c8ef' branch_labels = None depends_on = None import datetime from alembic import op import sqlalchemy as sa def upgrade(): started_at = sa.Column('started_at', sa.types.Float, nullable=True) finished_at = sa.Column('finished_at', sa.types.Float, nullable=True) temp_started_at = sa.Column("temp_started_at", sa.types.DateTime, nullable=True) temp_finished_at = sa.Column("temp_finished_at", sa.types.DateTime, nullable=True) uuid = sa.Column("uuid", sa.String(36), primary_key=True) op.add_column("nodes", temp_started_at) op.add_column("nodes", temp_finished_at) t = sa.table('nodes', started_at, finished_at, temp_started_at, temp_finished_at, uuid) conn = op.get_bind() rows = conn.execute(sa.select([t.c.started_at, t.c.finished_at, t.c.uuid])) for row in rows: temp_started = datetime.datetime.utcfromtimestamp(row['started_at']) temp_finished = row['finished_at'] # Note(milan) this is just a precaution; sa.null shouldn't happen here if temp_finished is not None: temp_finished = datetime.datetime.utcfromtimestamp(temp_finished) conn.execute(t.update().where(t.c.uuid == row.uuid).values( temp_started_at=temp_started, temp_finished_at=temp_finished)) with op.batch_alter_table('nodes') as batch_op: batch_op.drop_column('started_at') batch_op.drop_column('finished_at') batch_op.alter_column('temp_started_at', existing_type=sa.types.DateTime, nullable=True, new_column_name='started_at') batch_op.alter_column('temp_finished_at', existing_type=sa.types.DateTime, nullable=True, new_column_name='finished_at') ironic-inspector-7.2.0/ironic_inspector/migrations/env.py0000666000175100017510000000464513241323457023741 0ustar zuulzuul00000000000000# Copyright 2015 Cisco Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from ironic_inspector import db # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config ironic_inspector_config = config.ironic_inspector_config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = db.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = ironic_inspector_config.database.connection context.configure( url=url, target_metadata=target_metadata, literal_binds=True) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ session = db.get_writer_session() with session.connection() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ironic-inspector-7.2.0/ironic_inspector/migrations/script.py.mako0000666000175100017510000000171413241323457025375 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ironic-inspector-7.2.0/ironic_inspector/policy.py0000666000175100017510000001572013241323457022270 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import sys from oslo_concurrency import lockutils from oslo_config import cfg from oslo_policy import policy CONF = cfg.CONF _ENFORCER = None default_policies = [ policy.RuleDefault( 'is_admin', 'role:admin or role:administrator or role:baremetal_admin', description='Full read/write API access'), policy.RuleDefault( 'is_observer', 'role:baremetal_observer', description='Read-only API access'), policy.RuleDefault( 'public_api', 'is_public_api:True', description='Internal flag for public API routes'), policy.RuleDefault( 'default', '!', description='Default API access policy'), ] api_version_policies = [ policy.DocumentedRuleDefault( 'introspection', 'rule:public_api', 'Access the API root for available versions information', [{'path': '/', 'method': 'GET'}] ), policy.DocumentedRuleDefault( 'introspection:version', 'rule:public_api', 'Access the versioned API root for version information', [{'path': '/{version}', 'method': 'GET'}] ), ] introspection_policies = [ policy.DocumentedRuleDefault( 'introspection:continue', 'rule:public_api', 'Ramdisk callback to continue introspection', [{'path': '/continue', 'method': 'POST'}] ), policy.DocumentedRuleDefault( 'introspection:status', 'rule:is_admin or rule:is_observer', 'Get introspection status', [{'path': '/introspection', 'method': 'GET'}, {'path': '/introspection/{node_id}', 'method': 'GET'}] ), policy.DocumentedRuleDefault( 'introspection:start', 'rule:is_admin', 'Start introspection', [{'path': '/introspection/{node_id}', 'method': 'POST'}] ), policy.DocumentedRuleDefault( 'introspection:abort', 'rule:is_admin', 'Abort introspection', [{'path': '/introspection/{node_id}/abort', 'method': 'POST'}] ), policy.DocumentedRuleDefault( 'introspection:data', 'rule:is_admin', 'Get introspection data', [{'path': '/introspection/{node_id}/data', 'method': 'GET'}] ), policy.DocumentedRuleDefault( 'introspection:reapply', 'rule:is_admin', 'Reapply introspection on stored data', [{'path': '/introspection/{node_id}/data/unprocessed', 'method': 'POST'}] ), ] rule_policies = [ policy.DocumentedRuleDefault( 'introspection:rule:get', 'rule:is_admin', 'Get introspection rule(s)', [{'path': '/rules', 'method': 'GET'}, {'path': '/rules/{rule_id}', 'method': 'GET'}] ), policy.DocumentedRuleDefault( 'introspection:rule:delete', 'rule:is_admin', 'Delete introspection rule(s)', [{'path': '/rules', 'method': 'DELETE'}, {'path': '/rules/{rule_id}', 'method': 'DELETE'}] ), policy.DocumentedRuleDefault( 'introspection:rule:create', 'rule:is_admin', 'Create introspection rule', [{'path': '/rules', 'method': 'POST'}] ), ] def list_policies(): """Get list of all policies defined in code. Used to register them all at runtime, and by oslo-config-generator to generate sample policy files. 
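    An override sketch (JSON contents for the file referenced by
    [oslo_policy]/policy_file; the extra role name is illustrative):

        {
            "is_admin": "role:admin or role:my_deploy_admin",
            "introspection:status": "rule:is_admin or rule:is_observer"
        }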
""" policies = itertools.chain( default_policies, api_version_policies, introspection_policies, rule_policies) return policies @lockutils.synchronized('policy_enforcer') def init_enforcer(policy_file=None, rules=None, default_rule=None, use_conf=True): """Synchronously initializes the policy enforcer :param policy_file: Custom policy file to use, if none is specified, `CONF.oslo_policy.policy_file` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. :param default_rule: Default rule to use, CONF.oslo_policy.policy_default_rule will be used if none is specified. :param use_conf: Whether to load rules from config file. """ global _ENFORCER if _ENFORCER: return _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf) _ENFORCER.register_defaults(list_policies()) def get_enforcer(): """Provides access to the single instance of Policy enforcer.""" if not _ENFORCER: init_enforcer() return _ENFORCER def get_oslo_policy_enforcer(): """Get the enforcer instance to generate policy files. This method is for use by oslopolicy CLI scripts. Those scripts need the 'output-file' and 'namespace' options, but having those in sys.argv means loading the inspector config options will fail as those are not expected to be present. So we pass in an arg list with those stripped out. """ conf_args = [] # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] i = 1 while i < len(sys.argv): if sys.argv[i].strip('-') in ['namespace', 'output-file']: # e.g. --namespace i += 2 continue conf_args.append(sys.argv[i]) i += 1 cfg.CONF(conf_args, project='ironic-inspector') return get_enforcer() def authorize(rule, target, creds, *args, **kwargs): """A shortcut for policy.Enforcer.authorize() Checks authorization of a rule against the target and credentials, and raises an exception if the rule is not defined. args and kwargs are passed directly to oslo.policy Enforcer.authorize Always returns True if CONF.auth_strategy != keystone. :param rule: name of a registered oslo.policy rule :param target: dict-like structure to check rule against :param creds: dict of policy values from request :returns: True if request is authorized against given policy, False otherwise :raises: oslo_policy.policy.PolicyNotRegistered if supplied policy is not registered in oslo_policy """ if CONF.auth_strategy != 'keystone': return True enforcer = get_enforcer() rule = CONF.oslo_policy.policy_default_rule if rule is None else rule return enforcer.authorize(rule, target, creds, *args, **kwargs) ironic-inspector-7.2.0/ironic_inspector/alembic.ini0000666000175100017510000000110213241323457022501 0ustar zuulzuul00000000000000[alembic] # path to migration scripts script_location = %(here)s/migrations # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ironic-inspector-7.2.0/ironic_inspector/dbsync.py0000666000175100017510000000574713241323457022263 0ustar zuulzuul00000000000000# Copyright 2015 Cisco Systems # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from alembic import command as alembic_command from alembic import config as alembic_config from alembic import util as alembic_util from oslo_config import cfg from oslo_log import log import six from ironic_inspector import conf # noqa CONF = cfg.CONF def add_alembic_command(subparsers, name): return subparsers.add_parser( name, help=getattr(alembic_command, name).__doc__) def add_command_parsers(subparsers): for name in ['current', 'history', 'branches', 'heads']: parser = add_alembic_command(subparsers, name) parser.set_defaults(func=do_alembic_command) for name in ['stamp', 'show', 'edit']: parser = add_alembic_command(subparsers, name) parser.set_defaults(func=with_revision) parser.add_argument('--revision', nargs='?', required=True) parser = add_alembic_command(subparsers, 'upgrade') parser.set_defaults(func=with_revision) parser.add_argument('--revision', nargs='?') parser = add_alembic_command(subparsers, 'revision') parser.set_defaults(func=do_revision) parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) def _get_alembic_config(): return alembic_config.Config(os.path.join(os.path.dirname(__file__), 'alembic.ini')) def do_revision(config, cmd, *args, **kwargs): do_alembic_command(config, cmd, message=CONF.command.message, autogenerate=CONF.command.autogenerate) def with_revision(config, cmd, *args, **kwargs): revision = CONF.command.revision or 'head' do_alembic_command(config, cmd, revision) def do_alembic_command(config, cmd, *args, **kwargs): try: getattr(alembic_command, cmd)(config, *args, **kwargs) except alembic_util.CommandError as e: alembic_util.err(six.text_type(e)) def main(args=sys.argv[1:]): log.register_options(CONF) CONF.register_cli_opt(command_opt) CONF(args, project='ironic-inspector') config = _get_alembic_config() config.set_main_option('script_location', "ironic_inspector:migrations") config.ironic_inspector_config = CONF CONF.command.func(config, CONF.command.name) ironic-inspector-7.2.0/ironic_inspector/introspect.py0000666000175100017510000001313513241323457023161 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
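# A usage sketch for the dbsync CLI defined above (an illustration added for
# this edit, not part of the original tree). It assumes the
# ``ironic-inspector-dbsync`` console script is wired to ``main()`` and that
# the database connection is configured in the referenced config file; the
# revision id below is hypothetical:
#
#   ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade
#   ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade --revision 882b2d84cb1b
#   ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf revision -m 'example' --autogenerate
#
# ``upgrade`` falls back to the ``head`` revision when ``--revision`` is
# omitted (see ``with_revision`` above).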
"""Handling introspection request.""" import time from eventlet import semaphore from oslo_config import cfg from ironic_inspector.common.i18n import _ from ironic_inspector.common import ironic as ir_utils from ironic_inspector import introspection_state as istate from ironic_inspector import node_cache from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector import utils CONF = cfg.CONF LOG = utils.getProcessingLogger(__name__) _LAST_INTROSPECTION_TIME = 0 _LAST_INTROSPECTION_LOCK = semaphore.BoundedSemaphore() def introspect(node_id, token=None): """Initiate hardware properties introspection for a given node. :param node_id: node UUID or name :param token: authentication token :raises: Error """ ironic = ir_utils.get_client(token) node = ir_utils.get_node(node_id, ironic=ironic) ir_utils.check_provision_state(node) validation = ironic.node.validate(node.uuid) if not validation.power['result']: msg = _('Failed validation of power interface, reason: %s') raise utils.Error(msg % validation.power['reason'], node_info=node) bmc_address = ir_utils.get_ipmi_address(node) node_info = node_cache.start_introspection(node.uuid, bmc_address=bmc_address, ironic=ironic) utils.executor().submit(_background_introspect, node_info, ironic) @node_cache.release_lock @node_cache.fsm_transition(istate.Events.wait) def _background_introspect(node_info, ironic): global _LAST_INTROSPECTION_TIME LOG.debug('Attempting to acquire lock on last introspection time') with _LAST_INTROSPECTION_LOCK: delay = (_LAST_INTROSPECTION_TIME - time.time() + CONF.introspection_delay) if delay > 0: LOG.debug('Waiting %d seconds before sending the next ' 'node on introspection', delay) time.sleep(delay) _LAST_INTROSPECTION_TIME = time.time() node_info.acquire_lock() _background_introspect_locked(node_info, ironic) def _background_introspect_locked(node_info, ironic): # TODO(dtantsur): pagination macs = list(node_info.ports()) if macs: node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs) LOG.info('Whitelisting MAC\'s %s for a PXE boot', macs, node_info=node_info) pxe_filter.driver().sync(ironic) attrs = node_info.attributes if CONF.processing.node_not_found_hook is None and not attrs: raise utils.Error( _('No lookup attributes were found, inspector won\'t ' 'be able to find it after introspection, consider creating ' 'ironic ports or providing an IPMI address'), node_info=node_info) LOG.info('The following attributes will be used for look up: %s', attrs, node_info=node_info) try: ironic.node.set_boot_device(node_info.uuid, 'pxe', persistent=False) except Exception as exc: LOG.warning('Failed to set boot device to PXE: %s', exc, node_info=node_info) try: ironic.node.set_power_state(node_info.uuid, 'reboot') except Exception as exc: raise utils.Error(_('Failed to power on the node, check it\'s ' 'power management configuration: %s'), exc, node_info=node_info) LOG.info('Introspection started successfully', node_info=node_info) def abort(node_id, token=None): """Abort running introspection. 
:param node_id: node UUID or name :param token: authentication token :raises: Error """ LOG.debug('Aborting introspection for node %s', node_id) ironic = ir_utils.get_client(token) node_info = node_cache.get_node(node_id, ironic=ironic, locked=False) # check pending operations locked = node_info.acquire_lock(blocking=False) if not locked: # the node is locked by another operation, cannot abort right now raise utils.Error(_('Node is locked, please retry later'), node_info=node_info, code=409) utils.executor().submit(_abort, node_info, ironic) @node_cache.release_lock @node_cache.fsm_event_before(istate.Events.abort) def _abort(node_info, ironic): # runs in background LOG.debug('Forcing power-off', node_info=node_info) try: ironic.node.set_power_state(node_info.uuid, 'off') except Exception as exc: LOG.warning('Failed to power off node: %s', exc, node_info=node_info) node_info.finished(istate.Events.abort_end, error=_('Canceled by operator')) # block this node from PXE booting the introspection image again try: pxe_filter.driver().sync(ironic) except Exception as exc: # NOTE(mkovacik): this will be retried in the PXE filter sync # periodic task; we continue aborting LOG.warning('Failed to sync the PXE filter: %s', exc, node_info=node_info) LOG.info('Introspection aborted', node_info=node_info) ironic-inspector-7.2.0/ironic_inspector/rules.py0000666000175100017510000003665713241323457022127 0ustar zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
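# A hedged usage sketch for the abort flow above (added illustration, not
# from the original tree): the caller returns as soon as the lock is taken;
# power-off and the PXE filter sync run on the background executor.
#
#   from ironic_inspector import introspect
#   from ironic_inspector import utils
#
#   try:
#       introspect.abort(node_id)  # node_id: hypothetical UUID or name
#   except utils.Error as exc:
#       # http_code 409 means the node is locked by another operation
#       handle_conflict(exc.http_code)  # handle_conflict is hypothetical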
"""Support for introspection rules.""" import jsonpath_rw as jsonpath import jsonschema from oslo_db import exception as db_exc from oslo_utils import timeutils from oslo_utils import uuidutils import six from sqlalchemy import orm from ironic_inspector.common.i18n import _ from ironic_inspector import db from ironic_inspector.plugins import base as plugins_base from ironic_inspector import utils LOG = utils.getProcessingLogger(__name__) _CONDITIONS_SCHEMA = None _ACTIONS_SCHEMA = None def conditions_schema(): global _CONDITIONS_SCHEMA if _CONDITIONS_SCHEMA is None: condition_plugins = [x.name for x in plugins_base.rule_conditions_manager()] _CONDITIONS_SCHEMA = { "title": "Inspector rule conditions schema", "type": "array", # we can have rules that always apply "minItems": 0, "items": { "type": "object", # field might become optional in the future, but not right now "required": ["op", "field"], "properties": { "op": { "description": "condition operator", "enum": condition_plugins }, "field": { "description": "JSON path to field for matching", "type": "string" }, "multiple": { "description": "how to treat multiple values", "enum": ["all", "any", "first"] }, "invert": { "description": "whether to invert the result", "type": "boolean" }, }, # other properties are validated by plugins "additionalProperties": True } } return _CONDITIONS_SCHEMA def actions_schema(): global _ACTIONS_SCHEMA if _ACTIONS_SCHEMA is None: action_plugins = [x.name for x in plugins_base.rule_actions_manager()] _ACTIONS_SCHEMA = { "title": "Inspector rule actions schema", "type": "array", "minItems": 1, "items": { "type": "object", "required": ["action"], "properties": { "action": { "description": "action to take", "enum": action_plugins }, }, # other properties are validated by plugins "additionalProperties": True } } return _ACTIONS_SCHEMA class IntrospectionRule(object): """High-level class representing an introspection rule.""" def __init__(self, uuid, conditions, actions, description): """Create rule object from database data.""" self._uuid = uuid self._conditions = conditions self._actions = actions self._description = description def as_dict(self, short=False): result = { 'uuid': self._uuid, 'description': self._description, } if not short: result['conditions'] = [c.as_dict() for c in self._conditions] result['actions'] = [a.as_dict() for a in self._actions] return result @property def description(self): return self._description or self._uuid def check_conditions(self, node_info, data): """Check if conditions are true for a given node. 
:param node_info: a NodeInfo object :param data: introspection data :returns: True if conditions match, otherwise False """ LOG.debug('Checking rule "%s"', self.description, node_info=node_info, data=data) ext_mgr = plugins_base.rule_conditions_manager() for cond in self._conditions: scheme, path = _parse_path(cond.field) if scheme == 'node': source_data = node_info.node().to_dict() elif scheme == 'data': source_data = data field_values = jsonpath.parse(path).find(source_data) field_values = [x.value for x in field_values] cond_ext = ext_mgr[cond.op].obj if not field_values: if cond_ext.ALLOW_NONE: LOG.debug('Field with JSON path %s was not found in data', cond.field, node_info=node_info, data=data) field_values = [None] else: LOG.info('Field with JSON path %(path)s was not found ' 'in data, rule "%(rule)s" will not ' 'be applied', {'path': cond.field, 'rule': self.description}, node_info=node_info, data=data) return False for value in field_values: result = cond_ext.check(node_info, value, cond.params) if cond.invert: result = not result if (cond.multiple == 'first' or (cond.multiple == 'all' and not result) or (cond.multiple == 'any' and result)): break if not result: LOG.info('Rule "%(rule)s" will not be applied: condition ' '%(field)s %(op)s %(params)s failed', {'rule': self.description, 'field': cond.field, 'op': cond.op, 'params': cond.params}, node_info=node_info, data=data) return False LOG.info('Rule "%s" will be applied', self.description, node_info=node_info, data=data) return True def apply_actions(self, node_info, data=None): """Run actions on a node. :param node_info: NodeInfo instance :param data: introspection data """ LOG.debug('Running actions for rule "%s"', self.description, node_info=node_info, data=data) ext_mgr = plugins_base.rule_actions_manager() for act in self._actions: ext = ext_mgr[act.action].obj for formatted_param in ext.FORMATTED_PARAMS: value = act.params.get(formatted_param) if not value or not isinstance(value, six.string_types): continue # NOTE(aarefiev): format the provided value with the # introspection data. # TODO(aarefiev): also validate the format string at rule # import time. try: act.params[formatted_param] = value.format(data=data) except KeyError as e: raise utils.Error(_('Invalid formatting variable key ' 'provided: %s') % e, node_info=node_info, data=data) LOG.debug('Running action `%(action)s %(params)s`', {'action': act.action, 'params': act.params}, node_info=node_info, data=data) ext.apply(node_info, act.params) LOG.debug('Successfully applied actions', node_info=node_info, data=data) def _parse_path(path): """Parse a path, extracting the scheme and the path itself. Supports the 'node' and 'data' schemes, which refer to the node info and the introspection data respectively. If the scheme is missing from the path, 'data' is assumed. :param path: data or node path :return: tuple (scheme, path) """ try: index = path.index('://') except ValueError: scheme = 'data' path = path else: scheme = path[:index] path = path[index + 3:] return scheme, path def create(conditions_json, actions_json, uuid=None, description=None): """Create a new rule in the database. :param conditions_json: list of dicts with the following keys: * op - operator * field - JSON path to the field to compare Other keys are stored as is. :param actions_json: list of dicts with the following keys: * action - action type Other keys are stored as is.
:param uuid: rule UUID, will be generated if empty :param description: human-readable rule description :returns: new IntrospectionRule object :raises: utils.Error on failure """ uuid = uuid or uuidutils.generate_uuid() LOG.debug('Creating rule %(uuid)s with description "%(descr)s", ' 'conditions %(conditions)s and actions %(actions)s', {'uuid': uuid, 'descr': description, 'conditions': conditions_json, 'actions': actions_json}) try: jsonschema.validate(conditions_json, conditions_schema()) except jsonschema.ValidationError as exc: raise utils.Error(_('Validation failed for conditions: %s') % exc) try: jsonschema.validate(actions_json, actions_schema()) except jsonschema.ValidationError as exc: raise utils.Error(_('Validation failed for actions: %s') % exc) cond_mgr = plugins_base.rule_conditions_manager() act_mgr = plugins_base.rule_actions_manager() conditions = [] reserved_params = {'op', 'field', 'multiple', 'invert'} for cond_json in conditions_json: field = cond_json['field'] scheme, path = _parse_path(field) if scheme not in ('node', 'data'): raise utils.Error(_('Unsupported scheme for field: %s, valid ' 'values are node:// or data://') % scheme) # verify field as JSON path try: jsonpath.parse(path) except Exception as exc: raise utils.Error(_('Unable to parse field JSON path %(field)s: ' '%(error)s') % {'field': field, 'error': exc}) plugin = cond_mgr[cond_json['op']].obj params = {k: v for k, v in cond_json.items() if k not in reserved_params} try: plugin.validate(params) except ValueError as exc: raise utils.Error(_('Invalid parameters for operator %(op)s: ' '%(error)s') % {'op': cond_json['op'], 'error': exc}) conditions.append((cond_json['field'], cond_json['op'], cond_json.get('multiple', 'any'), cond_json.get('invert', False), params)) actions = [] for action_json in actions_json: plugin = act_mgr[action_json['action']].obj params = {k: v for k, v in action_json.items() if k != 'action'} try: plugin.validate(params) except ValueError as exc: raise utils.Error(_('Invalid parameters for action %(act)s: ' '%(error)s') % {'act': action_json['action'], 'error': exc}) actions.append((action_json['action'], params)) try: with db.ensure_transaction() as session: rule = db.Rule(uuid=uuid, description=description, disabled=False, created_at=timeutils.utcnow()) for field, op, multiple, invert, params in conditions: rule.conditions.append(db.RuleCondition(op=op, field=field, multiple=multiple, invert=invert, params=params)) for action, params in actions: rule.actions.append(db.RuleAction(action=action, params=params)) rule.save(session) except db_exc.DBDuplicateEntry as exc: LOG.error('Database integrity error %s when ' 'creating a rule', exc) raise utils.Error(_('Rule with UUID %s already exists') % uuid, code=409) LOG.info('Created rule %(uuid)s with description "%(descr)s"', {'uuid': uuid, 'descr': description}) return IntrospectionRule(uuid=uuid, conditions=rule.conditions, actions=rule.actions, description=description) def get(uuid): """Get a rule by its UUID.""" try: rule = db.model_query(db.Rule).filter_by(uuid=uuid).one() except orm.exc.NoResultFound: raise utils.Error(_('Rule %s was not found') % uuid, code=404) return IntrospectionRule(uuid=rule.uuid, actions=rule.actions, conditions=rule.conditions, description=rule.description) def get_all(): """List all rules.""" query = db.model_query(db.Rule).order_by(db.Rule.created_at) return [IntrospectionRule(uuid=rule.uuid, actions=rule.actions, conditions=rule.conditions, description=rule.description) for rule in query] def 
delete(uuid): """Delete a rule by its UUID.""" with db.ensure_transaction() as session: db.model_query(db.RuleAction, session=session).filter_by(rule=uuid).delete() db.model_query(db.RuleCondition, session=session) .filter_by(rule=uuid).delete() count = (db.model_query(db.Rule, session=session) .filter_by(uuid=uuid).delete()) if not count: raise utils.Error(_('Rule %s was not found') % uuid, code=404) LOG.info('Introspection rule %s was deleted', uuid) def delete_all(): """Delete all rules.""" with db.ensure_transaction() as session: db.model_query(db.RuleAction, session=session).delete() db.model_query(db.RuleCondition, session=session).delete() db.model_query(db.Rule, session=session).delete() LOG.info('All introspection rules were deleted') def apply(node_info, data): """Apply rules to a node.""" rules = get_all() if not rules: LOG.debug('No custom introspection rules to apply', node_info=node_info, data=data) return LOG.debug('Applying custom introspection rules', node_info=node_info, data=data) to_apply = [] for rule in rules: if rule.check_conditions(node_info, data): to_apply.append(rule) if to_apply: LOG.debug('Running actions', node_info=node_info, data=data) for rule in to_apply: rule.apply_actions(node_info, data=data) else: LOG.debug('No actions to apply', node_info=node_info, data=data) LOG.info('Successfully applied custom introspection rules', node_info=node_info, data=data) ironic-inspector-7.2.0/ironic_inspector/conf/0000775000175100017510000000000013241324014021323 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/conf/discovery.py0000666000175100017510000000166413241323457023727 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from ironic_inspector.common.i18n import _ _OPTS = [ cfg.StrOpt('enroll_node_driver', default='fake', help=_('The name of the Ironic driver used by the enroll ' 'hook when creating a new node in Ironic.')), ] def register_opts(conf): conf.register_opts(_OPTS, 'discovery') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/swift.py0000666000175100017510000000455113241323457023052 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
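# A hedged sketch of the rule lifecycle implemented by the module above
# (added illustration; the condition/action JSON is illustrative and any
# registered condition/action plugin name may be used):
#
#   from ironic_inspector import rules
#
#   rule = rules.create(
#       conditions_json=[{'op': 'eq', 'field': 'data://cpus', 'value': 8}],
#       actions_json=[{'action': 'set-attribute',
#                      'path': '/extra/profile', 'value': 'compute'}])
#   if rule.check_conditions(node_info, data):  # what rules.apply() does
#       rule.apply_actions(node_info, data=data)
#   rules.delete(rule.as_dict()['uuid'])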
from oslo_config import cfg from ironic_inspector.common.i18n import _ from ironic_inspector.common import keystone SWIFT_GROUP = 'swift' SERVICE_TYPE = 'object-store' _OPTS = [ cfg.IntOpt('max_retries', default=2, help=_('Maximum number of times to retry a Swift request, ' 'before failing.')), cfg.IntOpt('delete_after', default=0, help=_('Number of seconds that the Swift object will last ' 'before being deleted. (set to 0 to never delete the ' 'object).')), cfg.StrOpt('container', default='ironic-inspector', help=_('Default Swift container to use when creating ' 'objects.')), cfg.StrOpt('os_service_type', default='object-store', help=_('Swift service type.'), deprecated_for_removal=True, deprecated_reason=_('Use [swift]/service_type option ' 'to set specific service type')), cfg.StrOpt('os_endpoint_type', default='internalURL', help=_('Swift endpoint type.'), deprecated_for_removal=True, deprecated_reason=_('Use [swift]/valid_interfaces option ' 'to specify endpoint interfaces.')), cfg.StrOpt('os_region', help=_('Keystone region to get endpoint for.'), deprecated_for_removal=True, deprecated_reason=_("Use [swift]/region_name option to " "configure region.")) ] def register_opts(conf): conf.register_opts(_OPTS, SWIFT_GROUP) keystone.register_auth_opts(SWIFT_GROUP, SERVICE_TYPE) def list_opts(): return keystone.add_auth_options(_OPTS, SERVICE_TYPE) ironic-inspector-7.2.0/ironic_inspector/conf/processing.py0000666000175100017510000001243713241323457024074 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from ironic_inspector.common.i18n import _ VALID_ADD_PORTS_VALUES = ('all', 'active', 'pxe', 'disabled') VALID_KEEP_PORTS_VALUES = ('all', 'present', 'added') VALID_STORE_DATA_VALUES = ('none', 'swift') _OPTS = [ cfg.StrOpt('add_ports', default='pxe', help=_('Which MAC addresses to add as ports during ' 'introspection. Possible values: all ' '(all MAC addresses), active (MAC addresses of NIC with ' 'IP addresses), pxe (only MAC address of NIC node PXE ' 'booted from, falls back to "active" if PXE MAC is not ' 'supplied by the ramdisk).'), choices=VALID_ADD_PORTS_VALUES), cfg.StrOpt('keep_ports', default='all', help=_('Which ports (already present on a node) to keep after ' 'introspection. Possible values: all (do not delete ' 'anything), present (keep ports which MACs were present ' 'in introspection data), added (keep only MACs that we ' 'added during introspection).'), choices=VALID_KEEP_PORTS_VALUES), cfg.BoolOpt('overwrite_existing', default=True, help=_('Whether to overwrite existing values in node ' 'database. Disable this option to make ' 'introspection a non-destructive operation.')), cfg.StrOpt('default_processing_hooks', default='ramdisk_error,root_disk_selection,scheduler,' 'validate_interfaces,capabilities,pci_devices', help=_('Comma-separated list of default hooks for processing ' 'pipeline. Hook \'scheduler\' updates the node with the ' 'minimum properties required by the Nova scheduler. 
' 'Hook \'validate_interfaces\' ensures that valid NIC ' 'data was provided by the ramdisk. ' 'Do not exclude these two unless you really know what ' 'you\'re doing.')), cfg.StrOpt('processing_hooks', default='$default_processing_hooks', help=_('Comma-separated list of enabled hooks for processing ' 'pipeline. The default for this is ' '$default_processing_hooks, hooks can be added before ' 'or after the defaults like this: ' '"prehook,$default_processing_hooks,posthook".')), cfg.StrOpt('ramdisk_logs_dir', help=_('If set, logs from ramdisk will be stored in this ' 'directory.')), cfg.BoolOpt('always_store_ramdisk_logs', default=False, help=_('Whether to store ramdisk logs even if it did not ' 'return an error message (dependent upon ' '"ramdisk_logs_dir" option being set).')), cfg.StrOpt('node_not_found_hook', help=_('The name of the hook to run when inspector receives ' 'inspection information from a node it isn\'t already ' 'aware of. This hook is ignored by default.')), cfg.StrOpt('store_data', default='none', choices=VALID_STORE_DATA_VALUES, help=_('Method for storing introspection data. If set to \'none' '\', introspection data will not be stored.')), cfg.StrOpt('store_data_location', help=_('Name of the key to store the location of stored data ' 'in the extra column of the Ironic database.')), cfg.BoolOpt('disk_partitioning_spacing', default=True, help=_('Whether to leave 1 GiB of disk size untouched for ' 'partitioning. Only has effect when used with the IPA ' 'as a ramdisk, for older ramdisk local_gb is ' 'calculated on the ramdisk side.')), cfg.StrOpt('ramdisk_logs_filename_format', default='{uuid}_{dt:%Y%m%d-%H%M%S.%f}.tar.gz', help=_('File name template for storing ramdisk logs. The ' 'following replacements can be used: ' '{uuid} - node UUID or "unknown", ' '{bmc} - node BMC address or "unknown", ' '{dt} - current UTC date and time, ' '{mac} - PXE booting MAC or "unknown".')), cfg.BoolOpt('power_off', default=True, help=_('Whether to power off a node after introspection.')), ] def register_opts(conf): conf.register_opts(_OPTS, 'processing') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/__init__.py0000666000175100017510000000257113241323457023455 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from ironic_inspector.conf import capabilities from ironic_inspector.conf import default from ironic_inspector.conf import discovery from ironic_inspector.conf import dnsmasq_pxe_filter from ironic_inspector.conf import iptables from ironic_inspector.conf import ironic from ironic_inspector.conf import pci_devices from ironic_inspector.conf import processing from ironic_inspector.conf import pxe_filter from ironic_inspector.conf import swift CONF = cfg.CONF capabilities.register_opts(CONF) discovery.register_opts(CONF) default.register_opts(CONF) dnsmasq_pxe_filter.register_opts(CONF) iptables.register_opts(CONF) ironic.register_opts(CONF) pci_devices.register_opts(CONF) processing.register_opts(CONF) pxe_filter.register_opts(CONF) swift.register_opts(CONF) ironic-inspector-7.2.0/ironic_inspector/conf/iptables.py0000666000175100017510000000473213241323457023512 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from ironic_inspector.common.i18n import _ _OPTS = [ cfg.BoolOpt('manage_firewall', default=True, # NOTE(milan) this filter driver will be replaced by # a dnsmasq filter driver deprecated_for_removal=True, deprecated_group='firewall', help=_('Whether to manage firewall rules for the PXE port. ' 'This configuration option was deprecated in favor of ' 'the ``driver`` option in the ``pxe_filter`` section. ' 'Please use the ``noop`` filter driver to disable the ' 'firewall filtering or the ``iptables`` filter driver ' 'to enable it.')), cfg.StrOpt('dnsmasq_interface', default='br-ctlplane', deprecated_group='firewall', help=_('Interface on which dnsmasq listens, the default is ' 'for VMs.')), cfg.StrOpt('firewall_chain', default='ironic-inspector', deprecated_group='firewall', help=_('iptables chain name to use.')), cfg.ListOpt('ethoib_interfaces', deprecated_group='firewall', default=[], help=_('List of Ethernet over InfiniBand interfaces ' 'on the Inspector host which are used for physical ' 'access to the DHCP network. Multiple interfaces would ' 'be attached to a bond or bridge specified in ' 'dnsmasq_interface. The MACs of the InfiniBand nodes ' 'which are not in the desired state are going to be ' 'blacklisted based on the list of neighbor MACs ' 'on these interfaces.')), ] def register_opts(conf): conf.register_opts(_OPTS, 'iptables') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/opts.py0000666000175100017510000000574213241323457022676 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log from oslo_middleware import cors import ironic_inspector.conf from ironic_inspector import version MIN_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Minimum-Version' MAX_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Maximum-Version' VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Version' def set_config_defaults(): """Set default values for configuration options from other libraries.""" log.set_defaults(default_log_levels=['sqlalchemy=WARNING', 'iso8601=WARNING', 'requests=WARNING', 'urllib3.connectionpool=WARNING', 'keystonemiddleware=WARNING', 'swiftclient=WARNING', 'keystoneauth=WARNING', 'ironicclient=WARNING']) set_cors_middleware_defaults() def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults( cors.CORS_OPTS, allow_headers=['X-Auth-Token', MIN_VERSION_HEADER, MAX_VERSION_HEADER, VERSION_HEADER], allow_methods=['GET', 'POST', 'PUT', 'HEAD', 'PATCH', 'DELETE', 'OPTIONS'] ) def parse_args(args, default_config_files=None): cfg.CONF(args, project='ironic-inspector', version=version.version_info.release_string(), default_config_files=default_config_files) def list_opts(): return [ ('capabilities', ironic_inspector.conf.capabilities.list_opts()), ('DEFAULT', ironic_inspector.conf.default.list_opts()), ('discovery', ironic_inspector.conf.discovery.list_opts()), ('dnsmasq_pxe_filter', ironic_inspector.conf.dnsmasq_pxe_filter.list_opts()), ('swift', ironic_inspector.conf.swift.list_opts()), ('ironic', ironic_inspector.conf.ironic.list_opts()), ('iptables', ironic_inspector.conf.iptables.list_opts()), ('processing', ironic_inspector.conf.processing.list_opts()), ('pci_devices', ironic_inspector.conf.pci_devices.list_opts()), ('pxe_filter', ironic_inspector.conf.pxe_filter.list_opts()), ] ironic-inspector-7.2.0/ironic_inspector/conf/default.py0000666000175100017510000000631213241323457023337 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from ironic_inspector.common.i18n import _ _OPTS = [ cfg.StrOpt('listen_address', default='0.0.0.0', help=_('IP to listen on.')), cfg.PortOpt('listen_port', default=5050, help=_('Port to listen on.')), cfg.StrOpt('auth_strategy', default='keystone', choices=('keystone', 'noauth'), help=_('Authentication method used on the ironic-inspector ' 'API. Valid options are "noauth" and "keystone"; 
"noauth" will disable all authentication.')), cfg.IntOpt('timeout', default=3600, help=_('Timeout after which introspection is considered ' 'failed, set to 0 to disable.')), cfg.IntOpt('node_status_keep_time', default=0, help=_('For how much time (in seconds) to keep status ' 'information about nodes after introspection was ' 'finished for them. Set to 0 (the default) ' 'to disable the timeout.'), deprecated_for_removal=True), cfg.IntOpt('clean_up_period', default=60, help=_('Amount of time in seconds, after which repeat clean up ' 'of timed out nodes and old nodes status information.')), cfg.BoolOpt('use_ssl', default=False, help=_('SSL Enabled/Disabled')), cfg.StrOpt('ssl_cert_path', default='', help=_('Path to SSL certificate')), cfg.StrOpt('ssl_key_path', default='', help=_('Path to SSL key')), cfg.IntOpt('max_concurrency', default=1000, min=2, help=_('The green thread pool size.')), cfg.IntOpt('introspection_delay', default=5, help=_('Delay (in seconds) between two introspections.')), cfg.ListOpt('ipmi_address_fields', default=['ilo_address', 'drac_host', 'drac_address', 'cimc_address'], help=_('Ironic driver_info fields that are equivalent ' 'to ipmi_address.')), cfg.StrOpt('rootwrap_config', default="/etc/ironic-inspector/rootwrap.conf", help=_('Path to the rootwrap configuration file to use for ' 'running commands as root')), cfg.IntOpt('api_max_limit', default=1000, min=1, help=_('Limit the number of elements an API list-call returns')) ] def register_opts(conf): conf.register_opts(_OPTS) def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/pci_devices.py0000666000175100017510000000207013241323457024165 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from ironic_inspector.common.i18n import _ _OPTS = [ cfg.MultiStrOpt('alias', default=[], help=_('An alias for PCI device identified by ' '\'vendor_id\' and \'product_id\' fields. Format: ' '{"vendor_id": "1234", "product_id": "5678", ' '"name": "pci_dev1"}')), ] def register_opts(conf): conf.register_opts(_OPTS, group='pci_devices') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/pxe_filter.py0000666000175100017510000000216213241323457024053 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from ironic_inspector.common.i18n import _ _OPTS = [ cfg.StrOpt('driver', default='iptables', help=_('PXE boot filter driver to use, such as iptables.')), cfg.IntOpt('sync_period', default=15, min=0, deprecated_name='firewall_update_period', deprecated_group='firewall', help=_('Amount of time in seconds between periodic updates ' 'of the filter.')), ] def register_opts(conf): conf.register_opts(_OPTS, 'pxe_filter') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/dnsmasq_pxe_filter.py0000666000175100017510000000370713241323457025603 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from ironic_inspector.common.i18n import _ _OPTS = [ cfg.StrOpt('dhcp_hostsdir', default='/var/lib/ironic-inspector/dhcp-hostsdir', help=_('The MAC address cache directory, exposed to dnsmasq. ' 'This directory is expected to be in exclusive control ' 'of the driver.')), cfg.BoolOpt('purge_dhcp_hostsdir', default=True, help=_('Purge the hostsdir upon driver initialization. ' 'Setting to false should only be performed when the ' 'deployment of inspector is such that there are ' 'multiple processes executing inside of the same host ' 'and namespace. In this case, the Operator is ' 'responsible for setting up a custom cleaning ' 'facility.')), cfg.StrOpt('dnsmasq_start_command', default='', help=_('A (shell) command line to start the dnsmasq service ' 'upon filter initialization. Default: don\'t start.')), cfg.StrOpt('dnsmasq_stop_command', default='', help=_('A (shell) command line to stop the dnsmasq service ' 'upon inspector (error) exit. Default: don\'t stop.')), ] def register_opts(conf): conf.register_opts(_OPTS, 'dnsmasq_pxe_filter') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/conf/ironic.py0000666000175100017510000000623713241323457023174 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
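# A hedged inspector.conf sketch wiring up the dnsmasq filter options above
# (illustrative values; ``driver = dnsmasq`` assumes the dnsmasq PXE filter
# driver entry point shipped with this release, and the systemd unit name is
# hypothetical):
#
#   [pxe_filter]
#   driver = dnsmasq
#   sync_period = 15
#
#   [dnsmasq_pxe_filter]
#   dhcp_hostsdir = /var/lib/ironic-inspector/dhcp-hostsdir
#   dnsmasq_start_command = systemctl start ironic-inspector-dnsmasq
#   dnsmasq_stop_command = systemctl stop ironic-inspector-dnsmasq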
from oslo_config import cfg from ironic_inspector.common.i18n import _ from ironic_inspector.common import keystone IRONIC_GROUP = 'ironic' SERVICE_TYPE = 'baremetal' _OPTS = [ cfg.StrOpt('os_region', help=_('Keystone region used to get Ironic endpoints.'), deprecated_for_removal=True, deprecated_reason=_("Use [ironic]/region_name option instead " "to configure region.")), cfg.StrOpt('auth_strategy', default='keystone', choices=('keystone', 'noauth'), help=_('Method to use for authentication: noauth or ' 'keystone.'), deprecated_for_removal=True, deprecated_reason=_("Use [ironic]/auth_type, for noauth case " "set [ironic]/auth_type to `none` and " "specify ironic API URL via " "[ironic]/endpoint_override option.")), cfg.StrOpt('ironic_url', default='http://localhost:6385/', help=_('Ironic API URL, used to set Ironic API URL when ' 'auth_strategy option is noauth or auth_type is "none" ' 'to work with standalone Ironic without keystone.'), deprecated_for_removal=True, deprecated_reason=_('Use [ironic]/endpoint_override option ' 'to set a specific ironic API url.')), cfg.StrOpt('os_service_type', default='baremetal', help=_('Ironic service type.'), deprecated_for_removal=True, deprecated_reason=_('Use [ironic]/service_type option ' 'to set a specific type.')), cfg.StrOpt('os_endpoint_type', default='internalURL', help=_('Ironic endpoint type.'), deprecated_for_removal=True, deprecated_reason=_('Use [ironic]/valid_interfaces option ' 'to specify endpoint interfaces.')), cfg.IntOpt('retry_interval', default=2, help=_('Interval between retries in case of conflict error ' '(HTTP 409).')), cfg.IntOpt('max_retries', default=30, help=_('Maximum number of retries in case of conflict error ' '(HTTP 409).')), ] def register_opts(conf): conf.register_opts(_OPTS, IRONIC_GROUP) keystone.register_auth_opts(IRONIC_GROUP, SERVICE_TYPE) def list_opts(): return keystone.add_auth_options(_OPTS, SERVICE_TYPE) ironic-inspector-7.2.0/ironic_inspector/conf/capabilities.py0000666000175100017510000000236513241323457024350 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
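# A hedged example of the standalone setup the deprecation notes above point
# to: instead of the deprecated ``auth_strategy = noauth`` plus
# ``ironic_url`` pair, the non-deprecated options would be set along these
# lines (endpoint URL is illustrative):
#
#   [ironic]
#   auth_type = none
#   endpoint_override = http://192.0.2.10:6385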
from oslo_config import cfg from ironic_inspector.common.i18n import _ DEFAULT_CPU_FLAGS_MAPPING = { 'vmx': 'cpu_vt', 'svm': 'cpu_vt', 'aes': 'cpu_aes', 'pse': 'cpu_hugepages', 'pdpe1gb': 'cpu_hugepages_1g', 'smx': 'cpu_txt', } _OPTS = [ cfg.BoolOpt('boot_mode', default=False, help=_('Whether to store the boot mode (BIOS or UEFI).')), cfg.DictOpt('cpu_flags', default=DEFAULT_CPU_FLAGS_MAPPING, help=_('Mapping between a CPU flag and a capability to set ' 'if this flag is present.')), ] def register_opts(conf): conf.register_opts(_OPTS, 'capabilities') def list_opts(): return _OPTS ironic-inspector-7.2.0/ironic_inspector/test/0000775000175100017510000000000013241324014021355 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/test/__init__.py0000666000175100017510000000000013241323457023470 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/test/unit/0000775000175100017510000000000013241324014022334 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_rules.py0000666000175100017510000002104413241323457026655 0ustar zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for introspection rules plugins.""" import mock from ironic_inspector.common import ironic as ir_utils from ironic_inspector import node_cache from ironic_inspector.plugins import rules as rules_plugins from ironic_inspector.test import base as test_base from ironic_inspector import utils TEST_SET = [(42, 42), ('42', 42), ('4.2', 4.2), (42, 41), ('42', 41), ('4.2', 4.0), (41, 42), ('41', 42), ('4.0', 4.2)] class TestSimpleConditions(test_base.BaseTest): def test_validate(self): cond = rules_plugins.SimpleCondition() cond.validate({'value': 42}) self.assertRaises(ValueError, cond.validate, {}) def _test(self, cond, expected, value, ref): self.assertIs(expected, cond.check(None, value, {'value': ref})) def test_eq(self): cond = rules_plugins.EqCondition() for values, expected in zip(TEST_SET, [True] * 3 + [False] * 6): self._test(cond, expected, *values) self._test(cond, True, 'foo', 'foo') self._test(cond, False, 'foo', 'bar') def test_ne(self): cond = rules_plugins.NeCondition() for values, expected in zip(TEST_SET, [False] * 3 + [True] * 6): self._test(cond, expected, *values) self._test(cond, False, 'foo', 'foo') self._test(cond, True, 'foo', 'bar') def test_gt(self): cond = rules_plugins.GtCondition() for values, expected in zip(TEST_SET, [False] * 3 + [True] * 3 + [False] * 3): self._test(cond, expected, *values) def test_ge(self): cond = rules_plugins.GeCondition() for values, expected in zip(TEST_SET, [True] * 6 + [False] * 3): self._test(cond, expected, *values) def test_le(self): cond = rules_plugins.LeCondition() for values, expected in zip(TEST_SET, [True] * 3 + [False] * 3 + [True] * 3): self._test(cond, expected, *values) def test_lt(self): cond = rules_plugins.LtCondition() for values, expected in zip(TEST_SET, [False] * 6 + [True] * 3): self._test(cond, expected, *values) class 
TestReConditions(test_base.BaseTest): def test_validate(self): for cond in (rules_plugins.MatchesCondition(), rules_plugins.ContainsCondition()): cond.validate({'value': r'[a-z]?(foo|b.r).+'}) self.assertRaises(ValueError, cond.validate, {'value': '**'}) def test_matches(self): cond = rules_plugins.MatchesCondition() for reg, field, res in [(r'.*', 'foo', True), (r'fo{1,2}', 'foo', True), (r'o{1,2}', 'foo', False), (r'[1-9]*', 42, True), (r'^(foo|bar)$', 'foo', True), (r'fo', 'foo', False)]: self.assertEqual(res, cond.check(None, field, {'value': reg})) def test_contains(self): cond = rules_plugins.ContainsCondition() for reg, field, res in [(r'.*', 'foo', True), (r'fo{1,2}', 'foo', True), (r'o{1,2}', 'foo', True), (r'[1-9]*', 42, True), (r'bar', 'foo', False)]: self.assertEqual(res, cond.check(None, field, {'value': reg})) class TestNetCondition(test_base.BaseTest): cond = rules_plugins.NetCondition() def test_validate(self): self.cond.validate({'value': '192.0.2.1/24'}) self.assertRaises(ValueError, self.cond.validate, {'value': 'foo'}) def test_check(self): self.assertTrue(self.cond.check(None, '192.0.2.4', {'value': '192.0.2.1/24'})) self.assertFalse(self.cond.check(None, '192.1.2.4', {'value': '192.0.2.1/24'})) class TestEmptyCondition(test_base.BaseTest): cond = rules_plugins.EmptyCondition() def test_check_none(self): self.assertTrue(self.cond.check(None, None, {})) self.assertFalse(self.cond.check(None, 0, {})) def test_check_empty_string(self): self.assertTrue(self.cond.check(None, '', {})) self.assertFalse(self.cond.check(None, '16', {})) def test_check_empty_list(self): self.assertTrue(self.cond.check(None, [], {})) self.assertFalse(self.cond.check(None, ['16'], {})) def test_check_empty_dict(self): self.assertTrue(self.cond.check(None, {}, {})) self.assertFalse(self.cond.check(None, {'test': '16'}, {})) class TestFailAction(test_base.BaseTest): act = rules_plugins.FailAction() def test_validate(self): self.act.validate({'message': 'boom'}) self.assertRaises(ValueError, self.act.validate, {}) def test_apply(self): self.assertRaisesRegex(utils.Error, 'boom', self.act.apply, None, {'message': 'boom'}) class TestSetAttributeAction(test_base.NodeTest): act = rules_plugins.SetAttributeAction() params = {'path': '/extra/value', 'value': 42} def test_validate(self): self.act.validate(self.params) self.assertRaises(ValueError, self.act.validate, {'value': 42}) self.assertRaises(ValueError, self.act.validate, {'path': '/extra/value'}) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply(self, mock_patch): self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with([{'op': 'add', 'path': '/extra/value', 'value': 42}]) @mock.patch('ironic_inspector.common.ironic.get_client', new=mock.Mock()) class TestSetCapabilityAction(test_base.NodeTest): act = rules_plugins.SetCapabilityAction() params = {'name': 'cap1', 'value': 'val'} def test_validate(self): self.act.validate(self.params) self.assertRaises(ValueError, self.act.validate, {'value': 42}) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply(self, mock_patch): self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with( [{'op': 'add', 'path': '/properties/capabilities', 'value': 'cap1:val'}], mock.ANY) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply_with_existing(self, mock_patch): self.node.properties['capabilities'] = 'x:y,cap1:old_val,answer:42' self.act.apply(self.node_info, self.params) patch = mock_patch.call_args[0][0] new_caps = 
ir_utils.capabilities_to_dict(patch[0]['value']) self.assertEqual({'cap1': 'val', 'x': 'y', 'answer': '42'}, new_caps) @mock.patch('ironic_inspector.common.ironic.get_client', new=mock.Mock()) class TestExtendAttributeAction(test_base.NodeTest): act = rules_plugins.ExtendAttributeAction() params = {'path': '/extra/value', 'value': 42} def test_validate(self): self.act.validate(self.params) self.assertRaises(ValueError, self.act.validate, {'value': 42}) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply(self, mock_patch): self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with( [{'op': 'add', 'path': '/extra/value', 'value': [42]}], mock.ANY) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply_non_empty(self, mock_patch): self.node.extra['value'] = [0] self.act.apply(self.node_info, self.params) mock_patch.assert_called_once_with( [{'op': 'replace', 'path': '/extra/value', 'value': [0, 42]}], mock.ANY) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_apply_unique_with_existing(self, mock_patch): params = dict(unique=True, **self.params) self.node.extra['value'] = [42] self.act.apply(self.node_info, params) self.assertFalse(mock_patch.called) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_discovery.py0000666000175100017510000001302513241323457027532 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
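# A hedged sketch of the rule JSON exercised by the action tests above
# (illustrative, not from the original tree): ``extend-attribute`` with
# ``unique: true`` appends 42 to /extra/value only when it is not already in
# the list, which is why test_apply_unique_with_existing expects no patch
# call:
#
#   {"action": "extend-attribute", "path": "/extra/value",
#    "value": 42, "unique": true}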
import copy import mock from ironic_inspector.common import ironic as ir_utils from ironic_inspector import node_cache from ironic_inspector.plugins import discovery from ironic_inspector.test import base as test_base from ironic_inspector import utils def copy_call_args(mock_arg): new_mock = mock.Mock() def side_effect(*args, **kwargs): args = copy.deepcopy(args) kwargs = copy.deepcopy(kwargs) new_mock(*args, **kwargs) return mock.DEFAULT mock_arg.side_effect = side_effect return new_mock class TestEnrollNodeNotFoundHook(test_base.NodeTest): def setUp(self): super(TestEnrollNodeNotFoundHook, self).setUp() self.ironic = mock.MagicMock() @mock.patch.object(node_cache, 'create_node', autospec=True) @mock.patch.object(ir_utils, 'get_client', autospec=True) @mock.patch.object(discovery, '_check_existing_nodes', autospec=True) def test_enroll_default(self, mock_check_existing, mock_client, mock_create_node): mock_client.return_value = self.ironic introspection_data = {'test': 'test'} discovery.enroll_node_not_found_hook(introspection_data) mock_create_node.assert_called_once_with('fake', ironic=self.ironic, driver_info={}) mock_check_existing.assert_called_once_with( introspection_data, {}, self.ironic) @mock.patch.object(node_cache, 'create_node', autospec=True) @mock.patch.object(ir_utils, 'get_client', autospec=True) @mock.patch.object(discovery, '_check_existing_nodes', autospec=True) def test_enroll_with_ipmi_address(self, mock_check_existing, mock_client, mock_create_node): mock_client.return_value = self.ironic introspection_data = {'ipmi_address': '1.2.3.4'} expected_data = introspection_data.copy() mock_check_existing = copy_call_args(mock_check_existing) discovery.enroll_node_not_found_hook(introspection_data) mock_create_node.assert_called_once_with( 'fake', ironic=self.ironic, driver_info={'ipmi_address': '1.2.3.4'}) mock_check_existing.assert_called_once_with( expected_data, {'ipmi_address': '1.2.3.4'}, self.ironic) self.assertEqual({'ipmi_address': '1.2.3.4', 'auto_discovered': True}, introspection_data) @mock.patch.object(node_cache, 'create_node', autospec=True) @mock.patch.object(ir_utils, 'get_client', autospec=True) @mock.patch.object(discovery, '_check_existing_nodes', autospec=True) def test_enroll_with_non_default_driver(self, mock_check_existing, mock_client, mock_create_node): mock_client.return_value = self.ironic discovery.CONF.set_override('enroll_node_driver', 'fake2', 'discovery') mock_check_existing = copy_call_args(mock_check_existing) introspection_data = {} discovery.enroll_node_not_found_hook(introspection_data) mock_create_node.assert_called_once_with('fake2', ironic=self.ironic, driver_info={}) mock_check_existing.assert_called_once_with( {}, {}, self.ironic) self.assertEqual({'auto_discovered': True}, introspection_data) def test__check_existing_nodes_new_mac(self): self.ironic.port.list.return_value = [] introspection_data = {'macs': self.macs} node_driver_info = {} discovery._check_existing_nodes( introspection_data, node_driver_info, self.ironic) def test__check_existing_nodes_existing_mac(self): self.ironic.port.list.return_value = [mock.MagicMock( address=self.macs[0], uuid='fake_port')] introspection_data = { 'all_interfaces': {'eth%d' % i: {'mac': m} for i, m in enumerate(self.macs)} } node_driver_info = {} self.assertRaises(utils.Error, discovery._check_existing_nodes, introspection_data, node_driver_info, self.ironic) def test__check_existing_nodes_new_node(self): self.ironic.node.list.return_value = [mock.MagicMock( driver_info={'ipmi_address': 
'1.2.4.3'}, uuid='fake_node')] introspection_data = {} node_driver_info = {'ipmi_address': self.bmc_address} discovery._check_existing_nodes(introspection_data, node_driver_info, self.ironic) def test__check_existing_nodes_existing_node(self): self.ironic.node.list.return_value = [mock.MagicMock( driver_info={'ipmi_address': self.bmc_address}, uuid='fake_node')] introspection_data = {} node_driver_info = {'ipmi_address': self.bmc_address} self.assertRaises(utils.Error, discovery._check_existing_nodes, introspection_data, node_driver_info, self.ironic) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_db.py0000666000175100017510000000575213241323457024357 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from ironic_inspector import db from ironic_inspector.test import base as test_base class TestDB(test_base.NodeTest): @mock.patch.object(db, 'get_reader_session', autospec=True) def test_model_query(self, mock_reader): mock_session = mock_reader.return_value fake_query = mock_session.query.return_value query = db.model_query('db.Node') mock_reader.assert_called_once_with() mock_session.query.assert_called_once_with('db.Node') self.assertEqual(fake_query, query) @mock.patch.object(db, 'get_writer_session', autospec=True) def test_ensure_transaction_new_session(self, mock_writer): mock_session = mock_writer.return_value with db.ensure_transaction() as session: mock_writer.assert_called_once_with() mock_session.begin.assert_called_once_with(subtransactions=True) self.assertEqual(mock_session, session) @mock.patch.object(db, 'get_writer_session', autospec=True) def test_ensure_transaction_session(self, mock_writer): mock_session = mock.MagicMock() with db.ensure_transaction(session=mock_session) as session: self.assertFalse(mock_writer.called) mock_session.begin.assert_called_once_with(subtransactions=True) self.assertEqual(mock_session, session) @mock.patch.object(db.enginefacade, 'transaction_context', autospec=True) def test__create_context_manager(self, mock_cnxt): mock_ctx_mgr = mock_cnxt.return_value ctx_mgr = db._create_context_manager() mock_ctx_mgr.configure.assert_called_once_with(sqlite_fk=False) self.assertEqual(mock_ctx_mgr, ctx_mgr) @mock.patch.object(db, 'get_context_manager', autospec=True) def test_get_reader_session(self, mock_cnxt_mgr): mock_cnxt = mock_cnxt_mgr.return_value mock_sess_maker = mock_cnxt.reader.get_sessionmaker.return_value session = db.get_reader_session() mock_sess_maker.assert_called_once_with() self.assertEqual(mock_sess_maker.return_value, session) @mock.patch.object(db, 'get_context_manager', autospec=True) def test_get_writer_session(self, mock_cnxt_mgr): mock_cnxt = mock_cnxt_mgr.return_value mock_sess_maker = mock_cnxt.writer.get_sessionmaker.return_value session = db.get_writer_session() mock_sess_maker.assert_called_once_with() self.assertEqual(mock_sess_maker.return_value, session) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_capabilities.py0000666000175100017510000000565313241323457030164 0ustar 
zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from ironic_inspector import node_cache from ironic_inspector.plugins import base from ironic_inspector.plugins import capabilities from ironic_inspector.test import base as test_base CONF = cfg.CONF @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', autospec=True) class TestCapabilitiesHook(test_base.NodeTest): hook = capabilities.CapabilitiesHook() def test_loadable_by_name(self, mock_caps): base.CONF.set_override('processing_hooks', 'capabilities', 'processing') ext = base.processing_hooks_manager()['capabilities'] self.assertIsInstance(ext.obj, capabilities.CapabilitiesHook) def test_no_data(self, mock_caps): self.hook.before_update(self.data, self.node_info) self.assertFalse(mock_caps.called) def test_boot_mode(self, mock_caps): CONF.set_override('boot_mode', True, 'capabilities') self.inventory['boot'] = {'current_boot_mode': 'uefi'} self.hook.before_update(self.data, self.node_info) mock_caps.assert_called_once_with(self.node_info, boot_mode='uefi') def test_boot_mode_disabled(self, mock_caps): self.inventory['boot'] = {'current_boot_mode': 'uefi'} self.hook.before_update(self.data, self.node_info) self.assertFalse(mock_caps.called) def test_cpu_flags(self, mock_caps): self.inventory['cpu']['flags'] = ['fpu', 'vmx', 'aes', 'pse', 'smx'] self.hook.before_update(self.data, self.node_info) mock_caps.assert_called_once_with(self.node_info, cpu_vt='true', cpu_hugepages='true', cpu_txt='true', cpu_aes='true') def test_cpu_no_known_flags(self, mock_caps): self.inventory['cpu']['flags'] = ['fpu'] self.hook.before_update(self.data, self.node_info) self.assertFalse(mock_caps.called) def test_cpu_flags_custom(self, mock_caps): CONF.set_override('cpu_flags', {'fpu': 'new_cap'}, 'capabilities') self.inventory['cpu']['flags'] = ['fpu', 'vmx', 'aes', 'pse'] self.hook.before_update(self.data, self.node_info) mock_caps.assert_called_once_with(self.node_info, new_cap='true') ironic-inspector-7.2.0/ironic_inspector/test/unit/test_migrations.py0000666000175100017510000004730613241323457026137 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. There are "opportunistic" tests here; the supported backends are sqlite (used in the test environment by default), mysql and postgresql, which require a properly configured unit test environment.
For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. """ import contextlib import datetime import alembic from alembic import script import mock from oslo_config import cfg from oslo_db.sqlalchemy.migration_cli import ext_alembic from oslo_db.sqlalchemy import orm from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy from ironic_inspector import db from ironic_inspector import dbsync from ironic_inspector import introspection_state as istate from ironic_inspector.test import base CONF = cfg.CONF LOG = logging.getLogger(__name__) def _get_connect_string(backend, user, passwd, database): """Get database connection Try to get a connection with a very specific set of values, if we get these then we'll run the tests, otherwise they are skipped """ if backend == "sqlite": backend = "sqlite" elif backend == "postgres": backend = "postgresql+psycopg2" elif backend == "mysql": backend = "mysql+mysqldb" else: raise Exception("Unrecognized backend: '%s'" % backend) return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % {'backend': backend, 'user': user, 'passwd': passwd, 'database': database}) def _is_backend_avail(backend, user, passwd, database): try: connect_uri = _get_connect_string(backend, user, passwd, database) engine = sqlalchemy.create_engine(connect_uri) connection = engine.connect() except Exception: # intentionally catch all to handle exceptions even if we don't # have any backend code loaded. return False else: connection.close() engine.dispose() return True @contextlib.contextmanager def patch_with_engine(engine): with mock.patch.object(db, 'get_writer_session') as patch_w_sess, \ mock.patch.object(db, 'get_reader_session') as patch_r_sess: patch_w_sess.return_value = patch_r_sess.return_value = ( orm.get_maker(engine)()) yield class WalkVersionsMixin(object): def _walk_versions(self, engine=None, alembic_cfg=None): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data # in the databases. This just checks that the schema itself # upgrades successfully. with patch_with_engine(engine): script_directory = script.ScriptDirectory.from_config(alembic_cfg) self.assertIsNone(self.migration_ext.version()) versions = [ver for ver in script_directory.walk_revisions()] for version in reversed(versions): self._migrate_up(engine, alembic_cfg, version.revision, with_data=True) def _migrate_up(self, engine, config, version, with_data=False): """migrate up to a new version of the db. We allow for data insertion and post checks at every migration version with special _pre_upgrade_### and _check_### functions in the main test. 
""" # NOTE(sdague): try block is here because it's impossible to debug # where a failed data migration happens otherwise try: if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % version, None) if pre_upgrade: data = pre_upgrade(engine) self.migration_ext.upgrade(version) self.assertEqual(version, self.migration_ext.version()) if with_data: check = getattr(self, "_check_%s" % version, None) if check: check(engine, data) except Exception: LOG.error("Failed to migrate to version %(version)s on engine " "%(engine)s", {'version': version, 'engine': engine}) raise class TestWalkVersions(base.BaseTest, WalkVersionsMixin): def setUp(self): super(TestWalkVersions, self).setUp() self.engine = mock.MagicMock() self.migration_ext = mock.MagicMock() self.config = mock.MagicMock() self.versions = [mock.Mock(revision='2b2'), mock.Mock(revision='1a1')] def test_migrate_up(self): self.migration_ext.version.return_value = 'dsa123' self._migrate_up(self.engine, self.config, 'dsa123') self.migration_ext.version.assert_called_with() def test_migrate_up_with_data(self): test_value = {"a": 1, "b": 2} self.migration_ext.version.return_value = '141' self._pre_upgrade_141 = mock.MagicMock() self._pre_upgrade_141.return_value = test_value self._check_141 = mock.MagicMock() self._migrate_up(self.engine, self.config, '141', True) self._pre_upgrade_141.assert_called_with(self.engine) self._check_141.assert_called_with(self.engine, test_value) @mock.patch.object(script, 'ScriptDirectory') @mock.patch.object(WalkVersionsMixin, '_migrate_up') def test_walk_versions_all_default(self, _migrate_up, script_directory): fc = script_directory.from_config() fc.walk_revisions.return_value = self.versions self.migration_ext.version.return_value = None self._walk_versions(self.engine, self.config) self.migration_ext.version.assert_called_with() upgraded = [mock.call(self.engine, self.config, v.revision, with_data=True) for v in reversed(self.versions)] self.assertEqual(self._migrate_up.call_args_list, upgraded) @mock.patch.object(script, 'ScriptDirectory') @mock.patch.object(WalkVersionsMixin, '_migrate_up') def test_walk_versions_all_false(self, _migrate_up, script_directory): fc = script_directory.from_config() fc.walk_revisions.return_value = self.versions self.migration_ext.version.return_value = None self._walk_versions(self.engine, self.config) upgraded = [mock.call(self.engine, self.config, v.revision, with_data=True) for v in reversed(self.versions)] self.assertEqual(upgraded, self._migrate_up.call_args_list) class MigrationCheckersMixin(object): def setUp(self): super(MigrationCheckersMixin, self).setUp() self.config = dbsync._get_alembic_config() self.config.ironic_inspector_config = CONF # create AlembicExtension with fake config and replace # with real one. 
self.migration_ext = ext_alembic.AlembicExtension( self.engine, {'alembic_ini_path': ''}) self.migration_ext.config = self.config def test_walk_versions(self): self._walk_versions(self.engine, self.config) def test_connect_fail(self): """Test that we can trigger a database connection failure. Test that we fail gracefully to ensure we don't break people without a specific database backend. """ if _is_backend_avail(self.FIXTURE.DRIVER, "openstack_cifail", self.FIXTURE.USERNAME, self.FIXTURE.DBNAME): self.fail("Shouldn't have connected") def _check_578f84f38d(self, engine, data): nodes = db_utils.get_table(engine, 'nodes') col_names = [column.name for column in nodes.c] self.assertIn('uuid', col_names) self.assertIsInstance(nodes.c.uuid.type, sqlalchemy.types.String) self.assertIn('started_at', col_names) self.assertIsInstance(nodes.c.started_at.type, sqlalchemy.types.Float) self.assertIn('finished_at', col_names) self.assertIsInstance(nodes.c.finished_at.type, sqlalchemy.types.Float) self.assertIn('error', col_names) self.assertIsInstance(nodes.c.error.type, sqlalchemy.types.Text) attributes = db_utils.get_table(engine, 'attributes') col_names = [column.name for column in attributes.c] self.assertIn('uuid', col_names) self.assertIsInstance(attributes.c.uuid.type, sqlalchemy.types.String) self.assertIn('name', col_names) self.assertIsInstance(attributes.c.name.type, sqlalchemy.types.String) self.assertIn('value', col_names) self.assertIsInstance(attributes.c.value.type, sqlalchemy.types.String) options = db_utils.get_table(engine, 'options') col_names = [column.name for column in options.c] self.assertIn('uuid', col_names) self.assertIsInstance(options.c.uuid.type, sqlalchemy.types.String) self.assertIn('name', col_names) self.assertIsInstance(options.c.name.type, sqlalchemy.types.String) self.assertIn('value', col_names) self.assertIsInstance(options.c.value.type, sqlalchemy.types.Text) def _check_d588418040d(self, engine, data): rules = db_utils.get_table(engine, 'rules') col_names = [column.name for column in rules.c] self.assertIn('uuid', col_names) self.assertIsInstance(rules.c.uuid.type, sqlalchemy.types.String) self.assertIn('created_at', col_names) self.assertIsInstance(rules.c.created_at.type, sqlalchemy.types.DateTime) self.assertIn('description', col_names) self.assertIsInstance(rules.c.description.type, sqlalchemy.types.Text) self.assertIn('disabled', col_names) # in some backends the bool type is an integer self.assertIsInstance(rules.c.disabled.type, (sqlalchemy.types.Boolean, sqlalchemy.types.Integer)) conditions = db_utils.get_table(engine, 'rule_conditions') col_names = [column.name for column in conditions.c] self.assertIn('id', col_names) self.assertIsInstance(conditions.c.id.type, sqlalchemy.types.Integer) self.assertIn('rule', col_names) self.assertIsInstance(conditions.c.rule.type, sqlalchemy.types.String) self.assertIn('op', col_names) self.assertIsInstance(conditions.c.op.type, sqlalchemy.types.String) self.assertIn('multiple', col_names) self.assertIsInstance(conditions.c.multiple.type, sqlalchemy.types.String) self.assertIn('field', col_names) self.assertIsInstance(conditions.c.field.type, sqlalchemy.types.Text) self.assertIn('params', col_names) self.assertIsInstance(conditions.c.params.type, sqlalchemy.types.Text) actions = db_utils.get_table(engine, 'rule_actions') col_names = [column.name for column in actions.c] self.assertIn('id', col_names) self.assertIsInstance(actions.c.id.type, sqlalchemy.types.Integer) self.assertIn('rule', col_names)
self.assertIsInstance(actions.c.rule.type, sqlalchemy.types.String) self.assertIn('action', col_names) self.assertIsInstance(actions.c.action.type, sqlalchemy.types.String) self.assertIn('params', col_names) self.assertIsInstance(actions.c.params.type, sqlalchemy.types.Text) def _check_e169a4a81d88(self, engine, data): rule_conditions = db_utils.get_table(engine, 'rule_conditions') # set invert to the default value - False data = {'id': 1, 'op': 'eq', 'multiple': 'all'} rule_conditions.insert().execute(data) conds = rule_conditions.select( rule_conditions.c.id == 1).execute().first() self.assertFalse(conds['invert']) # set invert explicitly - True data = {'id': 2, 'op': 'eq', 'multiple': 'all', 'invert': True} rule_conditions.insert().execute(data) conds = rule_conditions.select( rule_conditions.c.id == 2).execute().first() self.assertTrue(conds['invert']) def _pre_upgrade_d2e48801c8ef(self, engine): ok_node_id = uuidutils.generate_uuid() err_node_id = uuidutils.generate_uuid() data = [ { 'uuid': ok_node_id, 'error': None, 'finished_at': 0.0, 'started_at': 0.0 }, { 'uuid': err_node_id, 'error': 'Oops!', 'finished_at': 0.0, 'started_at': 0.0 } ] nodes = db_utils.get_table(engine, 'nodes') for node in data: nodes.insert().execute(node) return {'err_node_id': err_node_id, 'ok_node_id': ok_node_id} def _check_d2e48801c8ef(self, engine, data): nodes = db_utils.get_table(engine, 'nodes') col_names = [column.name for column in nodes.c] self.assertIn('uuid', col_names) self.assertIsInstance(nodes.c.uuid.type, sqlalchemy.types.String) self.assertIn('version_id', col_names) self.assertIsInstance(nodes.c.version_id.type, sqlalchemy.types.String) self.assertIn('state', col_names) self.assertIsInstance(nodes.c.state.type, sqlalchemy.types.String) self.assertIn('started_at', col_names) self.assertIsInstance(nodes.c.started_at.type, sqlalchemy.types.Float) self.assertIn('finished_at', col_names) self.assertIsInstance(nodes.c.finished_at.type, sqlalchemy.types.Float) self.assertIn('error', col_names) self.assertIsInstance(nodes.c.error.type, sqlalchemy.types.Text) ok_node_id = data['ok_node_id'] err_node_id = data['err_node_id'] # assert the ok node is in the (default) finished state ok_node = nodes.select(nodes.c.uuid == ok_node_id).execute().first() self.assertEqual(istate.States.finished, ok_node['state']) # assert err node state is error after the migration # even though the default state is finished err_node = nodes.select(nodes.c.uuid == err_node_id).execute().first() self.assertEqual(istate.States.error, err_node['state']) def _pre_upgrade_d00d6e3f38c4(self, engine): nodes = db_utils.get_table(engine, 'nodes') data = [] for finished_at in (None, 1234.0): node = {'uuid': uuidutils.generate_uuid(), 'started_at': 1232.0, 'finished_at': finished_at, 'error': None} nodes.insert().values(node).execute() data.append(node) return data def _check_d00d6e3f38c4(self, engine, data): nodes = db_utils.get_table(engine, 'nodes') col_names = [column.name for column in nodes.c] self.assertIn('started_at', col_names) self.assertIn('finished_at', col_names) self.assertIsInstance(nodes.c.started_at.type, sqlalchemy.types.DateTime) self.assertIsInstance(nodes.c.finished_at.type, sqlalchemy.types.DateTime) for node in data: finished_at = datetime.datetime.utcfromtimestamp( node['finished_at']) if node['finished_at'] else None row = nodes.select(nodes.c.uuid == node['uuid']).execute().first() self.assertEqual( datetime.datetime.utcfromtimestamp(node['started_at']), row['started_at']) self.assertEqual( finished_at,
row['finished_at']) def _pre_upgrade_882b2d84cb1b(self, engine): attributes = db_utils.get_table(engine, 'attributes') nodes = db_utils.get_table(engine, 'nodes') self.node_uuid = uuidutils.generate_uuid() node = { 'uuid': self.node_uuid, 'started_at': datetime.datetime.utcnow(), 'finished_at': None, 'error': None, 'state': istate.States.starting } nodes.insert().values(node).execute() data = { 'uuid': self.node_uuid, 'name': 'foo', 'value': 'bar' } attributes.insert().values(data).execute() def _check_882b2d84cb1b(self, engine, data): attributes = db_utils.get_table(engine, 'attributes') col_names = [column.name for column in attributes.c] self.assertIn('uuid', col_names) self.assertIsInstance(attributes.c.uuid.type, sqlalchemy.types.String) self.assertIn('node_uuid', col_names) self.assertIsInstance(attributes.c.node_uuid.type, sqlalchemy.types.String) self.assertIn('name', col_names) self.assertIsInstance(attributes.c.name.type, sqlalchemy.types.String) self.assertIn('value', col_names) self.assertIsInstance(attributes.c.value.type, sqlalchemy.types.String) row = attributes.select(attributes.c.node_uuid == self.node_uuid).execute().first() self.assertEqual(self.node_uuid, row.node_uuid) self.assertNotEqual(self.node_uuid, row.uuid) self.assertIsNotNone(row.uuid) self.assertEqual('foo', row.name) self.assertEqual('bar', row.value) def test_upgrade_and_version(self): with patch_with_engine(self.engine): self.migration_ext.upgrade('head') self.assertIsNotNone(self.migration_ext.version()) def test_upgrade_twice(self): with patch_with_engine(self.engine): self.migration_ext.upgrade('578f84f38d') v1 = self.migration_ext.version() self.migration_ext.upgrade('d588418040d') v2 = self.migration_ext.version() self.assertNotEqual(v1, v2) class TestMigrationsMySQL(MigrationCheckersMixin, WalkVersionsMixin, test_base.MySQLOpportunisticTestCase): pass class TestMigrationsPostgreSQL(MigrationCheckersMixin, WalkVersionsMixin, test_base.PostgreSQLOpportunisticTestCase): pass class TestMigrationSqlite(MigrationCheckersMixin, WalkVersionsMixin, test_base.DbTestCase): pass class ModelsMigrationSyncMixin(object): def get_metadata(self): return db.Base.metadata def get_engine(self): return self.engine def db_sync(self, engine): config = dbsync._get_alembic_config() config.ironic_inspector_config = CONF with patch_with_engine(engine): alembic.command.upgrade(config, 'head') class ModelsMigrationsSyncMysql(ModelsMigrationSyncMixin, test_migrations.ModelsMigrationsSync, test_base.MySQLOpportunisticTestCase): pass class ModelsMigrationsSyncPostgres(ModelsMigrationSyncMixin, test_migrations.ModelsMigrationsSync, test_base.PostgreSQLOpportunisticTestCase): pass class ModelsMigrationsSyncSqlite(ModelsMigrationSyncMixin, test_migrations.ModelsMigrationsSync, test_base.DbTestCase): pass ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_base.py0000666000175100017510000000653213241323457026442 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
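# A minimal sketch (a hypothetical helper, not the real implementation) of
# the ordering rule that the TestValidateProcessingHooks cases below
# exercise: a hook may only depend on hooks that appear earlier in the
# pipeline, and a missing, self-referencing or out-of-order dependency is
# reported as "missing".
def _example_validate_hook_order(hooks):
    seen = set()
    for ext in hooks:
        missing = set(ext.obj.dependencies) - seen
        if missing:
            # mirrors the "missing: 1" message asserted in the tests below
            raise RuntimeError('missing: %s' % ', '.join(sorted(missing)))
        seen.add(ext.name)
    return hooks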
import collections import mock from ironic_inspector.plugins import base from ironic_inspector.test import base as test_base class WithValidation(base.WithValidation): REQUIRED_PARAMS = {'x'} OPTIONAL_PARAMS = {'y', 'z'} class TestWithValidation(test_base.BaseTest): def setUp(self): super(TestWithValidation, self).setUp() self.test = WithValidation() def test_ok(self): for x in (1, 0, '', False, True): self.test.validate({'x': x}) self.test.validate({'x': 'x', 'y': 42}) self.test.validate({'x': 'x', 'y': 42, 'z': False}) def test_required_missing(self): err_re = 'missing required parameter\(s\): x' self.assertRaisesRegex(ValueError, err_re, self.test.validate, {}) self.assertRaisesRegex(ValueError, err_re, self.test.validate, {'x': None}) self.assertRaisesRegex(ValueError, err_re, self.test.validate, {'y': 1, 'z': 2}) def test_unexpected(self): self.assertRaisesRegex(ValueError, 'unexpected parameter\(s\): foo', self.test.validate, {'foo': 'bar', 'x': 42}) fake_ext = collections.namedtuple('Extension', ['name', 'obj']) @mock.patch.object(base, 'processing_hooks_manager', autospec=True) class TestValidateProcessingHooks(test_base.BaseTest): def test_ok(self, mock_mgr): mock_mgr.return_value = [ fake_ext(name='1', obj=mock.Mock(dependencies=[])), fake_ext(name='2', obj=mock.Mock(dependencies=['1'])), fake_ext(name='3', obj=mock.Mock(dependencies=['2', '1'])), ] hooks = base.validate_processing_hooks() self.assertEqual(mock_mgr.return_value, hooks) mock_mgr.assert_called_once_with() def test_broken_dependencies(self, mock_mgr): mock_mgr.return_value = [ fake_ext(name='2', obj=mock.Mock(dependencies=['1'])), fake_ext(name='3', obj=mock.Mock(dependencies=['2', '1'])), ] self.assertRaisesRegex(RuntimeError, "missing: 1", base.validate_processing_hooks) def test_self_dependency(self, mock_mgr): mock_mgr.return_value = [ fake_ext(name='1', obj=mock.Mock(dependencies=['1'])), ] self.assertRaisesRegex(RuntimeError, "missing: 1", base.validate_processing_hooks) def test_wrong_dependencies_order(self, mock_mgr): mock_mgr.return_value = [ fake_ext(name='2', obj=mock.Mock(dependencies=['1'])), fake_ext(name='1', obj=mock.Mock(dependencies=[])), fake_ext(name='3', obj=mock.Mock(dependencies=['2', '1'])), ] self.assertRaisesRegex(RuntimeError, "missing: 1", base.validate_processing_hooks) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_local_link_connection.py0000666000175100017510000002064613241323457032060 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
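# The LLDP TLVs in the tests below are raw hex strings whose first octet is
# the subtype. A minimal decoding sketch (a hypothetical helper, not part of
# the plugin) for the values used here, e.g.
# '04885a92ec5459' -> (4, '88:5a:92:ec:54:59') and
# '0545746865726e6574312f3138' -> (5, 'Ethernet1/18'):
import binascii

def _example_decode_lldp_value(hex_value):
    raw = bytearray(binascii.unhexlify(hex_value))
    subtype, payload = raw[0], raw[1:]
    if subtype in (3, 4):  # MAC-address subtypes (port ID and chassis ID)
        return subtype, ':'.join('%02x' % octet for octet in payload)
    return subtype, payload.decode('ascii')  # e.g. an interface name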
import mock from oslo_config import cfg from ironic_inspector import node_cache from ironic_inspector.plugins import local_link_connection from ironic_inspector.test import base as test_base from ironic_inspector import utils class TestGenericLocalLinkConnectionHook(test_base.NodeTest): hook = local_link_connection.GenericLocalLinkConnectionHook() def setUp(self): super(TestGenericLocalLinkConnectionHook, self).setUp() self.data = { 'inventory': { 'interfaces': [{ 'name': 'em1', 'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1', 'lldp': [ (0, ''), (1, '04885a92ec5459'), (2, '0545746865726e6574312f3138'), (3, '0078')] }], 'cpu': 1, 'disks': 1, 'memory': 1 }, 'all_interfaces': { 'em1': {}, } } llc = { 'port_id': '56' } ports = [mock.Mock(spec=['address', 'uuid', 'local_link_connection'], address=a, local_link_connection=llc) for a in ('11:11:11:11:11:11',)] self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node, ports=ports) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_expected_data(self, mock_patch): patches = [ {'path': '/local_link_connection/port_id', 'value': 'Ethernet1/18', 'op': 'add'}, {'path': '/local_link_connection/switch_id', 'value': '88:5a:92:ec:54:59', 'op': 'add'}, ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_invalid_chassis_id_subtype(self, mock_patch): # First byte of TLV value is processed to calculate the subtype for the # chassis ID, Subtype 5 ('05...') isn't a subtype supported by this # plugin, so we expect it to skip this TLV. self.data['inventory']['interfaces'][0]['lldp'][1] = ( 1, '05885a92ec5459') patches = [ {'path': '/local_link_connection/port_id', 'value': 'Ethernet1/18', 'op': 'add'}, ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_invalid_port_id_subtype(self, mock_patch): # First byte of TLV value is processed to calculate the subtype for the # port ID, Subtype 6 ('06...') isn't a subtype supported by this # plugin, so we expect it to skip this TLV. 
self.data['inventory']['interfaces'][0]['lldp'][2] = ( 2, '0645746865726e6574312f3138') patches = [ {'path': '/local_link_connection/switch_id', 'value': '88:5a:92:ec:54:59', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_port_id_subtype_mac(self, mock_patch): self.data['inventory']['interfaces'][0]['lldp'][2] = ( 2, '03885a92ec5458') patches = [ {'path': '/local_link_connection/port_id', 'value': '88:5a:92:ec:54:58', 'op': 'add'}, {'path': '/local_link_connection/switch_id', 'value': '88:5a:92:ec:54:59', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_lldp_none(self, mock_patch): self.data['inventory']['interfaces'][0]['lldp'] = None patches = [] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_interface_not_in_all_interfaces(self, mock_patch): self.data['all_interfaces'] = {} patches = [] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_interface_not_in_ironic(self, mock_patch): self.node_info._ports = {} patches = [] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) def test_no_inventory(self): del self.data['inventory'] self.assertRaises(utils.Error, self.hook.before_update, self.data, self.node_info) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_no_overwrite(self, mock_patch): cfg.CONF.set_override('overwrite_existing', False, group='processing') patches = [ {'path': '/local_link_connection/switch_id', 'value': '88:5a:92:ec:54:59', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_processed_data_available(self, mock_patch): self.data['all_interfaces'] = { 'em1': {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { "switch_chassis_id": "11:22:33:aa:bb:dd", "switch_port_id": "Ethernet2/66"} } } patches = [ {'path': '/local_link_connection/port_id', 'value': 'Ethernet2/66', 'op': 'add'}, {'path': '/local_link_connection/switch_id', 'value': '11:22:33:aa:bb:dd', 'op': 'add'}, ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_processed_data_chassis_only(self, mock_patch): self.data['all_interfaces'] = { 'em1': {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { "switch_chassis_id": "11:22:33:aa:bb:dd"} } } patches = [ {'path': '/local_link_connection/switch_id', 'value': '11:22:33:aa:bb:dd', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def test_processed_data_port_only(self, mock_patch): self.data['all_interfaces'] = { 'em1': {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { "switch_port_id": "Ethernet2/66"} } } patches = [ {'path': '/local_link_connection/port_id', 'value': 'Ethernet2/66', 'op': 'add'} ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch_port') def 
test_processed_chassis_id_not_mac(self, mock_patch): self.data['all_interfaces'] = { 'em1': {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { "switch_chassis_id": "192.0.2.1", "switch_port_id": "Ethernet2/66"} } } # skip chassis_id since its not a mac patches = [ {'path': '/local_link_connection/port_id', 'value': 'Ethernet2/66', 'op': 'add'}, ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patches, mock_patch) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_swift.py0000666000175100017510000001370413241323457025122 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Mostly copied from ironic/tests/test_swift.py from keystoneauth1 import loading as kloading import mock from swiftclient import client as swift_client from swiftclient import exceptions as swift_exception from ironic_inspector.common import keystone from ironic_inspector.common import swift from ironic_inspector.test import base as test_base from ironic_inspector import utils class BaseTest(test_base.NodeTest): def setUp(self): super(BaseTest, self).setUp() self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD'] self.pxe_mac = self.macs[1] self.data = { 'ipmi_address': self.bmc_address, 'cpus': 2, 'cpu_arch': 'x86_64', 'memory_mb': 1024, 'local_gb': 20, 'interfaces': { 'em1': {'mac': self.macs[0], 'ip': '1.2.0.1'}, 'em2': {'mac': self.macs[1], 'ip': '1.2.0.2'}, 'em3': {'mac': self.all_macs[2]}, }, 'boot_interface': '01-' + self.pxe_mac.replace(':', '-'), } @mock.patch.object(keystone, 'get_adapter', autospec=True) @mock.patch.object(keystone, 'register_auth_opts') @mock.patch.object(keystone, 'get_session') @mock.patch.object(swift_client, 'Connection', autospec=True) class SwiftTestCase(BaseTest): def setUp(self): super(SwiftTestCase, self).setUp() swift.reset_swift_session() self.swift_exception = swift_exception.ClientException('', '') self.cfg.config(group='swift', os_service_type='object-store', os_endpoint_type='internalURL', os_region='somewhere', max_retries=2) # NOTE(aarefiev) register keystoneauth dynamic options adapter_opts = kloading.get_adapter_conf_options( include_deprecated=False) self.cfg.register_opts(adapter_opts, 'swift') self.addCleanup(swift.reset_swift_session) def test___init__(self, connection_mock, load_mock, opts_mock, adapter_mock): fake_endpoint = "http://localhost:6000" adapter_mock.return_value.get_endpoint.return_value = fake_endpoint swift.SwiftAPI() connection_mock.assert_called_once_with( session=load_mock.return_value, os_options={'object_storage_url': fake_endpoint}) def test_create_object(self, connection_mock, load_mock, opts_mock, adapter_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_object.return_value = 'object-uuid' object_uuid = swiftapi.create_object('object', 'some-string-data') connection_obj_mock.put_container.assert_called_once_with('ironic-' 'inspector') 
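# A rough sketch (an assumed flow mirroring the mocks asserted below, not
# the real SwiftAPI code) of what create_object() is expected to do with its
# swiftclient connection: ensure the container exists, then store the
# payload and return the object identifier from put_object().
def _example_create_object(connection, name, data,
                           container='ironic-inspector'):
    connection.put_container(container)
    return connection.put_object(container, name, data, headers=None)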
connection_obj_mock.put_object.assert_called_once_with( 'ironic-inspector', 'object', 'some-string-data', headers=None) self.assertEqual('object-uuid', object_uuid) def test_create_object_create_container_fails( self, connection_mock, load_mock, opts_mock, adapter_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_container.side_effect = self.swift_exception self.assertRaises(utils.Error, swiftapi.create_object, 'object', 'some-string-data') connection_obj_mock.put_container.assert_called_once_with('ironic-' 'inspector') self.assertFalse(connection_obj_mock.put_object.called) def test_create_object_put_object_fails(self, connection_mock, load_mock, opts_mock, adapter_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.put_object.side_effect = self.swift_exception self.assertRaises(utils.Error, swiftapi.create_object, 'object', 'some-string-data') connection_obj_mock.put_container.assert_called_once_with('ironic-' 'inspector') connection_obj_mock.put_object.assert_called_once_with( 'ironic-inspector', 'object', 'some-string-data', headers=None) def test_get_object(self, connection_mock, load_mock, opts_mock, adapter_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value expected_obj = self.data connection_obj_mock.get_object.return_value = ('headers', expected_obj) swift_obj = swiftapi.get_object('object') connection_obj_mock.get_object.assert_called_once_with( 'ironic-inspector', 'object') self.assertEqual(expected_obj, swift_obj) def test_get_object_fails(self, connection_mock, load_mock, opts_mock, adapter_mock): swiftapi = swift.SwiftAPI() connection_obj_mock = connection_mock.return_value connection_obj_mock.get_object.side_effect = self.swift_exception self.assertRaises(utils.Error, swiftapi.get_object, 'object') connection_obj_mock.get_object.assert_called_once_with( 'ironic-inspector', 'object') ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_extra_hardware.py0000666000175100017510000000742013241323457030525 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import json import mock from ironic_inspector import node_cache from ironic_inspector.plugins import extra_hardware from ironic_inspector.test import base as test_base @mock.patch.object(extra_hardware.swift, 'SwiftAPI', autospec=True) @mock.patch.object(node_cache.NodeInfo, 'patch') class TestExtraHardware(test_base.NodeTest): hook = extra_hardware.ExtraHardwareHook() def test_data_received(self, patch_mock, swift_mock): introspection_data = { 'data': [['memory', 'total', 'size', '4294967296'], ['cpu', 'physical', 'number', '1'], ['cpu', 'logical', 'number', '1']]} data = json.dumps(introspection_data['data']) self.hook.before_processing(introspection_data) self.hook.before_update(introspection_data, self.node_info) swift_conn = swift_mock.return_value name = 'extra_hardware-%s' % self.uuid swift_conn.create_object.assert_called_once_with(name, data) patch_mock.assert_called_once_with( [{'op': 'add', 'path': '/extra/hardware_swift_object', 'value': name}]) expected = { 'memory': { 'total': { 'size': 4294967296 } }, 'cpu': { 'physical': { 'number': 1 }, 'logical': { 'number': 1 }, } } self.assertEqual(expected, introspection_data['extra']) def test_data_not_in_edeploy_format(self, patch_mock, swift_mock): introspection_data = { 'data': [['memory', 'total', 'size', '4294967296'], ['cpu', 'physical', 'number', '1'], {'interface': 'eth1'}]} data = json.dumps(introspection_data['data']) self.hook.before_processing(introspection_data) self.hook.before_update(introspection_data, self.node_info) swift_conn = swift_mock.return_value name = 'extra_hardware-%s' % self.uuid swift_conn.create_object.assert_called_once_with(name, data) patch_mock.assert_called_once_with( [{'op': 'add', 'path': '/extra/hardware_swift_object', 'value': name}]) self.assertNotIn('data', introspection_data) def test_no_data_received(self, patch_mock, swift_mock): introspection_data = {'cats': 'meow'} swift_conn = swift_mock.return_value self.hook.before_processing(introspection_data) self.hook.before_update(introspection_data, self.node_info) self.assertFalse(patch_mock.called) self.assertFalse(swift_conn.create_object.called) def test__convert_edeploy_data(self, patch_mock, swift_mock): introspection_data = [['Sheldon', 'J.', 'Plankton', '123'], ['Larry', 'the', 'Lobster', None], ['Eugene', 'H.', 'Krabs', 'The cashier']] data = self.hook._convert_edeploy_data(introspection_data) expected_data = {'Sheldon': {'J.': {'Plankton': 123}}, 'Larry': {'the': {'Lobster': None}}, 'Eugene': {'H.': {'Krabs': 'The cashier'}}} self.assertEqual(expected_data, data) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_iptables.py0000666000175100017510000003663013241323457025574 0ustar zuulzuul00000000000000# Copyright 2015 NEC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
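# For orientation, a small sketch (hypothetical, not the driver code) of the
# rule sequence the sync() tests below expect on the inspection chain: one
# DROP per blacklisted MAC, followed by a final ACCEPT for everything else.
def _example_blacklist_rules(chain, blacklist):
    for mac in blacklist:
        yield ('-A', chain, '-m', 'mac', '--mac-source', mac, '-j', 'DROP')
    yield ('-A', chain, '-j', 'ACCEPT')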
import fixtures from ironicclient import exc as ironic_exc import mock from oslo_config import cfg from ironic_inspector import node_cache from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector.pxe_filter import iptables from ironic_inspector.test import base as test_base CONF = cfg.CONF class TestIptablesDriver(test_base.NodeTest): def setUp(self): super(TestIptablesDriver, self).setUp() CONF.set_override('rootwrap_config', '/some/fake/path') # NOTE(milan) we ignore the state checking in order to avoid having to # always call e.g self.driver.init_filter() to set proper driver state self.mock_fsm = self.useFixture( fixtures.MockPatchObject(iptables.IptablesFilter, 'fsm')).mock self.mock_call = self.useFixture( fixtures.MockPatchObject(iptables.subprocess, 'check_call')).mock self.driver = iptables.IptablesFilter() self.mock_iptables = self.useFixture( fixtures.MockPatchObject(self.driver, '_iptables')).mock self.mock_should_enable_dhcp = self.useFixture( fixtures.MockPatchObject(iptables, '_should_enable_dhcp')).mock self.mock__get_blacklist = self.useFixture( fixtures.MockPatchObject(iptables, '_get_blacklist')).mock self.mock__get_blacklist.return_value = [] self.mock_ironic = mock.Mock() def check_fsm(self, events): # assert the iptables.fsm.process_event() was called with the events calls = [mock.call(event) for event in events] self.assertEqual(calls, self.driver.fsm.process_event.call_args_list) def test_init_args(self): self.driver.init_filter() init_expected_args = [ ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.chain), ('-F', self.driver.chain), ('-X', self.driver.chain), ('-N', self.driver.chain)] call_args_list = self.mock_iptables.call_args_list for (args, call) in zip(init_expected_args, call_args_list): self.assertEqual(args, call[0]) expected = ('sudo', 'ironic-inspector-rootwrap', CONF.rootwrap_config, 'iptables', '-w') self.assertEqual(expected, self.driver.base_command) self.check_fsm([pxe_filter.Events.initialize]) def test_init_args_old_iptables(self): self.mock_call.side_effect = iptables.subprocess.CalledProcessError( 2, '') self.driver.init_filter() init_expected_args = [ ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.chain), ('-F', self.driver.chain), ('-X', self.driver.chain), ('-N', self.driver.chain)] call_args_list = self.mock_iptables.call_args_list for (args, call) in zip(init_expected_args, call_args_list): self.assertEqual(args, call[0]) expected = ('sudo', 'ironic-inspector-rootwrap', CONF.rootwrap_config, 'iptables',) self.assertEqual(expected, self.driver.base_command) self.check_fsm([pxe_filter.Events.initialize]) def test_init_kwargs(self): self.driver.init_filter() init_expected_kwargs = [ {'ignore': True}, {'ignore': True}, {'ignore': True}] call_args_list = self.mock_iptables.call_args_list for (kwargs, call) in zip(init_expected_kwargs, call_args_list): self.assertEqual(kwargs, call[1]) self.check_fsm([pxe_filter.Events.initialize]) def test_init_fails(self): class MyError(Exception): pass self.mock_call.side_effect = MyError('Oops!') self.assertRaisesRegex(MyError, 'Oops!', self.driver.init_filter) self.check_fsm([pxe_filter.Events.initialize, pxe_filter.Events.reset]) def test__iptables_args(self): self.mock_should_enable_dhcp.return_value = True _iptables_expected_args = [ ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.new_chain), ('-F', self.driver.new_chain), ('-X', self.driver.new_chain), ('-N', 
self.driver.new_chain), ('-A', self.driver.new_chain, '-j', 'ACCEPT'), ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.new_chain), ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.chain), ('-F', self.driver.chain), ('-X', self.driver.chain), ('-E', self.driver.new_chain, self.driver.chain) ] self.driver.sync(self.mock_ironic) call_args_list = self.mock_iptables.call_args_list for (args, call) in zip(_iptables_expected_args, call_args_list): self.assertEqual(args, call[0]) self.mock__get_blacklist.assert_called_once_with(self.mock_ironic) self.check_fsm([pxe_filter.Events.sync]) def test__iptables_kwargs(self): _iptables_expected_kwargs = [ {'ignore': True}, {'ignore': True}, {'ignore': True}, {}, {}, {}, {'ignore': True}, {'ignore': True}, {'ignore': True} ] self.driver.sync(self.mock_ironic) call_args_list = self.mock_iptables.call_args_list for (kwargs, call) in zip(_iptables_expected_kwargs, call_args_list): self.assertEqual(kwargs, call[1]) self.check_fsm([pxe_filter.Events.sync]) def test_sync_with_blacklist(self): self.mock__get_blacklist.return_value = ['AA:BB:CC:DD:EE:FF'] self.mock_should_enable_dhcp.return_value = True _iptables_expected_args = [ ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.new_chain), ('-F', self.driver.new_chain), ('-X', self.driver.new_chain), ('-N', self.driver.new_chain), # Blacklist ('-A', self.driver.new_chain, '-m', 'mac', '--mac-source', self.mock__get_blacklist.return_value[0], '-j', 'DROP'), ('-A', self.driver.new_chain, '-j', 'ACCEPT'), ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.new_chain), ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.chain), ('-F', self.driver.chain), ('-X', self.driver.chain), ('-E', self.driver.new_chain, self.driver.chain) ] self.driver.sync(self.mock_ironic) self.check_fsm([pxe_filter.Events.sync]) call_args_list = self.mock_iptables.call_args_list for (args, call) in zip(_iptables_expected_args, call_args_list): self.assertEqual(args, call[0]) self.mock__get_blacklist.assert_called_once_with(self.mock_ironic) # check caching self.mock_iptables.reset_mock() self.mock__get_blacklist.reset_mock() self.driver.sync(self.mock_ironic) self.mock__get_blacklist.assert_called_once_with(self.mock_ironic) self.assertFalse(self.mock_iptables.called) def test__iptables_clean_cache_on_error(self): self.mock__get_blacklist.return_value = ['AA:BB:CC:DD:EE:FF'] self.mock_should_enable_dhcp.return_value = True self.mock_iptables.side_effect = [None, None, RuntimeError('Oops!'), None, None, None, None, None, None] self.assertRaises(RuntimeError, self.driver.sync, self.mock_ironic) self.check_fsm([pxe_filter.Events.sync, pxe_filter.Events.reset]) self.mock__get_blacklist.assert_called_once_with(self.mock_ironic) # check caching syncs_expected_args = [ # driver reset ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.new_chain), ('-F', self.driver.new_chain), ('-X', self.driver.new_chain), ('-N', self.driver.new_chain), # Blacklist ('-A', self.driver.new_chain, '-m', 'mac', '--mac-source', self.mock__get_blacklist.return_value[0], '-j', 'DROP'), ('-A', self.driver.new_chain, '-j', 'ACCEPT'), ('-I', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.new_chain), ('-D', 'INPUT', '-i', 'br-ctlplane', '-p', 'udp', '--dport', '67', '-j', self.driver.chain), ('-F', self.driver.chain), ('-X', 
self.driver.chain), ('-E', self.driver.new_chain, self.driver.chain) ] self.mock_iptables.reset_mock() self.mock_iptables.side_effect = None self.mock__get_blacklist.reset_mock() self.mock_fsm.reset_mock() self.driver.sync(self.mock_ironic) self.check_fsm([pxe_filter.Events.sync]) call_args_list = self.mock_iptables.call_args_list for (idx, (args, call)) in enumerate(zip(syncs_expected_args, call_args_list)): self.assertEqual(args, call[0], 'idx: %s' % idx) self.mock__get_blacklist.assert_called_once_with(self.mock_ironic) class Test_ShouldEnableDhcp(test_base.BaseTest): def setUp(self): super(Test_ShouldEnableDhcp, self).setUp() self.mock_introspection_active = self.useFixture( fixtures.MockPatchObject(node_cache, 'introspection_active')).mock def test_introspection_active(self): self.mock_introspection_active.return_value = True self.assertIs(True, iptables._should_enable_dhcp()) def test_node_not_found_hook_set(self): # DHCP should be always opened if node_not_found hook is set CONF.set_override('node_not_found_hook', 'enroll', 'processing') self.mock_introspection_active.return_value = False self.assertIs(True, iptables._should_enable_dhcp()) def test__should_enable_dhcp_false(self): self.mock_introspection_active.return_value = False self.assertIs(False, iptables._should_enable_dhcp()) class TestIBMapping(test_base.BaseTest): def setUp(self): super(TestIBMapping, self).setUp() CONF.set_override('ethoib_interfaces', ['eth0'], 'iptables') self.ib_data = ( 'EMAC=02:00:02:97:00:01 IMAC=97:fe:80:00:00:00:00:00:00:7c:fe:90:' '03:00:29:26:52\n' 'EMAC=02:00:00:61:00:02 IMAC=61:fe:80:00:00:00:00:00:00:7c:fe:90:' '03:00:29:24:4f\n' ) self.client_id = ('ff:00:00:00:00:00:02:00:00:02:c9:00:7c:fe:90:03:00:' '29:24:4f') self.ib_address = '7c:fe:90:29:24:4f' self.ib_port = mock.Mock(address=self.ib_address, extra={'client-id': self.client_id}, spec=['address', 'extra']) self.port = mock.Mock(address='aa:bb:cc:dd:ee:ff', extra={}, spec=['address', 'extra']) self.ports = [self.ib_port, self.port] self.expected_rmac = '02:00:00:61:00:02' self.fileobj = mock.mock_open(read_data=self.ib_data) def test_matching_ib(self): with mock.patch('six.moves.builtins.open', self.fileobj, create=True) as mock_open: iptables._ib_mac_to_rmac_mapping(self.ports) self.assertEqual(self.expected_rmac, self.ib_port.address) self.assertEqual(self.ports, [self.ib_port, self.port]) mock_open.assert_called_once_with('/sys/class/net/eth0/eth/neighs', 'r') def test_ib_not_match(self): self.ports[0].extra['client-id'] = 'foo' with mock.patch('six.moves.builtins.open', self.fileobj, create=True) as mock_open: iptables._ib_mac_to_rmac_mapping(self.ports) self.assertEqual(self.ib_address, self.ib_port.address) self.assertEqual(self.ports, [self.ib_port, self.port]) mock_open.assert_called_once_with('/sys/class/net/eth0/eth/neighs', 'r') def test_open_no_such_file(self): with mock.patch('six.moves.builtins.open', side_effect=IOError()) as mock_open: iptables._ib_mac_to_rmac_mapping(self.ports) self.assertEqual(self.ib_address, self.ib_port.address) self.assertEqual(self.ports, [self.ib_port, self.port]) mock_open.assert_called_once_with('/sys/class/net/eth0/eth/neighs', 'r') def test_no_interfaces(self): CONF.set_override('ethoib_interfaces', [], 'iptables') with mock.patch('six.moves.builtins.open', self.fileobj, create=True) as mock_open: iptables._ib_mac_to_rmac_mapping(self.ports) self.assertEqual(self.ib_address, self.ib_port.address) self.assertEqual(self.ports, [self.ib_port, self.port]) mock_open.assert_not_called() class 
TestGetBlacklist(test_base.BaseTest): def setUp(self): super(TestGetBlacklist, self).setUp() self.mock__ib_mac_to_rmac_mapping = self.useFixture( fixtures.MockPatchObject(iptables, '_ib_mac_to_rmac_mapping')).mock self.mock_active_macs = self.useFixture( fixtures.MockPatchObject(node_cache, 'active_macs')).mock self.mock_ironic = mock.Mock() def test_active_port(self): self.mock_ironic.port.list.return_value = [ mock.Mock(address='foo'), mock.Mock(address='bar'), ] self.mock_active_macs.return_value = {'foo'} ports = iptables._get_blacklist(self.mock_ironic) # foo is an active address so we expect the blacklist contains only bar self.assertEqual(['bar'], ports) self.mock_ironic.port.list.assert_called_once_with( limit=0, fields=['address', 'extra']) self.mock__ib_mac_to_rmac_mapping.assert_called_once_with(ports) @mock.patch('time.sleep', lambda _x: None) def test_retry_on_port_list_failure(self): self.mock_ironic.port.list.side_effect = [ ironic_exc.ConnectionRefused('boom'), [ mock.Mock(address='foo'), mock.Mock(address='bar'), ] ] self.mock_active_macs.return_value = {'foo'} ports = iptables._get_blacklist(self.mock_ironic) # foo is an active address so we expect the blacklist contains only bar self.assertEqual(['bar'], ports) self.mock_ironic.port.list.assert_called_with( limit=0, fields=['address', 'extra']) self.mock__ib_mac_to_rmac_mapping.assert_called_once_with(ports) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_rules.py0000666000175100017510000004411113241323457025114 0ustar zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
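# Note on condition fields used below: a field is a JSON path with an
# optional scheme, e.g. 'data://memory_mb' reads from the introspection
# data, 'node://driver_info.ipmi_address' reads from the Ironic node
# record, and a bare path such as 'memory_mb' defaults to the introspection
# data (see TestCheckConditionsSchemePath).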
"""Tests for introspection rules.""" import mock from oslo_utils import uuidutils from ironic_inspector import db from ironic_inspector.plugins import base as plugins_base from ironic_inspector import rules from ironic_inspector.test import base as test_base from ironic_inspector import utils class BaseTest(test_base.NodeTest): def setUp(self): super(BaseTest, self).setUp() self.uuid = uuidutils.generate_uuid() self.conditions_json = [ {'op': 'eq', 'field': 'memory_mb', 'value': 1024}, {'op': 'eq', 'field': 'local_gb', 'value': 60}, ] self.actions_json = [ {'action': 'fail', 'message': 'boom!'} ] self.data = { 'memory_mb': 1024, 'local_gb': 42, } @staticmethod def condition_defaults(condition): condition = condition.copy() condition.setdefault('multiple', 'any') condition.setdefault('invert', False) return condition class TestCreateRule(BaseTest): def test_only_actions(self): rule = rules.create([], self.actions_json) rule_json = rule.as_dict() self.assertTrue(rule_json.pop('uuid')) self.assertEqual({'description': None, 'conditions': [], 'actions': self.actions_json}, rule_json) def test_duplicate_uuid(self): rules.create([], self.actions_json, uuid=self.uuid) self.assertRaisesRegex(utils.Error, 'already exists', rules.create, [], self.actions_json, uuid=self.uuid) def test_with_conditions(self): self.conditions_json.extend([ # multiple present&default, invert absent {'op': 'eq', 'field': 'local_gb', 'value': 60, 'multiple': 'any'}, # multiple absent, invert present&default {'op': 'eq', 'field': 'local_gb', 'value': 60, 'invert': False}, # multiple&invert present&non-default {'op': 'eq', 'field': 'memory_mb', 'value': 1024, 'multiple': 'all', 'invert': True}, ]) rule = rules.create(self.conditions_json, self.actions_json) rule_json = rule.as_dict() self.assertTrue(rule_json.pop('uuid')) self.assertEqual({'description': None, 'conditions': [BaseTest.condition_defaults(cond) for cond in self.conditions_json], 'actions': self.actions_json}, rule_json) def test_invalid_condition(self): del self.conditions_json[0]['op'] self.assertRaisesRegex(utils.Error, 'Validation failed for conditions', rules.create, self.conditions_json, self.actions_json) self.conditions_json[0]['op'] = 'foobar' self.assertRaisesRegex(utils.Error, 'Validation failed for conditions', rules.create, self.conditions_json, self.actions_json) def test_invalid_condition_field(self): self.conditions_json[0]['field'] = '!*!' 
self.assertRaisesRegex(utils.Error, 'Unable to parse field JSON path', rules.create, self.conditions_json, self.actions_json) def test_invalid_condition_parameters(self): self.conditions_json[0]['foo'] = 'bar' self.assertRaisesRegex(utils.Error, 'Invalid parameters for operator', rules.create, self.conditions_json, self.actions_json) def test_no_actions(self): self.assertRaisesRegex(utils.Error, 'Validation failed for actions', rules.create, self.conditions_json, []) def test_invalid_action(self): del self.actions_json[0]['action'] self.assertRaisesRegex(utils.Error, 'Validation failed for actions', rules.create, self.conditions_json, self.actions_json) self.actions_json[0]['action'] = 'foobar' self.assertRaisesRegex(utils.Error, 'Validation failed for actions', rules.create, self.conditions_json, self.actions_json) def test_invalid_action_parameters(self): self.actions_json[0]['foo'] = 'bar' self.assertRaisesRegex(utils.Error, 'Invalid parameters for action', rules.create, self.conditions_json, self.actions_json) class TestGetRule(BaseTest): def setUp(self): super(TestGetRule, self).setUp() rules.create(self.conditions_json, self.actions_json, uuid=self.uuid) def test_get(self): rule_json = rules.get(self.uuid).as_dict() self.assertTrue(rule_json.pop('uuid')) self.assertEqual({'description': None, 'conditions': [BaseTest.condition_defaults(cond) for cond in self.conditions_json], 'actions': self.actions_json}, rule_json) def test_not_found(self): self.assertRaises(utils.Error, rules.get, 'foobar') def test_get_all(self): uuid2 = uuidutils.generate_uuid() rules.create(self.conditions_json, self.actions_json, uuid=uuid2) self.assertEqual({self.uuid, uuid2}, {r.as_dict()['uuid'] for r in rules.get_all()}) class TestDeleteRule(BaseTest): def setUp(self): super(TestDeleteRule, self).setUp() self.uuid2 = uuidutils.generate_uuid() rules.create(self.conditions_json, self.actions_json, uuid=self.uuid) rules.create(self.conditions_json, self.actions_json, uuid=self.uuid2) def test_delete(self): rules.delete(self.uuid) self.assertEqual([(self.uuid2,)], db.model_query(db.Rule.uuid).all()) self.assertFalse(db.model_query(db.RuleCondition) .filter_by(rule=self.uuid).all()) self.assertFalse(db.model_query(db.RuleAction) .filter_by(rule=self.uuid).all()) def test_delete_non_existing(self): self.assertRaises(utils.Error, rules.delete, 'foo') def test_delete_all(self): rules.delete_all() self.assertFalse(db.model_query(db.Rule).all()) self.assertFalse(db.model_query(db.RuleCondition).all()) self.assertFalse(db.model_query(db.RuleAction).all()) @mock.patch.object(plugins_base, 'rule_conditions_manager', autospec=True) class TestCheckConditions(BaseTest): def setUp(self): super(TestCheckConditions, self).setUp() self.rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) self.cond_mock = mock.Mock(spec=plugins_base.RuleConditionPlugin) self.cond_mock.ALLOW_NONE = False self.ext_mock = mock.Mock(spec=['obj'], obj=self.cond_mock) def test_ok(self, mock_ext_mgr): mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.cond_mock.check.return_value = True res = self.rule.check_conditions(self.node_info, self.data) self.cond_mock.check.assert_any_call(self.node_info, 1024, {'value': 1024}) self.cond_mock.check.assert_any_call(self.node_info, 42, {'value': 60}) self.assertEqual(len(self.conditions_json), self.cond_mock.check.call_count) self.assertTrue(res) def test_invert(self, mock_ext_mgr): self.conditions_json = [ {'op': 'eq', 'field': 'memory_mb', 'value': 42, 
'invert': True}, ] self.rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.cond_mock.check.return_value = False res = self.rule.check_conditions(self.node_info, self.data) self.cond_mock.check.assert_called_once_with(self.node_info, 1024, {'value': 42}) self.assertTrue(res) def test_no_field(self, mock_ext_mgr): mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.cond_mock.check.return_value = True del self.data['local_gb'] res = self.rule.check_conditions(self.node_info, self.data) self.cond_mock.check.assert_called_once_with(self.node_info, 1024, {'value': 1024}) self.assertFalse(res) def test_no_field_none_allowed(self, mock_ext_mgr): mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.cond_mock.ALLOW_NONE = True self.cond_mock.check.return_value = True del self.data['local_gb'] res = self.rule.check_conditions(self.node_info, self.data) self.cond_mock.check.assert_any_call(self.node_info, 1024, {'value': 1024}) self.cond_mock.check.assert_any_call(self.node_info, None, {'value': 60}) self.assertEqual(len(self.conditions_json), self.cond_mock.check.call_count) self.assertTrue(res) def test_fail(self, mock_ext_mgr): mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.cond_mock.check.return_value = False res = self.rule.check_conditions(self.node_info, self.data) self.cond_mock.check.assert_called_once_with(self.node_info, 1024, {'value': 1024}) self.assertFalse(res) class TestCheckConditionsMultiple(BaseTest): def setUp(self): super(TestCheckConditionsMultiple, self).setUp() self.conditions_json = [ {'op': 'eq', 'field': 'interfaces[*].ip', 'value': '1.2.3.4'} ] def _build_data(self, ips): return { 'interfaces': [ {'ip': ip} for ip in ips ] } def test_default(self): rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) data_set = [ (['1.1.1.1', '1.2.3.4', '1.3.2.2'], True), (['1.2.3.4'], True), (['1.1.1.1', '1.3.2.2'], False), (['1.2.3.4', '1.3.2.2'], True), ] for ips, result in data_set: data = self._build_data(ips) self.assertIs(result, rule.check_conditions(self.node_info, data), data) def test_any(self): self.conditions_json[0]['multiple'] = 'any' rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) data_set = [ (['1.1.1.1', '1.2.3.4', '1.3.2.2'], True), (['1.2.3.4'], True), (['1.1.1.1', '1.3.2.2'], False), (['1.2.3.4', '1.3.2.2'], True), ] for ips, result in data_set: data = self._build_data(ips) self.assertIs(result, rule.check_conditions(self.node_info, data), data) def test_all(self): self.conditions_json[0]['multiple'] = 'all' rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) data_set = [ (['1.1.1.1', '1.2.3.4', '1.3.2.2'], False), (['1.2.3.4'], True), (['1.1.1.1', '1.3.2.2'], False), (['1.2.3.4', '1.3.2.2'], False), ] for ips, result in data_set: data = self._build_data(ips) self.assertIs(result, rule.check_conditions(self.node_info, data), data) def test_first(self): self.conditions_json[0]['multiple'] = 'first' rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) data_set = [ (['1.1.1.1', '1.2.3.4', '1.3.2.2'], False), (['1.2.3.4'], True), (['1.1.1.1', '1.3.2.2'], False), (['1.2.3.4', '1.3.2.2'], True), ] for ips, result in data_set: data = self._build_data(ips) self.assertIs(result, rule.check_conditions(self.node_info, data), data) class 
TestCheckConditionsSchemePath(BaseTest): def test_conditions_data_path(self): self.data_set = [ ([{'op': 'eq', 'field': 'data://memory_mb', 'value': 1024}], True), ([{'op': 'gt', 'field': 'data://local_gb', 'value': 42}], False) ] for condition, res in self.data_set: rule = rules.create(conditions_json=condition, actions_json=self.actions_json) self.assertIs(res, rule.check_conditions(self.node_info, self.data), self.data) def test_conditions_node_path(self): self.node_set = [ ([{'op': 'eq', 'field': 'node://driver_info.ipmi_address', 'value': self.bmc_address}], True), ([{'op': 'eq', 'field': 'node://driver', 'value': 'fake'}], False) ] for condition, res in self.node_set: rule = rules.create(conditions_json=condition, actions_json=self.actions_json) self.assertIs(res, rule.check_conditions(self.node_info, self.data)) @mock.patch.object(plugins_base, 'rule_actions_manager', autospec=True) class TestApplyActions(BaseTest): def setUp(self): super(TestApplyActions, self).setUp() self.actions_json.append({'action': 'example'}) self.rule = rules.create(conditions_json=self.conditions_json, actions_json=self.actions_json) self.act_mock = mock.Mock(spec=plugins_base.RuleActionPlugin) self.act_mock.FORMATTED_PARAMS = ['value'] self.ext_mock = mock.Mock(spec=['obj'], obj=self.act_mock) def test_apply(self, mock_ext_mgr): mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.rule.apply_actions(self.node_info, data=self.data) self.act_mock.apply.assert_any_call(self.node_info, {'message': 'boom!'}) self.act_mock.apply.assert_any_call(self.node_info, {}) self.assertEqual(len(self.actions_json), self.act_mock.apply.call_count) def test_apply_data_format_value(self, mock_ext_mgr): self.rule = rules.create(actions_json=[ {'action': 'set-attribute', 'path': '/driver_info/ipmi_address', 'value': '{data[memory_mb]}'}], conditions_json=self.conditions_json ) mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.rule.apply_actions(self.node_info, data=self.data) self.assertEqual(1, self.act_mock.apply.call_count) def test_apply_data_format_value_fail(self, mock_ext_mgr): self.rule = rules.create( actions_json=[ {'action': 'set-attribute', 'path': '/driver_info/ipmi_address', 'value': '{data[inventory][bmc_address]}'}], conditions_json=self.conditions_json ) mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.assertRaises(utils.Error, self.rule.apply_actions, self.node_info, data=self.data) def test_apply_data_non_format_value(self, mock_ext_mgr): self.rule = rules.create(actions_json=[ {'action': 'set-attribute', 'path': '/driver_info/ipmi_address', 'value': 1}], conditions_json=self.conditions_json ) mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock self.rule.apply_actions(self.node_info, data=self.data) self.assertEqual(1, self.act_mock.apply.call_count) @mock.patch.object(rules, 'get_all', autospec=True) class TestApply(BaseTest): def setUp(self): super(TestApply, self).setUp() self.rules = [mock.Mock(spec=rules.IntrospectionRule), mock.Mock(spec=rules.IntrospectionRule)] def test_no_rules(self, mock_get_all): mock_get_all.return_value = [] rules.apply(self.node_info, self.data) def test_apply(self, mock_get_all): mock_get_all.return_value = self.rules for idx, rule in enumerate(self.rules): rule.check_conditions.return_value = not bool(idx) rules.apply(self.node_info, self.data) for idx, rule in enumerate(self.rules): rule.check_conditions.assert_called_once_with(self.node_info, self.data) if rule.check_conditions.return_value: 
rule.apply_actions.assert_called_once_with( self.node_info, data=self.data) else: self.assertFalse(rule.apply_actions.called) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_api_tools.py0000666000175100017510000001116213241323457025753 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import flask import mock from oslo_config import cfg from oslo_utils import uuidutils import six from ironic_inspector import api_tools import ironic_inspector.test.base as test_base from ironic_inspector import utils CONF = cfg.CONF app = flask.Flask(__name__) app.testing = True def mock_test_field(return_value=None, side_effect=None): """Mock flask.request.args.get""" def outer(func): @six.wraps(func) def inner(self, *args, **kwargs): with app.test_request_context('/'): get_mock = flask.request.args.get = mock.Mock() get_mock.return_value = return_value get_mock.side_effect = side_effect ret = func(self, get_mock, *args, **kwargs) return ret return inner return outer class RaisesCoercionExceptionTestCase(test_base.BaseTest): def test_ok(self): @api_tools.raises_coercion_exceptions def fn(): return True self.assertIs(True, fn()) def test_assertion_error(self): @api_tools.raises_coercion_exceptions def fn(): assert False, 'Oops!' six.assertRaisesRegex(self, utils.Error, 'Bad request: Oops!', fn) def test_value_error(self): @api_tools.raises_coercion_exceptions def fn(): raise ValueError('Oops!') six.assertRaisesRegex(self, utils.Error, 'Bad request: Oops!', fn) class RequestFieldTestCase(test_base.BaseTest): @mock_test_field(return_value='42') def test_request_field_ok(self, get_mock): @api_tools.request_field('foo') def fn(value): self.assertEqual(get_mock.return_value, value) fn() get_mock.assert_called_once_with('foo', default=None) @mock_test_field(return_value='42') def test_request_field_with_default(self, get_mock): @api_tools.request_field('foo') def fn(value): self.assertEqual(get_mock.return_value, value) fn(default='bar') get_mock.assert_called_once_with('foo', default='bar') @mock_test_field(return_value=42) def test_request_field_with_default_returns_default(self, get_mock): @api_tools.request_field('foo') def fn(value): self.assertEqual(get_mock.return_value, value) fn(default=42) get_mock.assert_called_once_with('foo', default=42) class MarkerFieldTestCase(test_base.BaseTest): @mock_test_field(return_value=uuidutils.generate_uuid()) def test_marker_ok(self, get_mock): value = api_tools.marker_field() self.assertEqual(get_mock.return_value, value) @mock.patch.object(uuidutils, 'is_uuid_like', autospec=True) @mock_test_field(return_value='foo') def test_marker_check_fails(self, get_mock, like_mock): like_mock.return_value = False six.assertRaisesRegex(self, utils.Error, '.*(Marker not UUID-like)', api_tools.marker_field) like_mock.assert_called_once_with(get_mock.return_value) class LimitFieldTestCase(test_base.BaseTest): @mock_test_field(return_value=42) def test_limit_ok(self, get_mock): value = api_tools.limit_field() self.assertEqual(get_mock.return_value, value) 
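    # NOTE: the cases below pin down the coercion behaviour of
    # api_tools.limit_field implied by these assertions: values above
    # CONF.api_max_limit and negative values are rejected, '0' falls back
    # to CONF.api_max_limit, and non-integer input is a bad request.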
@mock_test_field(return_value=str(CONF.api_max_limit + 1)) def test_limit_over(self, get_mock): six.assertRaisesRegex(self, utils.Error, '.*(Limit over %s)' % CONF.api_max_limit, api_tools.limit_field) @mock_test_field(return_value='0') def test_limit_zero(self, get_mock): value = api_tools.limit_field() self.assertEqual(CONF.api_max_limit, value) @mock_test_field(return_value='-1') def test_limit_negative(self, get_mock): six.assertRaisesRegex(self, utils.Error, '.*(Limit cannot be negative)', api_tools.limit_field) @mock_test_field(return_value='foo') def test_limit_invalid_value(self, get_mock): six.assertRaisesRegex(self, utils.Error, 'Bad request', api_tools.limit_field) ironic-inspector-7.2.0/ironic_inspector/test/unit/__init__.py0000666000175100017510000000000013241323457024447 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector/test/unit/test_process.py0000666000175100017510000007316713241323457025455 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import shutil import tempfile import eventlet import fixtures from ironicclient import exceptions import mock from oslo_config import cfg from oslo_serialization import base64 from oslo_utils import timeutils from oslo_utils import uuidutils import six from ironic_inspector.common import ironic as ir_utils from ironic_inspector import db from ironic_inspector import introspection_state as istate from ironic_inspector import node_cache from ironic_inspector.plugins import base as plugins_base from ironic_inspector.plugins import example as example_plugin from ironic_inspector import process from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector.test import base as test_base from ironic_inspector import utils CONF = cfg.CONF class BaseTest(test_base.NodeTest): def setUp(self): super(BaseTest, self).setUp() self.started_at = timeutils.utcnow() self.all_ports = [mock.Mock(uuid=uuidutils.generate_uuid(), address=mac) for mac in self.macs] self.ports = [self.all_ports[1]] self.fake_result_json = 'node json' self.cli_fixture = self.useFixture( fixtures.MockPatchObject(ir_utils, 'get_client', autospec=True)) self.cli = self.cli_fixture.mock.return_value class BaseProcessTest(BaseTest): def setUp(self): super(BaseProcessTest, self).setUp() self.cache_fixture = self.useFixture( fixtures.MockPatchObject(node_cache, 'find_node', autospec=True)) self.process_fixture = self.useFixture( fixtures.MockPatchObject(process, '_process_node', autospec=True)) self.find_mock = self.cache_fixture.mock self.node_info = node_cache.NodeInfo( uuid=self.node.uuid, state=istate.States.waiting, started_at=self.started_at) self.node_info.finished = mock.Mock() self.find_mock.return_value = self.node_info self.cli.node.get.return_value = self.node self.process_mock = self.process_fixture.mock self.process_mock.return_value = self.fake_result_json class TestProcess(BaseProcessTest): def test_ok(self): res = process.process(self.data) self.assertEqual(self.fake_result_json, res) 
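        # The lookup contract checked below: process.process() resolves the
        # node by BMC address plus all MACs reported by the ramdisk before
        # delegating to _process_node.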
self.find_mock.assert_called_once_with(bmc_address=self.bmc_address, mac=mock.ANY) actual_macs = self.find_mock.call_args[1]['mac'] self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) self.cli.node.get.assert_called_once_with(self.uuid) self.process_mock.assert_called_once_with( self.node_info, self.node, self.data) def test_no_ipmi(self): del self.inventory['bmc_address'] process.process(self.data) self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) actual_macs = self.find_mock.call_args[1]['mac'] self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) self.cli.node.get.assert_called_once_with(self.uuid) self.process_mock.assert_called_once_with(self.node_info, self.node, self.data) def test_ipmi_not_detected(self): self.inventory['bmc_address'] = '0.0.0.0' process.process(self.data) self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) actual_macs = self.find_mock.call_args[1]['mac'] self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) self.cli.node.get.assert_called_once_with(self.uuid) self.process_mock.assert_called_once_with(self.node_info, self.node, self.data) def test_ipmi_not_detected_with_old_field(self): self.inventory['bmc_address'] = '0.0.0.0' self.data['ipmi_address'] = '0.0.0.0' process.process(self.data) self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY) actual_macs = self.find_mock.call_args[1]['mac'] self.assertEqual(sorted(self.all_macs), sorted(actual_macs)) self.cli.node.get.assert_called_once_with(self.uuid) self.process_mock.assert_called_once_with(self.node_info, self.node, self.data) def test_not_found_in_cache(self): self.find_mock.side_effect = utils.Error('not found') self.assertRaisesRegex(utils.Error, 'not found', process.process, self.data) self.assertFalse(self.cli.node.get.called) self.assertFalse(self.process_mock.called) def test_not_found_in_ironic(self): self.cli.node.get.side_effect = exceptions.NotFound() self.assertRaisesRegex(utils.Error, 'Node %s was not found' % self.uuid, process.process, self.data) self.cli.node.get.assert_called_once_with(self.uuid) self.assertFalse(self.process_mock.called) self.node_info.finished.assert_called_once_with( istate.Events.error, error=mock.ANY) def test_already_finished(self): self.node_info.finished_at = timeutils.utcnow() self.assertRaisesRegex(utils.Error, 'already finished', process.process, self.data) self.assertFalse(self.process_mock.called) self.assertFalse(self.find_mock.return_value.finished.called) def test_expected_exception(self): self.process_mock.side_effect = utils.Error('boom') self.assertRaisesRegex(utils.Error, 'boom', process.process, self.data) self.node_info.finished.assert_called_once_with( istate.Events.error, error='boom') def test_unexpected_exception(self): self.process_mock.side_effect = RuntimeError('boom') with self.assertRaisesRegex(utils.Error, 'Unexpected exception') as ctx: process.process(self.data) self.assertEqual(500, ctx.exception.http_code) self.node_info.finished.assert_called_once_with( istate.Events.error, error='Unexpected exception RuntimeError during processing: boom') def test_hook_unexpected_exceptions(self): for ext in plugins_base.processing_hooks_manager(): patcher = mock.patch.object(ext.obj, 'before_processing', side_effect=RuntimeError('boom')) patcher.start() self.addCleanup(lambda p=patcher: p.stop()) self.assertRaisesRegex(utils.Error, 'Unexpected exception', process.process, self.data) self.node_info.finished.assert_called_once_with( istate.Events.error, error=mock.ANY) 
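        # The wrapped exception's class name and message are expected to be
        # propagated into the error recorded for the node.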
error_message = self.node_info.finished.call_args[1]['error'] self.assertIn('RuntimeError', error_message) self.assertIn('boom', error_message) def test_hook_unexpected_exceptions_no_node(self): # Check that error from hooks is raised, not "not found" self.find_mock.side_effect = utils.Error('not found') for ext in plugins_base.processing_hooks_manager(): patcher = mock.patch.object(ext.obj, 'before_processing', side_effect=RuntimeError('boom')) patcher.start() self.addCleanup(lambda p=patcher: p.stop()) self.assertRaisesRegex(utils.Error, 'Unexpected exception', process.process, self.data) self.assertFalse(self.node_info.finished.called) def test_error_if_node_not_found_hook(self): plugins_base._NOT_FOUND_HOOK_MGR = None self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') self.assertRaisesRegex(utils.Error, 'Look up error: BOOM', process.process, self.data) @mock.patch.object(example_plugin, 'example_not_found_hook', autospec=True) class TestNodeNotFoundHook(BaseProcessTest): def test_node_not_found_hook_run_ok(self, hook_mock): CONF.set_override('node_not_found_hook', 'example', 'processing') plugins_base._NOT_FOUND_HOOK_MGR = None self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') hook_mock.return_value = node_cache.NodeInfo( uuid=self.node.uuid, started_at=self.started_at) res = process.process(self.data) self.assertEqual(self.fake_result_json, res) hook_mock.assert_called_once_with(self.data) def test_node_not_found_hook_run_none(self, hook_mock): CONF.set_override('node_not_found_hook', 'example', 'processing') plugins_base._NOT_FOUND_HOOK_MGR = None self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') hook_mock.return_value = None self.assertRaisesRegex(utils.Error, 'Node not found hook returned nothing', process.process, self.data) hook_mock.assert_called_once_with(self.data) def test_node_not_found_hook_exception(self, hook_mock): CONF.set_override('node_not_found_hook', 'example', 'processing') plugins_base._NOT_FOUND_HOOK_MGR = None self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM') hook_mock.side_effect = Exception('Hook Error') self.assertRaisesRegex(utils.Error, 'Node not found hook failed: Hook Error', process.process, self.data) hook_mock.assert_called_once_with(self.data) class TestUnprocessedData(BaseProcessTest): @mock.patch.object(process, '_store_unprocessed_data', autospec=True) def test_save_unprocessed_data(self, store_mock): CONF.set_override('store_data', 'swift', 'processing') expected = copy.deepcopy(self.data) process.process(self.data) store_mock.assert_called_once_with(mock.ANY, expected) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) def test_save_unprocessed_data_failure(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') name = 'inspector_data-%s-%s' % ( self.uuid, process._UNPROCESSED_DATA_STORE_SUFFIX ) swift_conn = swift_mock.return_value swift_conn.create_object.side_effect = utils.Error('Oops') res = process.process(self.data) # assert store failure doesn't break processing self.assertEqual(self.fake_result_json, res) swift_conn.create_object.assert_called_once_with(name, mock.ANY) @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_processing', autospec=True) class TestStoreLogs(BaseProcessTest): def setUp(self): super(TestStoreLogs, self).setUp() CONF.set_override('processing_hooks', 'ramdisk_error,example', 'processing') self.tempdir = tempfile.mkdtemp() self.addCleanup(lambda: shutil.rmtree(self.tempdir)) CONF.set_override('ramdisk_logs_dir', 
self.tempdir, 'processing') self.logs = b'test logs' self.data['logs'] = base64.encode_as_bytes(self.logs) def _check_contents(self, name=None): files = os.listdir(self.tempdir) self.assertEqual(1, len(files)) filename = files[0] if name is None: self.assertTrue(filename.startswith(self.uuid), '%s does not start with uuid' % filename) else: self.assertEqual(name, filename) with open(os.path.join(self.tempdir, filename), 'rb') as fp: self.assertEqual(self.logs, fp.read()) def test_store_on_preprocess_failure(self, hook_mock): hook_mock.side_effect = Exception('Hook Error') self.assertRaises(utils.Error, process.process, self.data) self._check_contents() def test_store_on_process_failure(self, hook_mock): self.process_mock.side_effect = utils.Error('boom') self.assertRaises(utils.Error, process.process, self.data) self._check_contents() def test_store_on_unexpected_process_failure(self, hook_mock): self.process_mock.side_effect = RuntimeError('boom') self.assertRaises(utils.Error, process.process, self.data) self._check_contents() def test_store_on_ramdisk_error(self, hook_mock): self.data['error'] = 'boom' self.assertRaises(utils.Error, process.process, self.data) self._check_contents() def test_store_find_node_error(self, hook_mock): self.cli.node.get.side_effect = exceptions.NotFound('boom') self.assertRaises(utils.Error, process.process, self.data) self._check_contents() def test_no_error_no_logs(self, hook_mock): process.process(self.data) self.assertEqual([], os.listdir(self.tempdir)) def test_logs_disabled(self, hook_mock): CONF.set_override('ramdisk_logs_dir', None, 'processing') hook_mock.side_effect = Exception('Hook Error') self.assertRaises(utils.Error, process.process, self.data) self.assertEqual([], os.listdir(self.tempdir)) def test_always_store_logs(self, hook_mock): CONF.set_override('always_store_ramdisk_logs', True, 'processing') process.process(self.data) self._check_contents() @mock.patch.object(process.LOG, 'exception', autospec=True) def test_failure_to_write(self, log_mock, hook_mock): CONF.set_override('always_store_ramdisk_logs', True, 'processing') CONF.set_override('ramdisk_logs_dir', '/I/cannot/write/here', 'processing') process.process(self.data) self.assertEqual([], os.listdir(self.tempdir)) self.assertTrue(log_mock.called) def test_directory_is_created(self, hook_mock): shutil.rmtree(self.tempdir) self.data['error'] = 'boom' self.assertRaises(utils.Error, process.process, self.data) self._check_contents() def test_store_custom_name(self, hook_mock): CONF.set_override('ramdisk_logs_filename_format', '{uuid}-{bmc}-{mac}', 'processing') self.process_mock.side_effect = utils.Error('boom') self.assertRaises(utils.Error, process.process, self.data) self._check_contents(name='%s-%s-%s' % (self.uuid, self.bmc_address, self.pxe_mac.replace(':', ''))) class TestProcessNode(BaseTest): def setUp(self): super(TestProcessNode, self).setUp() CONF.set_override('processing_hooks', '$processing.default_processing_hooks,example', 'processing') self.validate_attempts = 5 self.data['macs'] = self.macs # validate_interfaces hook self.valid_interfaces['eth3'] = { 'mac': self.macs[1], 'ip': self.ips[1], 'extra': {}, 'pxe': False } self.data['interfaces'] = self.valid_interfaces self.ports = self.all_ports self.cli.node.get_boot_device.side_effect = ( [RuntimeError()] * self.validate_attempts + [None]) self.cli.port.create.side_effect = self.ports self.cli.node.update.return_value = self.node self.cli.node.list_ports.return_value = [] self.useFixture(fixtures.MockPatchObject( 
pxe_filter, 'driver', autospec=True)) self.useFixture(fixtures.MockPatchObject( eventlet.greenthread, 'sleep', autospec=True)) self.node_info._state = istate.States.waiting db.Node(uuid=self.node_info.uuid, state=self.node_info._state, started_at=self.node_info.started_at, finished_at=self.node_info.finished_at, error=self.node_info.error).save(self.session) def test_return_includes_uuid(self): ret_val = process._process_node(self.node_info, self.node, self.data) self.assertEqual(self.uuid, ret_val.get('uuid')) @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') def test_wrong_provision_state(self, post_hook_mock): self.node.provision_state = 'active' self.assertRaises(utils.Error, process._process_node, self.node_info, self.node, self.data) self.assertFalse(post_hook_mock.called) @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) def test_ok(self, finished_mock, post_hook_mock): process._process_node(self.node_info, self.node, self.data) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[0], extra={}, pxe_enabled=True) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[1], extra={}, pxe_enabled=False) self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') self.assertFalse(self.cli.node.validate.called) post_hook_mock.assert_called_once_with(self.data, self.node_info) finished_mock.assert_called_once_with(mock.ANY, istate.Events.finish) def test_port_failed(self): self.cli.port.create.side_effect = ( [exceptions.Conflict()] + self.ports[1:]) process._process_node(self.node_info, self.node, self.data) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[0], extra={}, pxe_enabled=True) self.cli.port.create.assert_any_call(node_uuid=self.uuid, address=self.macs[1], extra={}, pxe_enabled=False) @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) def test_power_off_failed(self, finished_mock): self.cli.node.set_power_state.side_effect = RuntimeError('boom') process._process_node(self.node_info, self.node, self.data) self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') finished_mock.assert_called_once_with( mock.ANY, istate.Events.error, error='Failed to power off node %s, check its power ' 'management configuration: boom' % self.uuid ) @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) def test_power_off_enroll_state(self, finished_mock, post_hook_mock): self.node.provision_state = 'enroll' self.node_info.node = mock.Mock(return_value=self.node) process._process_node(self.node_info, self.node, self.data) self.assertTrue(post_hook_mock.called) self.assertTrue(self.cli.node.set_power_state.called) finished_mock.assert_called_once_with( self.node_info, istate.Events.finish) @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) def test_no_power_off(self, finished_mock): CONF.set_override('power_off', False, 'processing') process._process_node(self.node_info, self.node, self.data) self.assertFalse(self.cli.node.set_power_state.called) finished_mock.assert_called_once_with( self.node_info, istate.Events.finish) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) def test_store_data(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid expected = self.data 
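        # With [processing]store_data=swift the processed introspection data
        # should be serialized into a Swift object named
        # 'inspector_data-<node uuid>'; both the object name and the stored
        # payload are verified below.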
process._process_node(self.node_info, self.node, self.data) swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertEqual(expected, json.loads(swift_conn.create_object.call_args[0][1])) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) def test_store_data_no_logs(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid self.data['logs'] = 'something' process._process_node(self.node_info, self.node, self.data) swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertNotIn('logs', json.loads(swift_conn.create_object.call_args[0][1])) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) def test_store_data_location(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') CONF.set_override('store_data_location', 'inspector_data_object', 'processing') swift_conn = swift_mock.return_value name = 'inspector_data-%s' % self.uuid patch = [{'path': '/extra/inspector_data_object', 'value': name, 'op': 'add'}] expected = self.data process._process_node(self.node_info, self.node, self.data) swift_conn.create_object.assert_called_once_with(name, mock.ANY) self.assertEqual(expected, json.loads(swift_conn.create_object.call_args[0][1])) self.cli.node.update.assert_any_call(self.uuid, patch) @mock.patch.object(process, '_reapply', autospec=True) @mock.patch.object(node_cache, 'get_node', autospec=True) class TestReapply(BaseTest): def prepare_mocks(func): @six.wraps(func) def wrapper(self, pop_mock, *args, **kw): pop_mock.return_value = node_cache.NodeInfo( uuid=self.node.uuid, started_at=self.started_at) pop_mock.return_value.finished = mock.Mock() pop_mock.return_value.acquire_lock = mock.Mock() return func(self, pop_mock, *args, **kw) return wrapper def setUp(self): super(TestReapply, self).setUp() CONF.set_override('store_data', 'swift', 'processing') @prepare_mocks def test_ok(self, pop_mock, reapply_mock): process.reapply(self.uuid) pop_mock.assert_called_once_with(self.uuid, locked=False) pop_mock.return_value.acquire_lock.assert_called_once_with( blocking=False ) reapply_mock.assert_called_once_with(pop_mock.return_value) @prepare_mocks def test_locking_failed(self, pop_mock, reapply_mock): pop_mock.return_value.acquire_lock.return_value = False self.assertRaisesRegex(utils.Error, 'Node locked, please, try again later', process.reapply, self.uuid) pop_mock.assert_called_once_with(self.uuid, locked=False) pop_mock.return_value.acquire_lock.assert_called_once_with( blocking=False ) @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update') @mock.patch.object(process.rules, 'apply', autospec=True) @mock.patch.object(process.swift, 'SwiftAPI', autospec=True) @mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True) @mock.patch.object(node_cache.NodeInfo, 'release_lock', autospec=True) class TestReapplyNode(BaseTest): def setUp(self): super(TestReapplyNode, self).setUp() CONF.set_override('processing_hooks', '$processing.default_processing_hooks,example', 'processing') CONF.set_override('store_data', 'swift', 'processing') self.data['macs'] = self.macs self.ports = self.all_ports self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=self.started_at, node=self.node) self.node_info.invalidate_cache = mock.Mock() self.cli.port.create.side_effect = self.ports self.cli.node.update.return_value = self.node self.cli.node.list_ports.return_value = [] self.node_info._state = istate.States.finished 
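        # Reapplying operates on an already finished introspection, so both
        # the cached state and the database record below are seeded as
        # finished.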
self.commit_fixture = self.useFixture( fixtures.MockPatchObject(node_cache.NodeInfo, 'commit', autospec=True)) db.Node(uuid=self.node_info.uuid, state=self.node_info._state, started_at=self.node_info.started_at, finished_at=self.node_info.finished_at, error=self.node_info.error).save(self.session) def call(self): process._reapply(self.node_info) # make sure node_info lock is released after a call self.node_info.release_lock.assert_called_once_with(self.node_info) def prepare_mocks(fn): @six.wraps(fn) def wrapper(self, release_mock, finished_mock, swift_mock, *args, **kw): finished_mock.side_effect = lambda *a, **kw: \ release_mock(self.node_info) swift_client_mock = swift_mock.return_value fn(self, finished_mock, swift_client_mock, *args, **kw) return wrapper @prepare_mocks def test_ok(self, finished_mock, swift_mock, apply_mock, post_hook_mock): swift_name = 'inspector_data-%s' % self.uuid swift_mock.get_object.return_value = json.dumps(self.data) self.call() self.commit_fixture.mock.assert_called_once_with(self.node_info) post_hook_mock.assert_called_once_with(mock.ANY, self.node_info) swift_mock.create_object.assert_called_once_with(swift_name, mock.ANY) swifted_data = json.loads(swift_mock.create_object.call_args[0][1]) self.node_info.invalidate_cache.assert_called_once_with() apply_mock.assert_called_once_with(self.node_info, swifted_data) # assert no power operations were performed self.assertFalse(self.cli.node.set_power_state.called) finished_mock.assert_called_once_with( self.node_info, istate.Events.finish) # asserting validate_interfaces was called self.assertEqual(self.pxe_interfaces, swifted_data['interfaces']) self.assertEqual([self.pxe_mac], swifted_data['macs']) # assert ports were created with whatever was left # behind by validate_interfaces self.cli.port.create.assert_called_once_with( node_uuid=self.uuid, address=swifted_data['macs'][0], extra={}, pxe_enabled=True ) @prepare_mocks def test_get_incoming_data_exception(self, finished_mock, swift_mock, apply_mock, post_hook_mock): exc = Exception('Oops') expected_error = ('Unexpected exception Exception while fetching ' 'unprocessed introspection data from Swift: Oops') swift_mock.get_object.side_effect = exc self.call() self.commit_fixture.mock.assert_called_once_with(self.node_info) self.assertFalse(swift_mock.create_object.called) self.assertFalse(apply_mock.called) self.assertFalse(post_hook_mock.called) finished_mock.assert_called_once_with( self.node_info, istate.Events.error, error=expected_error) @prepare_mocks def test_prehook_failure(self, finished_mock, swift_mock, apply_mock, post_hook_mock): CONF.set_override('processing_hooks', 'example', 'processing') plugins_base._HOOKS_MGR = None exc = Exception('Failed.') swift_mock.get_object.return_value = json.dumps(self.data) with mock.patch.object(example_plugin.ExampleProcessingHook, 'before_processing') as before_processing_mock: before_processing_mock.side_effect = exc self.call() exc_failure = ('Pre-processing failures detected reapplying ' 'introspection on stored data:\n' 'Unexpected exception %(exc_class)s during ' 'preprocessing in hook example: %(error)s' % {'exc_class': type(exc).__name__, 'error': exc}) finished_mock.assert_called_once_with( self.node_info, istate.Events.error, error=exc_failure) # assert _reapply ended having detected the failure self.assertFalse(swift_mock.create_object.called) self.assertFalse(apply_mock.called) self.assertFalse(post_hook_mock.called) @prepare_mocks def test_generic_exception_creating_ports(self, finished_mock, 
swift_mock, apply_mock, post_hook_mock): swift_mock.get_object.return_value = json.dumps(self.data) exc = Exception('Oops') self.cli.port.create.side_effect = exc self.call() finished_mock.assert_called_once_with( self.node_info, istate.Events.error, error=str(exc)) self.assertFalse(swift_mock.create_object.called) self.assertFalse(apply_mock.called) self.assertFalse(post_hook_mock.called) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_common_ironic.py0000666000175100017510000001710013241323457026613 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import unittest from ironicclient import client from ironicclient import exc as ironic_exc import mock from oslo_config import cfg from ironic_inspector.common import ironic as ir_utils from ironic_inspector.common import keystone from ironic_inspector.test import base from ironic_inspector import utils CONF = cfg.CONF class TestGetClientBase(object): def test_get_client_with_auth_token(self, mock_client, mock_load, mock_opts, mock_adapter): fake_token = 'token' fake_ironic_url = 'http://127.0.0.1:6385' mock_sess = mock.Mock() mock_adapter.return_value.get_endpoint.return_value = fake_ironic_url mock_load.return_value = mock_sess ir_utils.get_client(fake_token) mock_adapter.assert_called_once_with( 'ironic', region_name='somewhere', session=mock_sess) mock_adapter.return_value.get_endpoint.assert_called_once_with() args = {'token': fake_token, 'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION, 'max_retries': CONF.ironic.max_retries, 'retry_interval': CONF.ironic.retry_interval} mock_client.assert_called_once_with(1, fake_ironic_url, **args) def test_get_client_without_auth_token(self, mock_client, mock_load, mock_opts, mock_adapter): fake_ironic_url = 'http://127.0.0.1:6385' mock_adapter.return_value.get_endpoint.return_value = fake_ironic_url mock_sess = mock.Mock() mock_load.return_value = mock_sess ir_utils.get_client(None) args = {'session': mock_sess, 'os_ironic_api_version': ir_utils.DEFAULT_IRONIC_API_VERSION, 'max_retries': CONF.ironic.max_retries, 'retry_interval': CONF.ironic.retry_interval} mock_client.assert_called_once_with(1, fake_ironic_url, **args) @mock.patch.object(keystone, 'get_adapter') @mock.patch.object(keystone, 'register_auth_opts') @mock.patch.object(keystone, 'get_session') @mock.patch.object(client, 'Client') class TestGetClientAuth(TestGetClientBase, base.BaseTest): def setUp(self): super(TestGetClientAuth, self).setUp() ir_utils.reset_ironic_session() self.cfg.config(auth_strategy='keystone') self.cfg.config(os_region='somewhere', group='ironic') self.addCleanup(ir_utils.reset_ironic_session) @mock.patch.object(keystone, 'get_adapter') @mock.patch.object(keystone, 'register_auth_opts') @mock.patch.object(keystone, 'get_session') @mock.patch.object(client, 'Client') class TestGetClientNoAuth(TestGetClientBase, base.BaseTest): def setUp(self): super(TestGetClientNoAuth, self).setUp() ir_utils.reset_ironic_session() self.cfg.config(auth_strategy='noauth') 
self.cfg.config(os_region='somewhere', group='ironic') self.addCleanup(ir_utils.reset_ironic_session) class TestGetIpmiAddress(base.BaseTest): def test_ipv4_in_resolves(self): node = mock.Mock(spec=['driver_info', 'uuid'], driver_info={'ipmi_address': '192.168.1.1'}) ip = ir_utils.get_ipmi_address(node) self.assertEqual('192.168.1.1', ip) @mock.patch('socket.gethostbyname') def test_good_hostname_resolves(self, mock_socket): node = mock.Mock(spec=['driver_info', 'uuid'], driver_info={'ipmi_address': 'www.example.com'}) mock_socket.return_value = '192.168.1.1' ip = ir_utils.get_ipmi_address(node) mock_socket.assert_called_once_with('www.example.com') self.assertEqual('192.168.1.1', ip) @mock.patch('socket.gethostbyname') def test_bad_hostname_errors(self, mock_socket): node = mock.Mock(spec=['driver_info', 'uuid'], driver_info={'ipmi_address': 'meow'}, uuid='uuid1') mock_socket.side_effect = socket.gaierror('Boom') self.assertRaises(utils.Error, ir_utils.get_ipmi_address, node) def test_additional_fields(self): node = mock.Mock(spec=['driver_info', 'uuid'], driver_info={'foo': '192.168.1.1'}) self.assertIsNone(ir_utils.get_ipmi_address(node)) self.cfg.config(ipmi_address_fields=['foo', 'bar', 'baz']) ip = ir_utils.get_ipmi_address(node) self.assertEqual('192.168.1.1', ip) def test_ipmi_bridging_enabled(self): node = mock.Mock(spec=['driver_info', 'uuid'], driver_info={'ipmi_address': 'www.example.com', 'ipmi_bridging': 'single'}) self.assertIsNone(ir_utils.get_ipmi_address(node)) def test_loopback_address(self): node = mock.Mock(spec=['driver_info', 'uuid'], driver_info={'ipmi_address': '127.0.0.2'}) ip = ir_utils.get_ipmi_address(node) self.assertIsNone(ip) class TestCapabilities(unittest.TestCase): def test_capabilities_to_dict(self): capabilities = 'cat:meow,dog:wuff' expected_output = {'cat': 'meow', 'dog': 'wuff'} output = ir_utils.capabilities_to_dict(capabilities) self.assertEqual(expected_output, output) def test_dict_to_capabilities(self): capabilities_dict = {'cat': 'meow', 'dog': 'wuff'} output = ir_utils.dict_to_capabilities(capabilities_dict) self.assertIn('cat:meow', output) self.assertIn('dog:wuff', output) class TestCallWithRetries(unittest.TestCase): def setUp(self): super(TestCallWithRetries, self).setUp() self.call = mock.Mock(spec=[]) def test_no_retries_on_success(self): result = ir_utils.call_with_retries(self.call, 'meow', answer=42) self.assertEqual(result, self.call.return_value) self.call.assert_called_once_with('meow', answer=42) def test_no_retries_on_python_error(self): self.call.side_effect = RuntimeError('boom') self.assertRaisesRegexp(RuntimeError, 'boom', ir_utils.call_with_retries, self.call, 'meow', answer=42) self.call.assert_called_once_with('meow', answer=42) @mock.patch('time.sleep', lambda _x: None) def test_retries_on_ironicclient_error(self): self.call.side_effect = [ ironic_exc.ClientException('boom') ] * 3 + [mock.sentinel.result] result = ir_utils.call_with_retries(self.call, 'meow', answer=42) self.assertEqual(result, mock.sentinel.result) self.call.assert_called_with('meow', answer=42) self.assertEqual(4, self.call.call_count) @mock.patch('time.sleep', lambda _x: None) def test_retries_on_ironicclient_error_with_failure(self): self.call.side_effect = ironic_exc.ClientException('boom') self.assertRaisesRegexp(ironic_exc.ClientException, 'boom', ir_utils.call_with_retries, self.call, 'meow', answer=42) self.call.assert_called_with('meow', answer=42) self.assertEqual(5, self.call.call_count) 
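# NOTE: a minimal usage sketch of call_with_retries, inferred from the tests
# above and never invoked by the test run. `cli` and `node_uuid` are
# hypothetical placeholders; the helper retries ironicclient
# ClientExceptions (up to five attempts with the defaults exercised above),
# re-raises any other exception immediately and returns the wrapped call's
# result on success.
def _call_with_retries_usage_sketch(cli, node_uuid):
    # ir_utils is already imported at the top of this module.
    return ir_utils.call_with_retries(cli.node.get, node_uuid)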
ironic-inspector-7.2.0/ironic_inspector/test/unit/policy_fixture.py0000666000175100017510000000253113241323457025770 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from oslo_config import cfg from oslo_policy import opts as policy_opts from ironic_inspector import policy as inspector_policy CONF = cfg.CONF policy_data = """{ } """ class PolicyFixture(fixtures.Fixture): def setUp(self): super(PolicyFixture, self).setUp() self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file_name = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file_name, 'w') as policy_file: policy_file.write(policy_data) policy_opts.set_defaults(CONF) CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') inspector_policy._ENFORCER = None self.addCleanup(inspector_policy.get_enforcer().clear) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_main.py0000666000175100017510000006157113241323457024717 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
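# NOTE: this module exercises the Flask API defined in
# ironic_inspector.main: starting introspection and reading its status, the
# ramdisk /v1/continue endpoint, aborting, retrieving stored data,
# reapplying unprocessed data, introspection rules CRUD and API version
# negotiation.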
import datetime import json import unittest import mock from oslo_utils import uuidutils from ironic_inspector.common import ironic as ir_utils import ironic_inspector.conf from ironic_inspector.conf import opts as conf_opts from ironic_inspector import introspect from ironic_inspector import introspection_state as istate from ironic_inspector import main from ironic_inspector import node_cache from ironic_inspector.plugins import base as plugins_base from ironic_inspector.plugins import example as example_plugin from ironic_inspector import process from ironic_inspector import rules from ironic_inspector.test import base as test_base from ironic_inspector import utils CONF = ironic_inspector.conf.CONF def _get_error(res): return json.loads(res.data.decode('utf-8'))['error']['message'] class BaseAPITest(test_base.BaseTest): def setUp(self): super(BaseAPITest, self).setUp() main.app.config['TESTING'] = True self.app = main.app.test_client() CONF.set_override('auth_strategy', 'noauth') self.uuid = uuidutils.generate_uuid() class TestApiIntrospect(BaseAPITest): @mock.patch.object(introspect, 'introspect', autospec=True) def test_introspect_no_authentication(self, introspect_mock): CONF.set_override('auth_strategy', 'noauth') res = self.app.post('/v1/introspection/%s' % self.uuid) self.assertEqual(202, res.status_code) introspect_mock.assert_called_once_with(self.uuid, token=None) @mock.patch.object(introspect, 'introspect', autospec=True) def test_introspect_failed(self, introspect_mock): introspect_mock.side_effect = utils.Error("boom") res = self.app.post('/v1/introspection/%s' % self.uuid) self.assertEqual(400, res.status_code) self.assertEqual( 'boom', json.loads(res.data.decode('utf-8'))['error']['message']) introspect_mock.assert_called_once_with( self.uuid, token=None) @mock.patch.object(utils, 'check_auth', autospec=True) @mock.patch.object(introspect, 'introspect', autospec=True) def test_introspect_failed_authentication(self, introspect_mock, auth_mock): CONF.set_override('auth_strategy', 'keystone') auth_mock.side_effect = utils.Error('Boom', code=403) res = self.app.post('/v1/introspection/%s' % self.uuid, headers={'X-Auth-Token': 'token'}) self.assertEqual(403, res.status_code) self.assertFalse(introspect_mock.called) @mock.patch.object(process, 'process', autospec=True) class TestApiContinue(BaseAPITest): def test_continue(self, process_mock): # should be ignored CONF.set_override('auth_strategy', 'keystone') process_mock.return_value = {'result': 42} res = self.app.post('/v1/continue', data='{"foo": "bar"}') self.assertEqual(200, res.status_code) process_mock.assert_called_once_with({"foo": "bar"}) self.assertEqual({"result": 42}, json.loads(res.data.decode())) def test_continue_failed(self, process_mock): process_mock.side_effect = utils.Error("boom") res = self.app.post('/v1/continue', data='{"foo": "bar"}') self.assertEqual(400, res.status_code) process_mock.assert_called_once_with({"foo": "bar"}) self.assertEqual('boom', _get_error(res)) def test_continue_wrong_type(self, process_mock): res = self.app.post('/v1/continue', data='42') self.assertEqual(400, res.status_code) self.assertEqual('Invalid data: expected a JSON object, got int', _get_error(res)) self.assertFalse(process_mock.called) @mock.patch.object(introspect, 'abort', autospec=True) class TestApiAbort(BaseAPITest): def test_ok(self, abort_mock): abort_mock.return_value = '', 202 res = self.app.post('/v1/introspection/%s/abort' % self.uuid, headers={'X-Auth-Token': 'token'}) 
abort_mock.assert_called_once_with(self.uuid, token='token') self.assertEqual(202, res.status_code) self.assertEqual(b'', res.data) def test_no_authentication(self, abort_mock): abort_mock.return_value = b'', 202 res = self.app.post('/v1/introspection/%s/abort' % self.uuid) abort_mock.assert_called_once_with(self.uuid, token=None) self.assertEqual(202, res.status_code) self.assertEqual(b'', res.data) def test_node_not_found(self, abort_mock): exc = utils.Error("Not Found.", code=404) abort_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/abort' % self.uuid) abort_mock.assert_called_once_with(self.uuid, token=None) self.assertEqual(404, res.status_code) data = json.loads(str(res.data.decode())) self.assertEqual(str(exc), data['error']['message']) def test_abort_failed(self, abort_mock): exc = utils.Error("Locked.", code=409) abort_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/abort' % self.uuid) abort_mock.assert_called_once_with(self.uuid, token=None) self.assertEqual(409, res.status_code) data = json.loads(res.data.decode()) self.assertEqual(str(exc), data['error']['message']) class GetStatusAPIBaseTest(BaseAPITest): def setUp(self): super(GetStatusAPIBaseTest, self).setUp() self.uuid2 = uuidutils.generate_uuid() self.finished_node = node_cache.NodeInfo( uuid=self.uuid, started_at=datetime.datetime(1, 1, 1), finished_at=datetime.datetime(1, 1, 2), error='boom', state=istate.States.error) self.finished_node.links = [ {u'href': u'http://localhost/v1/introspection/%s' % self.finished_node.uuid, u'rel': u'self'}, ] self.finished_node.status = { 'finished': True, 'state': self.finished_node._state, 'started_at': self.finished_node.started_at.isoformat(), 'finished_at': self.finished_node.finished_at.isoformat(), 'error': self.finished_node.error, 'uuid': self.finished_node.uuid, 'links': self.finished_node.links } self.unfinished_node = node_cache.NodeInfo( uuid=self.uuid2, started_at=datetime.datetime(1, 1, 1), state=istate.States.processing) self.unfinished_node.links = [ {u'href': u'http://localhost/v1/introspection/%s' % self.unfinished_node.uuid, u'rel': u'self'} ] finished_at = (self.unfinished_node.finished_at.isoformat() if self.unfinished_node.finished_at else None) self.unfinished_node.status = { 'finished': False, 'state': self.unfinished_node._state, 'started_at': self.unfinished_node.started_at.isoformat(), 'finished_at': finished_at, 'error': None, 'uuid': self.unfinished_node.uuid, 'links': self.unfinished_node.links } @mock.patch.object(node_cache, 'get_node', autospec=True) class TestApiGetStatus(GetStatusAPIBaseTest): def test_get_introspection_in_progress(self, get_mock): get_mock.return_value = self.unfinished_node res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(200, res.status_code) self.assertEqual(self.unfinished_node.status, json.loads(res.data.decode('utf-8'))) def test_get_introspection_finished(self, get_mock): get_mock.return_value = self.finished_node res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(200, res.status_code) self.assertEqual(self.finished_node.status, json.loads(res.data.decode('utf-8'))) @mock.patch.object(node_cache, 'get_node_list', autospec=True) class TestApiListStatus(GetStatusAPIBaseTest): def test_list_introspection(self, list_mock): list_mock.return_value = [self.finished_node, self.unfinished_node] res = self.app.get('/v1/introspection') self.assertEqual(200, res.status_code) statuses = json.loads(res.data.decode('utf-8')).get('introspection') 
self.assertEqual([self.finished_node.status, self.unfinished_node.status], statuses) list_mock.assert_called_once_with(marker=None, limit=CONF.api_max_limit) def test_list_introspection_limit(self, list_mock): res = self.app.get('/v1/introspection?limit=1000') self.assertEqual(200, res.status_code) list_mock.assert_called_once_with(marker=None, limit=1000) def test_list_introspection_marker(self, list_mock): res = self.app.get('/v1/introspection?marker=%s' % self.finished_node.uuid) self.assertEqual(200, res.status_code) list_mock.assert_called_once_with(marker=self.finished_node.uuid, limit=CONF.api_max_limit) class TestApiGetData(BaseAPITest): @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) def test_get_introspection_data(self, swift_mock): CONF.set_override('store_data', 'swift', 'processing') data = { 'ipmi_address': '1.2.3.4', 'cpus': 2, 'cpu_arch': 'x86_64', 'memory_mb': 1024, 'local_gb': 20, 'interfaces': { 'em1': {'mac': '11:22:33:44:55:66', 'ip': '1.2.0.1'}, } } swift_conn = swift_mock.return_value swift_conn.get_object.return_value = json.dumps(data) res = self.app.get('/v1/introspection/%s/data' % self.uuid) name = 'inspector_data-%s' % self.uuid swift_conn.get_object.assert_called_once_with(name) self.assertEqual(200, res.status_code) self.assertEqual(data, json.loads(res.data.decode('utf-8'))) @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) def test_introspection_data_not_stored(self, swift_mock): CONF.set_override('store_data', 'none', 'processing') swift_conn = swift_mock.return_value res = self.app.get('/v1/introspection/%s/data' % self.uuid) self.assertFalse(swift_conn.get_object.called) self.assertEqual(404, res.status_code) @mock.patch.object(ir_utils, 'get_node', autospec=True) @mock.patch.object(main.swift, 'SwiftAPI', autospec=True) def test_with_name(self, swift_mock, get_mock): get_mock.return_value = mock.Mock(uuid=self.uuid) CONF.set_override('store_data', 'swift', 'processing') data = { 'ipmi_address': '1.2.3.4', 'cpus': 2, 'cpu_arch': 'x86_64', 'memory_mb': 1024, 'local_gb': 20, 'interfaces': { 'em1': {'mac': '11:22:33:44:55:66', 'ip': '1.2.0.1'}, } } swift_conn = swift_mock.return_value swift_conn.get_object.return_value = json.dumps(data) res = self.app.get('/v1/introspection/name1/data') name = 'inspector_data-%s' % self.uuid swift_conn.get_object.assert_called_once_with(name) self.assertEqual(200, res.status_code) self.assertEqual(data, json.loads(res.data.decode('utf-8'))) get_mock.assert_called_once_with('name1', fields=['uuid']) @mock.patch.object(process, 'reapply', autospec=True) class TestApiReapply(BaseAPITest): def setUp(self): super(TestApiReapply, self).setUp() CONF.set_override('store_data', 'swift', 'processing') def test_ok(self, reapply_mock): self.app.post('/v1/introspection/%s/data/unprocessed' % self.uuid) reapply_mock.assert_called_once_with(self.uuid) def test_user_data(self, reapply_mock): res = self.app.post('/v1/introspection/%s/data/unprocessed' % self.uuid, data='some data') self.assertEqual(400, res.status_code) message = json.loads(res.data.decode())['error']['message'] self.assertEqual('User data processing is not supported yet', message) self.assertFalse(reapply_mock.called) def test_swift_disabled(self, reapply_mock): CONF.set_override('store_data', 'none', 'processing') res = self.app.post('/v1/introspection/%s/data/unprocessed' % self.uuid) self.assertEqual(400, res.status_code) message = json.loads(res.data.decode())['error']['message'] self.assertEqual('Inspector is not configured to store ' 'data. 
Set the [processing] store_data ' 'configuration option to change this.', message) self.assertFalse(reapply_mock.called) def test_node_locked(self, reapply_mock): exc = utils.Error('Locked.', code=409) reapply_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/data/unprocessed' % self.uuid) self.assertEqual(409, res.status_code) message = json.loads(res.data.decode())['error']['message'] self.assertEqual(str(exc), message) reapply_mock.assert_called_once_with(self.uuid) def test_node_not_found(self, reapply_mock): exc = utils.Error('Not found.', code=404) reapply_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/data/unprocessed' % self.uuid) self.assertEqual(404, res.status_code) message = json.loads(res.data.decode())['error']['message'] self.assertEqual(str(exc), message) reapply_mock.assert_called_once_with(self.uuid) def test_generic_error(self, reapply_mock): exc = utils.Error('Oops', code=400) reapply_mock.side_effect = exc res = self.app.post('/v1/introspection/%s/data/unprocessed' % self.uuid) self.assertEqual(400, res.status_code) message = json.loads(res.data.decode())['error']['message'] self.assertEqual(str(exc), message) reapply_mock.assert_called_once_with(self.uuid) class TestApiRules(BaseAPITest): @mock.patch.object(rules, 'get_all') def test_get_all(self, get_all_mock): get_all_mock.return_value = [ mock.Mock(spec=rules.IntrospectionRule, **{'as_dict.return_value': {'uuid': 'foo'}}), mock.Mock(spec=rules.IntrospectionRule, **{'as_dict.return_value': {'uuid': 'bar'}}), ] res = self.app.get('/v1/rules') self.assertEqual(200, res.status_code) self.assertEqual( { 'rules': [{'uuid': 'foo', 'links': [ {'href': '/v1/rules/foo', 'rel': 'self'} ]}, {'uuid': 'bar', 'links': [ {'href': '/v1/rules/bar', 'rel': 'self'} ]}] }, json.loads(res.data.decode('utf-8'))) get_all_mock.assert_called_once_with() for m in get_all_mock.return_value: m.as_dict.assert_called_with(short=True) @mock.patch.object(rules, 'delete_all') def test_delete_all(self, delete_all_mock): res = self.app.delete('/v1/rules') self.assertEqual(204, res.status_code) delete_all_mock.assert_called_once_with() @mock.patch.object(rules, 'create', autospec=True) def test_create(self, create_mock): data = {'uuid': self.uuid, 'conditions': 'cond', 'actions': 'act'} exp = data.copy() exp['description'] = None create_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, **{'as_dict.return_value': exp}) res = self.app.post('/v1/rules', data=json.dumps(data)) self.assertEqual(201, res.status_code) create_mock.assert_called_once_with(conditions_json='cond', actions_json='act', uuid=self.uuid, description=None) self.assertEqual(exp, json.loads(res.data.decode('utf-8'))) @mock.patch.object(rules, 'create', autospec=True) def test_create_api_less_1_6(self, create_mock): data = {'uuid': self.uuid, 'conditions': 'cond', 'actions': 'act'} exp = data.copy() exp['description'] = None create_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, **{'as_dict.return_value': exp}) headers = {conf_opts.VERSION_HEADER: main._format_version((1, 5))} res = self.app.post('/v1/rules', data=json.dumps(data), headers=headers) self.assertEqual(200, res.status_code) create_mock.assert_called_once_with(conditions_json='cond', actions_json='act', uuid=self.uuid, description=None) self.assertEqual(exp, json.loads(res.data.decode('utf-8'))) @mock.patch.object(rules, 'create', autospec=True) def test_create_bad_uuid(self, create_mock): data = {'uuid': 'foo', 'conditions': 'cond', 'actions': 'act'} res = 
self.app.post('/v1/rules', data=json.dumps(data)) self.assertEqual(400, res.status_code) @mock.patch.object(rules, 'get') def test_get_one(self, get_mock): get_mock.return_value = mock.Mock(spec=rules.IntrospectionRule, **{'as_dict.return_value': {'uuid': 'foo'}}) res = self.app.get('/v1/rules/' + self.uuid) self.assertEqual(200, res.status_code) self.assertEqual({'uuid': 'foo', 'links': [ {'href': '/v1/rules/foo', 'rel': 'self'} ]}, json.loads(res.data.decode('utf-8'))) get_mock.assert_called_once_with(self.uuid) get_mock.return_value.as_dict.assert_called_once_with(short=False) @mock.patch.object(rules, 'delete') def test_delete_one(self, delete_mock): res = self.app.delete('/v1/rules/' + self.uuid) self.assertEqual(204, res.status_code) delete_mock.assert_called_once_with(self.uuid) class TestApiMisc(BaseAPITest): @mock.patch.object(node_cache, 'get_node', autospec=True) def test_404_expected(self, get_mock): get_mock.side_effect = utils.Error('boom', code=404) res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(404, res.status_code) self.assertEqual('boom', _get_error(res)) def test_404_unexpected(self): res = self.app.get('/v42') self.assertEqual(404, res.status_code) self.assertIn('not found', _get_error(res).lower()) @mock.patch.object(node_cache, 'get_node', autospec=True) def test_500_with_debug(self, get_mock): CONF.set_override('debug', True) get_mock.side_effect = RuntimeError('boom') res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(500, res.status_code) self.assertEqual('Internal server error (RuntimeError): boom', _get_error(res)) @mock.patch.object(node_cache, 'get_node', autospec=True) def test_500_without_debug(self, get_mock): CONF.set_override('debug', False) get_mock.side_effect = RuntimeError('boom') res = self.app.get('/v1/introspection/%s' % self.uuid) self.assertEqual(500, res.status_code) self.assertEqual('Internal server error', _get_error(res)) class TestApiVersions(BaseAPITest): def _check_version_present(self, res): self.assertEqual('%d.%d' % main.MINIMUM_API_VERSION, res.headers.get(conf_opts.MIN_VERSION_HEADER)) self.assertEqual('%d.%d' % main.CURRENT_API_VERSION, res.headers.get(conf_opts.MAX_VERSION_HEADER)) def test_root_endpoint(self): res = self.app.get("/") self.assertEqual(200, res.status_code) self._check_version_present(res) data = res.data.decode('utf-8') json_data = json.loads(data) expected = {"versions": [{ "status": "CURRENT", "id": '%s.%s' % main.CURRENT_API_VERSION, "links": [{ "rel": "self", "href": ("http://localhost/v%s" % main.CURRENT_API_VERSION[0]) }] }]} self.assertEqual(expected, json_data) @mock.patch.object(main.app.url_map, "iter_rules", autospec=True) def test_version_endpoint(self, mock_rules): mock_rules.return_value = ["/v1/endpoint1", "/v1/endpoint2/", "/v1/endpoint1/", "/v2/endpoint1", "/v1/endpoint3", "/v1/endpoint2//subpoint"] endpoint = "/v1" res = self.app.get(endpoint) self.assertEqual(200, res.status_code) self._check_version_present(res) json_data = json.loads(res.data.decode('utf-8')) expected = {u'resources': [ { u'name': u'endpoint1', u'links': [{ u'rel': u'self', u'href': u'http://localhost/v1/endpoint1'}] }, { u'name': u'endpoint3', u'links': [{ u'rel': u'self', u'href': u'http://localhost/v1/endpoint3'}] }, ]} self.assertEqual(expected, json_data) def test_version_endpoint_invalid(self): endpoint = "/v-1" res = self.app.get(endpoint) self.assertEqual(404, res.status_code) def test_404_unexpected(self): # API version on unknown pages 
self._check_version_present(self.app.get('/v1/foobar')) @mock.patch.object(node_cache, 'get_node', autospec=True) def test_usual_requests(self, get_mock): get_mock.return_value = node_cache.NodeInfo(uuid=self.uuid, started_at=42.0) # Successful self._check_version_present( self.app.post('/v1/introspection/%s' % self.uuid)) # With error self._check_version_present( self.app.post('/v1/introspection/foobar')) def test_request_correct_version(self): headers = {conf_opts.VERSION_HEADER: main._format_version(main.CURRENT_API_VERSION)} self._check_version_present(self.app.get('/', headers=headers)) def test_request_unsupported_version(self): bad_version = (main.CURRENT_API_VERSION[0], main.CURRENT_API_VERSION[1] + 1) headers = {conf_opts.VERSION_HEADER: main._format_version(bad_version)} res = self.app.get('/', headers=headers) self._check_version_present(res) self.assertEqual(406, res.status_code) error = _get_error(res) self.assertIn('%d.%d' % bad_version, error) self.assertIn('%d.%d' % main.MINIMUM_API_VERSION, error) self.assertIn('%d.%d' % main.CURRENT_API_VERSION, error) class TestPlugins(unittest.TestCase): @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_processing', autospec=True) @mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update', autospec=True) def test_hook(self, mock_post, mock_pre): plugins_base._HOOKS_MGR = None CONF.set_override('processing_hooks', 'example', 'processing') mgr = plugins_base.processing_hooks_manager() mgr.map_method('before_processing', 'introspection_data') mock_pre.assert_called_once_with(mock.ANY, 'introspection_data') mgr.map_method('before_update', 'node_info', {}) mock_post.assert_called_once_with(mock.ANY, 'node_info', {}) def test_manager_is_cached(self): self.assertIs(plugins_base.processing_hooks_manager(), plugins_base.processing_hooks_manager()) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_keystone.py0000666000175100017510000000476713241323457025640 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from keystoneauth1 import loading as kaloading import mock from oslo_config import cfg from ironic_inspector.common import keystone from ironic_inspector.test import base TESTGROUP = 'keystone_test' class KeystoneTest(base.BaseTest): def setUp(self): super(KeystoneTest, self).setUp() self.cfg.conf.register_group(cfg.OptGroup(TESTGROUP)) def test_register_auth_opts(self): keystone.register_auth_opts(TESTGROUP, 'fake-service') auth_opts = ['auth_type', 'auth_section'] sess_opts = ['certfile', 'keyfile', 'insecure', 'timeout', 'cafile'] for o in auth_opts + sess_opts: self.assertIn(o, self.cfg.conf[TESTGROUP]) self.assertEqual('password', self.cfg.conf[TESTGROUP]['auth_type']) self.assertEqual('fake-service', self.cfg.conf[TESTGROUP]['service_type']) @mock.patch.object(kaloading, 'load_auth_from_conf_options', autospec=True) def test_get_session(self, auth_mock): keystone.register_auth_opts(TESTGROUP, 'fake-service') self.cfg.config(group=TESTGROUP, cafile='/path/to/ca/file') auth1 = mock.Mock() auth_mock.return_value = auth1 sess = keystone.get_session(TESTGROUP) self.assertEqual('/path/to/ca/file', sess.verify) self.assertEqual(auth1, sess.auth) def test_add_auth_options(self): opts = keystone.add_auth_options([], 'fake-service') # check that there is no duplicates names = {o.dest for o in opts} self.assertEqual(len(names), len(opts)) # NOTE(pas-ha) checking for most standard auth and session ones only expected = {'timeout', 'insecure', 'cafile', 'certfile', 'keyfile', 'auth_type', 'auth_url', 'username', 'password', 'tenant_name', 'project_name', 'trust_id', 'domain_id', 'user_domain_id', 'project_domain_id'} self.assertTrue(expected.issubset(names)) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_lldp_basic.py0000666000175100017510000003214313241323457027621 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from ironic_inspector.common import lldp_parsers as nv from ironic_inspector.plugins import lldp_basic from ironic_inspector.test import base as test_base class TestLLDPBasicProcessingHook(test_base.NodeTest): hook = lldp_basic.LLDPBasicProcessingHook() def setUp(self): super(TestLLDPBasicProcessingHook, self).setUp() self.data = { 'inventory': { 'interfaces': [{ 'name': 'em1', }], 'cpu': 1, 'disks': 1, 'memory': 1 }, 'all_interfaces': { 'em1': {'mac': self.macs[0], 'ip': self.ips[0]} } } self.expected = {"em1": {"ip": self.ips[0], "mac": self.macs[0]}} def test_all_valid_data(self): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [1, "04112233aabbcc"], # ChassisId [2, "07373334"], # PortId [3, "003c"], # TTL [4, "686f737430322e6c61622e656e6720706f7274203320" "28426f6e6429"], # PortDesc [5, "737730312d646973742d31622d623132"], # SysName [6, "4e6574776f726b732c20496e632e20353530302c2076657273696f" "6e203132204275696c6420646174653a20323031342d30332d31332030" "383a33383a33302055544320"], # SysDesc [7, "00140014"], # SysCapabilities [8, "0501c000020f020000000000"], # MgmtAddress [8, "110220010db885a3000000008a2e03707334020000000000"], [8, "0706aa11bb22cc3302000003e900"], # MgmtAddress [127, "00120f01036c110010"], # dot3 MacPhyConfigStatus [127, "00120f030300000002"], # dot3 LinkAggregation [127, "00120f0405ea"], # dot3 MTU [127, "0080c2010066"], # dot1 PortVlan [127, "0080c20206000a"], # dot1 PortProtocolVlanId [127, "0080c202060014"], # dot1 PortProtocolVlanId [127, "0080c204080026424203000000"], # dot1 ProtocolIdentity [127, "0080c203006507766c616e313031"], # dot1 VlanName [127, "0080c203006607766c616e313032"], # dot1 VlanName [127, "0080c203006807766c616e313034"], # dot1 VlanName [127, "0080c2060058"], # dot1 MgmtVID [0, ""]] }] expected = { nv.LLDP_CAP_ENABLED_NM: ['Bridge', 'Router'], nv.LLDP_CAP_SUPPORT_NM: ['Bridge', 'Router'], nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:cc", nv.LLDP_MGMT_ADDRESSES_NM: ['192.0.2.15', '2001:db8:85a3::8a2e:370:7334', 'aa:11:bb:22:cc:33'], nv.LLDP_PORT_LINK_AUTONEG_ENABLED_NM: True, nv.LLDP_PORT_DESC_NM: 'host02.lab.eng port 3 (Bond)', nv.LLDP_PORT_ID_NM: '734', nv.LLDP_PORT_LINK_AGG_ENABLED_NM: True, nv.LLDP_PORT_LINK_AGG_ID_NM: 2, nv.LLDP_PORT_LINK_AGG_SUPPORT_NM: True, nv.LLDP_PORT_MGMT_VLANID_NM: 88, nv.LLDP_PORT_MAU_TYPE_NM: '100BASE-TX full duplex', nv.LLDP_MTU_NM: 1514, nv.LLDP_PORT_CAPABILITIES_NM: ['1000BASE-T fdx', '100BASE-TX fdx', '100BASE-TX hdx', '10BASE-T fdx', '10BASE-T hdx', 'Asym and Sym PAUSE fdx'], nv.LLDP_PORT_PROT_VLAN_ENABLED_NM: True, nv.LLDP_PORT_PROT_VLANIDS_NM: [10, 20], nv.LLDP_PORT_PROT_VLAN_SUPPORT_NM: True, nv.LLDP_PORT_VLANID_NM: 102, nv.LLDP_PORT_VLANS_NM: [{'id': 101, 'name': 'vlan101'}, {'id': 102, 'name': 'vlan102'}, {'id': 104, "name": 'vlan104'}], nv.LLDP_PROTOCOL_IDENTITIES_NM: ['0026424203000000'], nv.LLDP_SYS_DESC_NM: 'Networks, Inc. 
5500, version 12' ' Build date: 2014-03-13 08:38:30 UTC ', nv.LLDP_SYS_NAME_NM: 'sw01-dist-1b-b12' } self.hook.before_update(self.data, self.node_info) actual_all_int = self.data['all_interfaces'] actual = actual_all_int['em1']['lldp_processed'] for name, value in expected.items(): if name is nv.LLDP_PORT_VLANS_NM: for d1, d2 in zip(expected[name], actual[name]): for key, value in d1.items(): self.assertEqual(d2[key], value) else: self.assertEqual(actual[name], expected[name]) def test_multiple_interfaces(self): self.data = { 'inventory': { 'interfaces': [ {'name': 'em1', 'lldp': [ [1, "04112233aabbcc"], [2, "07373334"], [3, "003c"]]}, {'name': 'em2', 'lldp': [ [1, "04112233aabbdd"], [2, "07373838"], [3, "003c"]]}, {'name': 'em3', 'lldp': [ [1, "04112233aabbee"], [2, "07373939"], [3, "003c"]]}], 'cpu': 1, 'disks': 1, 'memory': 1 }, 'all_interfaces': { 'em1': {'mac': self.macs[0], 'ip': self.ips[0]}, 'em2': {'mac': self.macs[0], 'ip': self.ips[0]}, 'em3': {'mac': self.macs[0], 'ip': self.ips[0]} } } expected = {"em1": {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:cc", nv.LLDP_PORT_ID_NM: "734"}}, "em2": {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:dd", nv.LLDP_PORT_ID_NM: "788"}}, "em3": {"ip": self.ips[0], "mac": self.macs[0], "lldp_processed": { nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:ee", nv.LLDP_PORT_ID_NM: "799"}}} self.hook.before_update(self.data, self.node_info) self.assertEqual(expected, self.data['all_interfaces']) def test_chassis_ids(self): # Test IPv4 address self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [1, "0501c000020f"], ]}] self.expected['em1']['lldp_processed'] = { nv.LLDP_CHASSIS_ID_NM: "192.0.2.15" } self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) # Test name self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [1, "0773773031"], ]}] self.expected['em1']['lldp_processed'] = { nv.LLDP_CHASSIS_ID_NM: "sw01" } self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) def test_duplicate_tlvs(self): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [1, "04112233aabbcc"], # ChassisId [1, "04332211ddeeff"], # ChassisId [1, "04556677aabbcc"], # ChassisId [2, "07373334"], # PortId [2, "07373435"], # PortId [2, "07373536"] # PortId ]}] # Only the first unique TLV is processed self.expected['em1']['lldp_processed'] = { nv.LLDP_CHASSIS_ID_NM: "11:22:33:aa:bb:cc", nv.LLDP_PORT_ID_NM: "734" } self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) def test_unhandled_tlvs(self): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [10, "04112233aabbcc"], [12, "07373334"], [128, "00120f080300010000"]]}] # nothing should be written to lldp_processed self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) def test_unhandled_oui(self): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [127, "00906901425030323134323530393236"], [127, "23ac0074657374"], [127, "00120e010300010000"]]}] # nothing should be written to lldp_processed self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) @mock.patch('ironic_inspector.common.lldp_parsers.LOG') def test_null_strings(self, mock_log): self.data['inventory']['interfaces'] = [{ 
'name': 'em1', 'lldp': [ [1, "04"], [4, ""], # PortDesc [5, ""], # SysName [6, ""], # SysDesc [127, "0080c203006507"] # dot1 VlanName ]}] self.expected['em1']['lldp_processed'] = { nv.LLDP_PORT_DESC_NM: '', nv.LLDP_SYS_DESC_NM: '', nv.LLDP_SYS_NAME_NM: '' } self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) self.assertEqual(2, mock_log.warning.call_count) @mock.patch('ironic_inspector.common.lldp_parsers.LOG') def test_truncated_int(self, mock_log): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [127, "00120f04"], # dot3 MTU [127, "0080c201"], # dot1 PortVlan [127, "0080c206"], # dot1 MgmtVID ]}] # nothing should be written to lldp_processed self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) self.assertEqual(3, mock_log.warning.call_count) @mock.patch('ironic_inspector.common.lldp_parsers.LOG') def test_invalid_ip(self, mock_log): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [8, "0501"], # truncated [8, "0507c000020f020000000000"]] # invalid id }] self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) self.assertEqual(2, mock_log.warning.call_count) @mock.patch('ironic_inspector.common.lldp_parsers.LOG') def test_truncated_mac(self, mock_log): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [8, "0506"]] }] self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) self.assertEqual(1, mock_log.warning.call_count) @mock.patch('ironic_inspector.common.lldp_parsers.LOG') def test_bad_value_macphy(self, mock_log): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [127, "00120f01036c11FFFF"], # invalid mau type [127, "00120f01036c11"], # truncated [127, "00120f01036c"] # truncated ]}] self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) self.assertEqual(3, mock_log.warning.call_count) @mock.patch('ironic_inspector.common.lldp_parsers.LOG') def test_bad_value_linkagg(self, mock_log): self.data['inventory']['interfaces'] = [{ 'name': 'em1', 'lldp': [ [127, "00120f0303"], # dot3 LinkAggregation [127, "00120f03"] # truncated ]}] self.hook.before_update(self.data, self.node_info) self.assertEqual(self.expected, self.data['all_interfaces']) self.assertEqual(2, mock_log.warning.call_count) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_utils.py0000666000175100017510000001414613241323457025127 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from ironicclient.v1 import node from keystonemiddleware import auth_token import mock from oslo_config import cfg from ironic_inspector.common import context from ironic_inspector import node_cache from ironic_inspector.test import base from ironic_inspector import utils CONF = cfg.CONF class TestCheckAuth(base.BaseTest): def setUp(self): super(TestCheckAuth, self).setUp() self.cfg.config(auth_strategy='keystone') @mock.patch.object(auth_token, 'AuthProtocol') def test_middleware(self, mock_auth): self.cfg.config(group='keystone_authtoken', admin_user='admin', admin_tenant_name='admin', admin_password='password', auth_uri='http://127.0.0.1:5000', identity_uri='http://127.0.0.1:35357') app = mock.Mock(wsgi_app=mock.sentinel.app) utils.add_auth_middleware(app) call_args = mock_auth.call_args_list[0] args = call_args[0] self.assertEqual(mock.sentinel.app, args[0]) args1 = args[1] self.assertEqual('admin', args1['admin_user']) self.assertEqual('admin', args1['admin_tenant_name']) self.assertEqual('password', args1['admin_password']) self.assertTrue(args1['delay_auth_decision']) self.assertEqual('http://127.0.0.1:5000', args1['auth_uri']) self.assertEqual('http://127.0.0.1:35357', args1['identity_uri']) def test_admin(self): request = mock.Mock(headers={'X-Identity-Status': 'Confirmed'}) request.context = context.RequestContext(roles=['admin']) utils.check_auth(request, rule="is_admin") def test_invalid(self): request = mock.Mock(headers={'X-Identity-Status': 'Invalid'}) self.assertRaises(utils.Error, utils.check_auth, request) def test_not_admin(self): request = mock.Mock(headers={'X-Identity-Status': 'Confirmed'}) request.context = context.RequestContext(roles=['member']) self.assertRaises(utils.Error, utils.check_auth, request, rule="is_admin") def test_disabled(self): self.cfg.config(auth_strategy='noauth') request = mock.Mock(headers={'X-Identity-Status': 'Invalid'}) utils.check_auth(request) def test_public_api(self): request = mock.Mock(headers={'X-Identity-Status': 'Invalid'}) request.context = context.RequestContext(is_public_api=True) utils.check_auth(request, "public_api") class TestProcessingLogger(base.BaseTest): def test_prefix_no_info(self): self.assertEqual('[unidentified node]', utils.processing_logger_prefix()) def test_prefix_only_uuid(self): node_info = node.Node(mock.Mock(), dict(uuid='NNN')) self.assertEqual('[node: NNN]', utils.processing_logger_prefix(node_info=node_info)) def test_prefix_only_bmc(self): data = {'inventory': {'bmc_address': '1.2.3.4'}} self.assertEqual('[node: BMC 1.2.3.4]', utils.processing_logger_prefix(data=data)) def test_prefix_only_mac(self): data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff'} self.assertEqual('[node: MAC aa:bb:cc:dd:ee:ff]', utils.processing_logger_prefix(data=data)) def test_prefix_everything(self): node_info = node.Node(mock.Mock(), dict(uuid='NNN')) data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff', 'inventory': {'bmc_address': '1.2.3.4'}} self.assertEqual('[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4]', utils.processing_logger_prefix(node_info=node_info, data=data)) def test_prefix_uuid_not_str(self): node_info = node.Node(mock.Mock(), dict(uuid=None)) self.assertEqual('[node: None]', utils.processing_logger_prefix(node_info=node_info)) def test_prefix_NodeInfo_instance(self): node_info = node_cache.NodeInfo('NNN') self.assertEqual('[node: NNN]', utils.processing_logger_prefix(node_info=node_info)) def test_prefix_NodeInfo_instance_with_state(self): node_info = node_cache.NodeInfo('NNN', state='foobar') self.assertEqual('[node: 
NNN state foobar]', utils.processing_logger_prefix(node_info=node_info)) def test_adapter_with_bmc(self): node_info = node.Node(mock.Mock(), dict(uuid='NNN')) data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff', 'inventory': {'bmc_address': '1.2.3.4'}} logger = utils.getProcessingLogger(__name__) msg, _kwargs = logger.process('foo', {'node_info': node_info, 'data': data}) self.assertEqual( '[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4] foo', msg) def test_adapter_empty_data(self): logger = utils.getProcessingLogger(__name__) msg, _kwargs = logger.process('foo', {'node_info': None, 'data': None}) self.assertEqual('[unidentified node] foo', msg) def test_adapter_no_data(self): logger = utils.getProcessingLogger(__name__) msg, _kwargs = logger.process('foo', {}) self.assertEqual('foo', msg) class TestIsoTimestamp(base.BaseTest): def test_ok(self): iso_date = '1970-01-01T00:00:00+00:00' self.assertEqual(iso_date, utils.iso_timestamp(0.0)) def test_none(self): self.assertIsNone(utils.iso_timestamp(None)) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_wsgi_service.py0000666000175100017510000004364313241323457026464 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import ssl import sys import unittest import eventlet # noqa import fixtures import mock from oslo_config import cfg from ironic_inspector.test import base as test_base from ironic_inspector import wsgi_service CONF = cfg.CONF class BaseWSGITest(test_base.BaseTest): def setUp(self): # generic mocks setUp method super(BaseWSGITest, self).setUp() self.app = self.useFixture(fixtures.MockPatchObject( wsgi_service.app, 'app', autospec=True)).mock self.mock__shutting_down = (self.useFixture(fixtures.MockPatchObject( wsgi_service.semaphore, 'Semaphore', autospec=True)) .mock.return_value) self.mock__shutting_down.acquire.return_value = True self.mock_log = self.useFixture(fixtures.MockPatchObject( wsgi_service, 'LOG')).mock self.service = wsgi_service.WSGIService() class TestWSGIServiceInitMiddleware(BaseWSGITest): def setUp(self): super(TestWSGIServiceInitMiddleware, self).setUp() self.mock_add_auth_middleware = self.useFixture( fixtures.MockPatchObject(wsgi_service.utils, 'add_auth_middleware')).mock self.mock_add_cors_middleware = self.useFixture( fixtures.MockPatchObject(wsgi_service.utils, 'add_cors_middleware')).mock # 'positive' settings CONF.set_override('auth_strategy', 'keystone') CONF.set_override('store_data', 'swift', 'processing') def test_init_middleware(self): self.service._init_middleware() self.mock_add_auth_middleware.assert_called_once_with(self.app) self.mock_add_cors_middleware.assert_called_once_with(self.app) def test_init_middleware_noauth(self): CONF.set_override('auth_strategy', 'noauth') self.service._init_middleware() self.mock_add_auth_middleware.assert_not_called() self.mock_log.warning.assert_called_once_with( 'Starting unauthenticated, please check configuration') self.mock_add_cors_middleware.assert_called_once_with(self.app) def test_init_middleware_no_store(self): 
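# NOTE: with storage disabled the service should still start, but warn that introspection data will be discarded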
CONF.set_override('store_data', 'none', 'processing') self.service._init_middleware() self.mock_add_auth_middleware.assert_called_once_with(self.app) self.mock_log.warning.assert_called_once_with( 'Introspection data will not be stored. Change "[processing] ' 'store_data" option if this is not the desired behavior') self.mock_add_cors_middleware.assert_called_once_with(self.app) class TestWSGIServiceInitHost(BaseWSGITest): def setUp(self): super(TestWSGIServiceInitHost, self).setUp() self.mock_db_init = self.useFixture(fixtures.MockPatchObject( wsgi_service.db, 'init')).mock self.mock_validate_processing_hooks = self.useFixture( fixtures.MockPatchObject(wsgi_service.plugins_base, 'validate_processing_hooks')).mock self.mock_filter = self.useFixture(fixtures.MockPatchObject( wsgi_service.pxe_filter, 'driver')).mock.return_value self.mock_periodic = self.useFixture(fixtures.MockPatchObject( wsgi_service.periodics, 'periodic')).mock self.mock_PeriodicWorker = self.useFixture(fixtures.MockPatchObject( wsgi_service.periodics, 'PeriodicWorker')).mock self.mock_executor = self.useFixture(fixtures.MockPatchObject( wsgi_service.utils, 'executor')).mock self.mock_ExistingExecutor = self.useFixture(fixtures.MockPatchObject( wsgi_service.periodics, 'ExistingExecutor')).mock self.mock_exit = self.useFixture(fixtures.MockPatchObject( wsgi_service.sys, 'exit')).mock def assert_periodics(self): outer_cleanup_decorator_call = mock.call( spacing=CONF.clean_up_period) self.mock_periodic.assert_has_calls([ outer_cleanup_decorator_call, mock.call()(wsgi_service.periodic_clean_up)]) inner_decorator = self.mock_periodic.return_value inner_cleanup_decorator_call = mock.call( wsgi_service.periodic_clean_up) inner_decorator.assert_has_calls([inner_cleanup_decorator_call]) self.mock_ExistingExecutor.assert_called_once_with( self.mock_executor.return_value) periodic_worker = self.mock_PeriodicWorker.return_value periodic_sync = self.mock_filter.get_periodic_sync_task.return_value callables = [(periodic_sync, None, None), (inner_decorator.return_value, None, None)] self.mock_PeriodicWorker.assert_called_once_with( callables=callables, executor_factory=self.mock_ExistingExecutor.return_value, on_failure=self.service._periodics_watchdog) self.assertIs(periodic_worker, self.service._periodics_worker) self.mock_executor.return_value.submit.assert_called_once_with( self.service._periodics_worker.start) def test_init_host(self): self.service._init_host() self.mock_db_init.assert_called_once_with() self.mock_validate_processing_hooks.assert_called_once_with() self.mock_filter.init_filter.assert_called_once_with() self.assert_periodics() def test_init_host_validate_processing_hooks_exception(self): class MyError(Exception): pass error = MyError('Oops!') self.mock_validate_processing_hooks.side_effect = error # NOTE(milan): have to stop executing the test case at this point to # simulate a real sys.exit() call self.mock_exit.side_effect = SystemExit('Stop!') self.assertRaisesRegex(SystemExit, 'Stop!', self.service._init_host) self.mock_db_init.assert_called_once_with() self.mock_log.critical.assert_called_once_with(str(error)) self.mock_exit.assert_called_once_with(1) self.mock_filter.init_filter.assert_not_called() class TestWSGIServicePeriodicWatchDog(BaseWSGITest): def setUp(self): super(TestWSGIServicePeriodicWatchDog, self).setUp() self.mock_get_callable_name = self.useFixture(fixtures.MockPatchObject( wsgi_service.reflection, 'get_callable_name')).mock self.mock_spawn = self.useFixture(fixtures.MockPatchObject( 
wsgi_service.eventlet, 'spawn')).mock def test__periodics_watchdog(self): error = RuntimeError('Oops!') self.service._periodics_watchdog( callable_=None, activity=None, spacing=None, exc_info=(None, error, None), traceback=None) self.mock_get_callable_name.assert_called_once_with(None) self.mock_spawn.assert_called_once_with(self.service.shutdown, error=str(error)) class TestWSGIServiceRun(BaseWSGITest): def setUp(self): super(TestWSGIServiceRun, self).setUp() self.mock__init_host = self.useFixture(fixtures.MockPatchObject( self.service, '_init_host')).mock self.mock__init_middleware = self.useFixture(fixtures.MockPatchObject( self.service, '_init_middleware')).mock self.mock__create_ssl_context = self.useFixture( fixtures.MockPatchObject(self.service, '_create_ssl_context')).mock self.mock_shutdown = self.useFixture(fixtures.MockPatchObject( self.service, 'shutdown')).mock # 'positive' settings CONF.set_override('listen_address', '42.42.42.42') CONF.set_override('listen_port', 42) def test_run(self): self.service.run() self.mock__create_ssl_context.assert_called_once_with() self.mock__init_middleware.assert_called_once_with() self.mock__init_host.assert_called_once_with() self.app.run.assert_called_once_with( host=CONF.listen_address, port=CONF.listen_port, ssl_context=self.mock__create_ssl_context.return_value) self.mock_shutdown.assert_called_once_with() def test_run_no_ssl_context(self): self.mock__create_ssl_context.return_value = None self.service.run() self.mock__create_ssl_context.assert_called_once_with() self.mock__init_middleware.assert_called_once_with() self.mock__init_host.assert_called_once_with() self.app.run.assert_called_once_with( host=CONF.listen_address, port=CONF.listen_port) self.mock_shutdown.assert_called_once_with() def test_run_app_error(self): class MyError(Exception): pass error = MyError('Oops!') self.app.run.side_effect = error self.service.run() self.mock__create_ssl_context.assert_called_once_with() self.mock__init_middleware.assert_called_once_with() self.mock__init_host.assert_called_once_with() self.app.run.assert_called_once_with( host=CONF.listen_address, port=CONF.listen_port, ssl_context=self.mock__create_ssl_context.return_value) self.mock_shutdown.assert_called_once_with(error=str(error)) class TestWSGIServiceShutdown(BaseWSGITest): def setUp(self): super(TestWSGIServiceShutdown, self).setUp() self.mock_filter = self.useFixture(fixtures.MockPatchObject( wsgi_service.pxe_filter, 'driver')).mock.return_value self.mock_executor = mock.Mock() self.mock_executor.alive = True self.mock_get_executor = self.useFixture(fixtures.MockPatchObject( wsgi_service.utils, 'executor')).mock self.mock_get_executor.return_value = self.mock_executor self.service = wsgi_service.WSGIService() self.mock__periodic_worker = self.useFixture(fixtures.MockPatchObject( self.service, '_periodics_worker')).mock self.mock_exit = self.useFixture(fixtures.MockPatchObject( wsgi_service.sys, 'exit')).mock def test_shutdown(self): class MyError(Exception): pass error = MyError('Oops!') self.service.shutdown(error=error) self.mock__shutting_down.acquire.assert_called_once_with( blocking=False) self.mock__periodic_worker.stop.assert_called_once_with() self.mock__periodic_worker.wait.assert_called_once_with() self.assertIsNone(self.service._periodics_worker) self.mock_executor.shutdown.assert_called_once_with(wait=True) self.mock_filter.tear_down_filter.assert_called_once_with() self.mock__shutting_down.release.assert_called_once_with() self.mock_exit.assert_called_once_with(error) def 
test_shutdown_race(self): self.mock__shutting_down.acquire.return_value = False self.service.shutdown() self.mock__shutting_down.acquire.assert_called_once_with( blocking=False) self.mock_log.warning.assert_called_once_with( 'Attempted to shut down while already shutting down') self.mock__periodic_worker.stop.assert_not_called() self.mock__periodic_worker.wait.assert_not_called() self.assertIs(self.mock__periodic_worker, self.service._periodics_worker) self.mock_executor.shutdown.assert_not_called() self.mock_filter.tear_down_filter.assert_not_called() self.mock__shutting_down.release.assert_not_called() self.mock_exit.assert_not_called() def test_shutdown_worker_exception(self): class MyError(Exception): pass error = MyError('Oops!') self.mock__periodic_worker.wait.side_effect = error self.service.shutdown() self.mock__shutting_down.acquire.assert_called_once_with( blocking=False) self.mock__periodic_worker.stop.assert_called_once_with() self.mock__periodic_worker.wait.assert_called_once_with() self.mock_log.exception.assert_called_once_with( 'Service error occurred when stopping periodic workers. Error: %s', error) self.assertIsNone(self.service._periodics_worker) self.mock_executor.shutdown.assert_called_once_with(wait=True) self.mock_filter.tear_down_filter.assert_called_once_with() self.mock__shutting_down.release.assert_called_once_with() self.mock_exit.assert_called_once_with(None) def test_shutdown_no_worker(self): self.service._periodics_worker = None self.service.shutdown() self.mock__shutting_down.acquire.assert_called_once_with( blocking=False) self.mock__periodic_worker.stop.assert_not_called() self.mock__periodic_worker.wait.assert_not_called() self.assertIsNone(self.service._periodics_worker) self.mock_executor.shutdown.assert_called_once_with(wait=True) self.mock_filter.tear_down_filter.assert_called_once_with() self.mock__shutting_down.release.assert_called_once_with() self.mock_exit.assert_called_once_with(None) def test_shutdown_stopped_executor(self): self.mock_executor.alive = False self.service.shutdown() self.mock__shutting_down.acquire.assert_called_once_with( blocking=False) self.mock__periodic_worker.stop.assert_called_once_with() self.mock__periodic_worker.wait.assert_called_once_with() self.assertIsNone(self.service._periodics_worker) self.mock_executor.shutdown.assert_not_called() self.mock_filter.tear_down_filter.assert_called_once_with() self.mock__shutting_down.release.assert_called_once_with() self.mock_exit.assert_called_once_with(None) class TestCreateSSLContext(test_base.BaseTest): def setUp(self): super(TestCreateSSLContext, self).setUp() self.app = mock.Mock() self.service = wsgi_service.WSGIService() def test_use_ssl_false(self): CONF.set_override('use_ssl', False) con = self.service._create_ssl_context() self.assertIsNone(con) @mock.patch.object(sys, 'version_info') def test_old_python_returns_none(self, mock_version_info): mock_version_info.__lt__.return_value = True CONF.set_override('use_ssl', True) con = self.service._create_ssl_context() self.assertIsNone(con) @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), 'This feature is unsupported in this version of python ' 'so the tests will be skipped') @mock.patch.object(ssl, 'create_default_context', autospec=True) def test_use_ssl_true(self, mock_cdc): CONF.set_override('use_ssl', True) m_con = mock_cdc() con = self.service._create_ssl_context() self.assertEqual(m_con, con) @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), 'This feature is unsupported in this version of python ' 'so the tests will 
be skipped') @mock.patch.object(ssl, 'create_default_context', autospec=True) def test_only_key_path_provided(self, mock_cdc): CONF.set_override('use_ssl', True) CONF.set_override('ssl_key_path', '/some/fake/path') mock_context = mock_cdc() con = self.service._create_ssl_context() self.assertEqual(mock_context, con) self.assertFalse(mock_context.load_cert_chain.called) @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), 'This feature is unsupported in this version of python ' 'so the tests will be skipped') @mock.patch.object(ssl, 'create_default_context', autospec=True) def test_only_cert_path_provided(self, mock_cdc): CONF.set_override('use_ssl', True) CONF.set_override('ssl_cert_path', '/some/fake/path') mock_context = mock_cdc() con = self.service._create_ssl_context() self.assertEqual(mock_context, con) self.assertFalse(mock_context.load_cert_chain.called) @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), 'This feature is unsupported in this version of python ' 'so the tests will be skipped') @mock.patch.object(ssl, 'create_default_context', autospec=True) def test_both_paths_provided(self, mock_cdc): key_path = '/some/fake/path/key' cert_path = '/some/fake/path/cert' CONF.set_override('use_ssl', True) CONF.set_override('ssl_key_path', key_path) CONF.set_override('ssl_cert_path', cert_path) mock_context = mock_cdc() con = self.service._create_ssl_context() self.assertEqual(mock_context, con) mock_context.load_cert_chain.assert_called_once_with(cert_path, key_path) @unittest.skipIf(sys.version_info[:3] < (2, 7, 9), 'This feature is unsupported in this version of python ' 'so the tests will be skipped') @mock.patch.object(ssl, 'create_default_context', autospec=True) def test_load_cert_chain_fails(self, mock_cdc): CONF.set_override('use_ssl', True) key_path = '/some/fake/path/key' cert_path = '/some/fake/path/cert' CONF.set_override('use_ssl', True) CONF.set_override('ssl_key_path', key_path) CONF.set_override('ssl_cert_path', cert_path) mock_context = mock_cdc() mock_context.load_cert_chain.side_effect = IOError('Boom!') con = self.service._create_ssl_context() self.assertEqual(mock_context, con) mock_context.load_cert_chain.assert_called_once_with(cert_path, key_path) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_node_cache.py0000666000175100017510000014767313241323457026053 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
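# NOTE: a minimal, unused sketch of the lookup model that TestNodeCacheFind
# below exercises: a node is matched by attributes ('bmc_address', 'mac')
# recorded when introspection starts, and a lookup must resolve to exactly
# one node. The in-memory dict stands in for the real database tables and is
# an illustrative assumption only:
def _example_find(attributes, bmc_address=None, mac=()):
    candidates = set()
    if bmc_address:
        candidates |= attributes.get(('bmc_address', bmc_address), set())
    for address in mac:
        candidates |= attributes.get(('mac', address), set())
    if len(candidates) != 1:
        raise RuntimeError('expected exactly one matching node')
    return candidates.pop()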
import copy import datetime import json import unittest import automaton import mock from oslo_config import cfg import oslo_db from oslo_utils import timeutils from oslo_utils import uuidutils import six from ironic_inspector.common import ironic as ir_utils from ironic_inspector import db from ironic_inspector import introspection_state as istate from ironic_inspector import node_cache from ironic_inspector.test import base as test_base from ironic_inspector import utils CONF = cfg.CONF class TestNodeCache(test_base.NodeTest): def test_add_node(self): # Ensure previous node information is cleared uuid2 = uuidutils.generate_uuid() session = db.get_writer_session() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.starting).save(session) db.Node(uuid=uuid2, state=istate.States.starting).save(session) db.Attribute(uuid=uuidutils.generate_uuid(), name='mac', value='11:22:11:22:11:22', node_uuid=self.uuid).save(session) node = node_cache.add_node(self.node.uuid, istate.States.starting, mac=self.macs, bmc_address='1.2.3.4', foo=None) self.assertEqual(self.uuid, node.uuid) self.assertTrue( (datetime.datetime.utcnow() - datetime.timedelta(seconds=60) < node.started_at < datetime.datetime.utcnow() + datetime.timedelta(seconds=60))) self.assertFalse(node._locked) res = set(db.model_query(db.Node.uuid, db.Node.started_at).all()) expected = {(node.uuid, node.started_at), (uuid2, None)} self.assertEqual(expected, res) res = db.model_query(db.Node).get(self.uuid) self.assertIsNotNone(res.version_id) res = (db.model_query(db.Attribute.name, db.Attribute.value, db.Attribute.node_uuid). order_by(db.Attribute.name, db.Attribute.value).all()) self.assertEqual([('bmc_address', '1.2.3.4', self.uuid), ('mac', self.macs[0], self.uuid), ('mac', self.macs[1], self.uuid), ('mac', self.macs[2], self.uuid)], [(row.name, row.value, row.node_uuid) for row in res]) def test__delete_node(self): session = db.get_writer_session() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.finished).save(session) db.Attribute(uuid=uuidutils.generate_uuid(), name='mac', value='11:22:11:22:11:22', node_uuid=self.uuid).save( session) data = {'s': 'value', 'b': True, 'i': 42} encoded = json.dumps(data) db.Option(uuid=self.uuid, name='name', value=encoded).save( session) node_cache._delete_node(self.uuid) session = db.get_writer_session() row_node = db.model_query(db.Node).filter_by( uuid=self.uuid).first() self.assertIsNone(row_node) row_attribute = db.model_query(db.Attribute).filter_by( node_uuid=self.uuid).first() self.assertIsNone(row_attribute) row_option = db.model_query(db.Option).filter_by( uuid=self.uuid).first() self.assertIsNone(row_option) @mock.patch.object(node_cache, '_get_lock_ctx', autospec=True) @mock.patch.object(node_cache, '_list_node_uuids') @mock.patch.object(node_cache, '_delete_node') def test_delete_nodes_not_in_list(self, mock__delete_node, mock__list_node_uuids, mock__get_lock_ctx): uuid2 = uuidutils.generate_uuid() uuids = {self.uuid} mock__list_node_uuids.return_value = {self.uuid, uuid2} session = db.get_writer_session() with session.begin(): node_cache.delete_nodes_not_in_list(uuids) mock__delete_node.assert_called_once_with(uuid2) mock__get_lock_ctx.assert_called_once_with(uuid2) mock__get_lock_ctx.return_value.__enter__.assert_called_once_with() def test_active_macs(self): session = db.get_writer_session() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.starting).save(session) values = [('mac', '11:22:11:22:11:22', self.uuid), ('mac', 
'22:11:22:11:22:11', self.uuid)] for value in values: db.Attribute(uuid=uuidutils.generate_uuid(), name=value[0], value=value[1], node_uuid=value[2]).save(session) self.assertEqual({'11:22:11:22:11:22', '22:11:22:11:22:11'}, node_cache.active_macs()) def test__list_node_uuids(self): session = db.get_writer_session() uuid2 = uuidutils.generate_uuid() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.starting).save(session) db.Node(uuid=uuid2, state=istate.States.starting).save(session) node_uuid_list = node_cache._list_node_uuids() self.assertEqual({self.uuid, uuid2}, node_uuid_list) def test_add_attribute(self): session = db.get_writer_session() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.starting).save(session) node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42) node_info.add_attribute('key', 'value') res = db.model_query(db.Attribute.name, db.Attribute.value, db.Attribute.node_uuid, session=session) res = res.order_by(db.Attribute.name, db.Attribute.value).all() self.assertEqual([('key', 'value', self.uuid)], [tuple(row) for row in res]) # check that .attributes got invalidated and reloaded self.assertEqual({'key': ['value']}, node_info.attributes) def test_add_attribute_same_name(self): session = db.get_writer_session() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.starting).save(session) node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42) node_info.add_attribute('key', ['foo', 'bar']) node_info.add_attribute('key', 'baz') res = db.model_query(db.Attribute.name, db.Attribute.value, db.Attribute.node_uuid, session=session) res = res.order_by(db.Attribute.name, db.Attribute.value).all() self.assertEqual([('key', 'bar', self.uuid), ('key', 'baz', self.uuid), ('key', 'foo', self.uuid)], [tuple(row) for row in res]) def test_add_attribute_same_value(self): session = db.get_writer_session() with session.begin(): db.Node(uuid=self.node.uuid, state=istate.States.starting).save(session) node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42) node_info.add_attribute('key', 'value') node_info.add_attribute('key', 'value') res = db.model_query(db.Attribute.name, db.Attribute.value, db.Attribute.node_uuid, session=session) self.assertEqual([('key', 'value', self.uuid), ('key', 'value', self.uuid)], [tuple(row) for row in res]) def test_attributes(self): node_info = node_cache.add_node(self.uuid, istate.States.starting, bmc_address='1.2.3.4', mac=self.macs) self.assertEqual({'bmc_address': ['1.2.3.4'], 'mac': self.macs}, node_info.attributes) # check invalidation session = db.get_writer_session() with session.begin(): db.Attribute(uuid=uuidutils.generate_uuid(), name='foo', value='bar', node_uuid=self.uuid).save(session) # still cached self.assertEqual({'bmc_address': ['1.2.3.4'], 'mac': self.macs}, node_info.attributes) node_info.invalidate_cache() self.assertEqual({'bmc_address': ['1.2.3.4'], 'mac': self.macs, 'foo': ['bar']}, node_info.attributes) class TestNodeCacheFind(test_base.NodeTest): def setUp(self): super(TestNodeCacheFind, self).setUp() self.macs2 = ['00:00:00:00:00:00'] node_cache.add_node(self.uuid, istate.States.starting, bmc_address='1.2.3.4', mac=self.macs) def test_no_data(self): self.assertRaises(utils.Error, node_cache.find_node) self.assertRaises(utils.Error, node_cache.find_node, mac=[]) def test_bmc(self): res = node_cache.find_node(bmc_address='1.2.3.4') self.assertEqual(self.uuid, res.uuid) self.assertTrue( datetime.datetime.utcnow() - datetime.timedelta(seconds=60) < 
res.started_at < datetime.datetime.utcnow() + datetime.timedelta(seconds=1)) self.assertTrue(res._locked) def test_same_bmc_different_macs(self): uuid2 = uuidutils.generate_uuid() node_cache.add_node(uuid2, istate.States.starting, bmc_address='1.2.3.4', mac=self.macs2) res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs) self.assertEqual(self.uuid, res.uuid) res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs2) self.assertEqual(uuid2, res.uuid) def test_same_bmc_raises(self): uuid2 = uuidutils.generate_uuid() node_cache.add_node(uuid2, istate.States.starting, bmc_address='1.2.3.4') six.assertRaisesRegex(self, utils.Error, 'Multiple nodes', node_cache.find_node, bmc_address='1.2.3.4') def test_macs(self): res = node_cache.find_node(mac=['11:22:33:33:33:33', self.macs[1]]) self.assertEqual(self.uuid, res.uuid) self.assertTrue( datetime.datetime.utcnow() - datetime.timedelta(seconds=60) < res.started_at < datetime.datetime.utcnow() + datetime.timedelta(seconds=1)) self.assertTrue(res._locked) def test_macs_not_found(self): self.assertRaises(utils.Error, node_cache.find_node, mac=['11:22:33:33:33:33', '66:66:44:33:22:11']) def test_macs_multiple_found(self): node_cache.add_node('uuid2', istate.States.starting, mac=self.macs2) self.assertRaises(utils.Error, node_cache.find_node, mac=[self.macs[0], self.macs2[0]]) def test_both(self): res = node_cache.find_node(bmc_address='1.2.3.4', mac=self.macs) self.assertEqual(self.uuid, res.uuid) self.assertTrue( datetime.datetime.utcnow() - datetime.timedelta(seconds=60) < res.started_at < datetime.datetime.utcnow() + datetime.timedelta(seconds=1)) self.assertTrue(res._locked) def test_inconsistency(self): session = db.get_writer_session() with session.begin(): (db.model_query(db.Node).filter_by(uuid=self.uuid). delete()) self.assertRaises(utils.Error, node_cache.find_node, bmc_address='1.2.3.4') def test_already_finished(self): session = db.get_writer_session() with session.begin(): (db.model_query(db.Node).filter_by(uuid=self.uuid). 
update({'finished_at': datetime.datetime.utcnow()})) self.assertRaises(utils.Error, node_cache.find_node, bmc_address='1.2.3.4') class TestNodeCacheCleanUp(test_base.NodeTest): def setUp(self): super(TestNodeCacheCleanUp, self).setUp() self.started_at = datetime.datetime.utcnow() session = db.get_writer_session() with session.begin(): db.Node(uuid=self.uuid, state=istate.States.waiting, started_at=self.started_at).save( session) for v in self.macs: db.Attribute(uuid=uuidutils.generate_uuid(), name='mac', value=v, node_uuid=self.uuid).save(session) db.Option(uuid=self.uuid, name='foo', value='bar').save( session) def test_no_timeout(self): CONF.set_override('timeout', 0) self.assertFalse(node_cache.clean_up()) res = [tuple(row) for row in db.model_query(db.Node.finished_at, db.Node.error).all()] self.assertEqual([(None, None)], res) self.assertEqual(len(self.macs), db.model_query(db.Attribute).count()) self.assertEqual(1, db.model_query(db.Option).count()) @mock.patch.object(node_cache, '_get_lock', autospec=True) @mock.patch.object(timeutils, 'utcnow') def test_ok(self, time_mock, get_lock_mock): time_mock.return_value = datetime.datetime.utcnow() self.assertFalse(node_cache.clean_up()) res = [tuple(row) for row in db.model_query( db.Node.finished_at, db.Node.error).all()] self.assertEqual([(None, None)], res) self.assertEqual(len(self.macs), db.model_query(db.Attribute).count()) self.assertEqual(1, db.model_query(db.Option).count()) self.assertFalse(get_lock_mock.called) @mock.patch.object(node_cache, '_get_lock', autospec=True) @mock.patch.object(timeutils, 'utcnow') def test_timeout(self, time_mock, get_lock_mock): # Add a finished node to confirm we don't try to timeout it time_mock.return_value = self.started_at session = db.get_writer_session() finished_at = self.started_at + datetime.timedelta(seconds=60) with session.begin(): db.Node(uuid=self.uuid + '1', started_at=self.started_at, state=istate.States.waiting, finished_at=finished_at).save(session) CONF.set_override('timeout', 99) time_mock.return_value = (self.started_at + datetime.timedelta(seconds=100)) self.assertEqual([self.uuid], node_cache.clean_up()) res = [(row.state, row.finished_at, row.error) for row in db.model_query(db.Node).all()] self.assertEqual( [(istate.States.error, self.started_at + datetime.timedelta(seconds=100), 'Introspection timeout'), (istate.States.waiting, self.started_at + datetime.timedelta(seconds=60), None)], res) self.assertEqual([], db.model_query(db.Attribute).all()) self.assertEqual([], db.model_query(db.Option).all()) get_lock_mock.assert_called_once_with(self.uuid) get_lock_mock.return_value.acquire.assert_called_once_with() @mock.patch.object(node_cache, '_get_lock', autospec=True) @mock.patch.object(timeutils, 'utcnow') def test_timeout_active_state(self, time_mock, get_lock_mock): time_mock.return_value = self.started_at session = db.get_writer_session() CONF.set_override('timeout', 1) for state in [istate.States.starting, istate.States.enrolling, istate.States.processing, istate.States.reapplying]: db.model_query(db.Node, session=session).filter_by( uuid=self.uuid).update({'state': state, 'finished_at': None}) current_time = self.started_at + datetime.timedelta(seconds=2) time_mock.return_value = current_time self.assertEqual([self.uuid], node_cache.clean_up()) res = [(row.state, row.finished_at, row.error) for row in db.model_query(db.Node).all()] self.assertEqual( [(istate.States.error, current_time, 'Introspection timeout')], res) def test_old_status(self): 
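# NOTE: statuses that finished more than node_status_keep_time seconds ago should be purged by clean_up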
CONF.set_override('node_status_keep_time', 42) session = db.get_writer_session() with session.begin(): db.model_query(db.Node).update( {'finished_at': (datetime.datetime.utcnow() - datetime.timedelta(seconds=100))}) self.assertEqual([], node_cache.clean_up()) self.assertEqual([], db.model_query(db.Node).all()) def test_old_status_disabled(self): # Status clean up is disabled by default session = db.get_writer_session() with session.begin(): db.model_query(db.Node).update( {'finished_at': (datetime.datetime.utcnow() - datetime.timedelta(days=10000))}) self.assertEqual([], node_cache.clean_up()) self.assertNotEqual([], db.model_query(db.Node).all()) class TestNodeCacheGetNode(test_base.NodeTest): def test_ok(self): started_at = (datetime.datetime.utcnow() - datetime.timedelta(seconds=42)) session = db.get_writer_session() with session.begin(): db.Node(uuid=self.uuid, state=istate.States.starting, started_at=started_at).save(session) info = node_cache.get_node(self.uuid) self.assertEqual(self.uuid, info.uuid) self.assertEqual(started_at, info.started_at) self.assertIsNone(info.finished_at) self.assertIsNone(info.error) self.assertFalse(info._locked) def test_locked(self): started_at = (datetime.datetime.utcnow() - datetime.timedelta(seconds=42)) session = db.get_writer_session() with session.begin(): db.Node(uuid=self.uuid, state=istate.States.starting, started_at=started_at).save(session) info = node_cache.get_node(self.uuid, locked=True) self.assertEqual(self.uuid, info.uuid) self.assertEqual(started_at, info.started_at) self.assertIsNone(info.finished_at) self.assertIsNone(info.error) self.assertTrue(info._locked) def test_not_found(self): self.assertRaises(utils.Error, node_cache.get_node, uuidutils.generate_uuid()) def test_with_name(self): started_at = (datetime.datetime.utcnow() - datetime.timedelta(seconds=42)) session = db.get_writer_session() with session.begin(): db.Node(uuid=self.uuid, state=istate.States.starting, started_at=started_at).save(session) ironic = mock.Mock() ironic.node.get.return_value = self.node info = node_cache.get_node('name', ironic=ironic) self.assertEqual(self.uuid, info.uuid) self.assertEqual(started_at, info.started_at) self.assertIsNone(info.finished_at) self.assertIsNone(info.error) self.assertFalse(info._locked) ironic.node.get.assert_called_once_with('name') @mock.patch.object(timeutils, 'utcnow', lambda: datetime.datetime(1, 1, 1)) class TestNodeInfoFinished(test_base.NodeTest): def setUp(self): super(TestNodeInfoFinished, self).setUp() node_cache.add_node(self.uuid, istate.States.processing, bmc_address='1.2.3.4', mac=self.macs) self.node_info = node_cache.NodeInfo( uuid=self.uuid, started_at=datetime.datetime(3, 1, 4)) session = db.get_writer_session() with session.begin(): db.Option(uuid=self.uuid, name='foo', value='bar').save( session) def test_success(self): self.node_info.finished(istate.Events.finish) session = db.get_writer_session() with session.begin(): self.assertEqual((datetime.datetime(1, 1, 1), None), tuple(db.model_query( db.Node.finished_at, db.Node.error).first())) self.assertEqual([], db.model_query(db.Attribute, session=session).all()) self.assertEqual([], db.model_query(db.Option, session=session).all()) def test_error(self): self.node_info.finished(istate.Events.error, error='boom') self.assertEqual((datetime.datetime(1, 1, 1), 'boom'), tuple(db.model_query(db.Node.finished_at, db.Node.error).first())) self.assertEqual([], db.model_query(db.Attribute).all()) self.assertEqual([], db.model_query(db.Option).all()) def 
test_release_lock(self): self.node_info.acquire_lock() self.node_info.finished(istate.Events.finish) self.assertFalse(self.node_info._locked) class TestNodeInfoOptions(test_base.NodeTest): def setUp(self): super(TestNodeInfoOptions, self).setUp() node_cache.add_node(self.uuid, istate.States.starting, bmc_address='1.2.3.4', mac=self.macs) self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14) session = db.get_writer_session() with session.begin(): db.Option(uuid=self.uuid, name='foo', value='"bar"').save( session) def test_get(self): self.assertEqual({'foo': 'bar'}, self.node_info.options) # should be cached self.assertEqual(self.node_info.options, self.node_info.options) # invalidate cache old_options = self.node_info.options self.node_info.invalidate_cache() self.assertIsNot(old_options, self.node_info.options) self.assertEqual(old_options, self.node_info.options) def test_set(self): data = {'s': 'value', 'b': True, 'i': 42} self.node_info.set_option('name', data) self.assertEqual(data, self.node_info.options['name']) new = node_cache.NodeInfo(uuid=self.uuid, started_at=3.14) self.assertEqual(data, new.options['name']) @mock.patch.object(ir_utils, 'get_client', autospec=True) class TestNodeCacheIronicObjects(unittest.TestCase): def setUp(self): super(TestNodeCacheIronicObjects, self).setUp() self.ports = {'mac1': mock.Mock(address='mac1', spec=['address']), 'mac2': mock.Mock(address='mac2', spec=['address'])} self.uuid = uuidutils.generate_uuid() def test_node_provided(self, mock_ironic): node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=mock.sentinel.node) self.assertIs(mock.sentinel.node, node_info.node()) self.assertFalse(mock_ironic.called) def test_node_not_provided(self, mock_ironic): mock_ironic.return_value.node.get.return_value = mock.sentinel.node node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0) self.assertIs(mock.sentinel.node, node_info.node()) self.assertIs(node_info.node(), node_info.node()) mock_ironic.assert_called_once_with() mock_ironic.return_value.node.get.assert_called_once_with(self.uuid) def test_node_ironic_preset(self, mock_ironic): mock_ironic2 = mock.Mock() mock_ironic2.node.get.return_value = mock.sentinel.node node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, ironic=mock_ironic2) self.assertIs(mock.sentinel.node, node_info.node()) self.assertFalse(mock_ironic.called) mock_ironic2.node.get.assert_called_once_with(self.uuid) def test_ports_provided(self, mock_ironic): node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, ports=self.ports) self.assertIs(self.ports, node_info.ports()) self.assertFalse(mock_ironic.called) def test_ports_provided_list(self, mock_ironic): node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, ports=list(self.ports.values())) self.assertEqual(self.ports, node_info.ports()) self.assertFalse(mock_ironic.called) def test_ports_not_provided(self, mock_ironic): mock_ironic.return_value.node.list_ports.return_value = list( self.ports.values()) node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0) self.assertEqual(self.ports, node_info.ports()) self.assertIs(node_info.ports(), node_info.ports()) mock_ironic.assert_called_once_with() mock_ironic.return_value.node.list_ports.assert_called_once_with( self.uuid, limit=0, detail=True) def test_ports_ironic_preset(self, mock_ironic): mock_ironic2 = mock.Mock() mock_ironic2.node.list_ports.return_value = list( self.ports.values()) node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, ironic=mock_ironic2) 
self.assertEqual(self.ports, node_info.ports()) self.assertFalse(mock_ironic.called) mock_ironic2.node.list_ports.assert_called_once_with( self.uuid, limit=0, detail=True) class TestUpdate(test_base.NodeTest): def setUp(self): super(TestUpdate, self).setUp() self.ironic = mock.Mock() self.ports = {'mac%d' % i: mock.Mock(address='mac%d' % i, uuid=str(i)) for i in range(2)} self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node, ports=self.ports, ironic=self.ironic) def test_patch(self): self.ironic.node.update.return_value = mock.sentinel.node self.node_info.patch([{'patch': 'patch'}]) self.ironic.node.update.assert_called_once_with(self.uuid, [{'patch': 'patch'}]) self.assertIs(mock.sentinel.node, self.node_info.node()) def test_patch_path_wo_leading_slash(self): self.ironic.node.update.return_value = mock.sentinel.node patch = [{'op': 'add', 'path': 'driver_info/test', 'value': 42}] expected_patch = copy.deepcopy(patch) expected_patch[0]['path'] = '/' + 'driver_info/test' self.node_info.patch(patch) self.ironic.node.update.assert_called_once_with(self.uuid, expected_patch) self.assertIs(mock.sentinel.node, self.node_info.node()) def test_patch_path_with_leading_slash(self): self.ironic.node.update.return_value = mock.sentinel.node patch = [{'op': 'add', 'path': '/driver_info/test', 'value': 42}] self.node_info.patch(patch) self.ironic.node.update.assert_called_once_with(self.uuid, patch) self.assertIs(mock.sentinel.node, self.node_info.node()) def test_update_properties(self): self.ironic.node.update.return_value = mock.sentinel.node self.node_info.update_properties(prop=42) patch = [{'op': 'add', 'path': '/properties/prop', 'value': 42}] self.ironic.node.update.assert_called_once_with(self.uuid, patch) self.assertIs(mock.sentinel.node, self.node_info.node()) def test_update_capabilities(self): self.ironic.node.update.return_value = mock.sentinel.node self.node.properties['capabilities'] = 'foo:bar,x:y' self.node_info.update_capabilities(x=1, y=2) self.ironic.node.update.assert_called_once_with(self.uuid, mock.ANY) patch = self.ironic.node.update.call_args[0][1] new_caps = ir_utils.capabilities_to_dict(patch[0]['value']) self.assertEqual({'foo': 'bar', 'x': '1', 'y': '2'}, new_caps) def test_replace_field(self): self.ironic.node.update.return_value = mock.sentinel.node self.node.extra['foo'] = 'bar' self.node_info.replace_field('/extra/foo', lambda v: v + '1') patch = [{'op': 'replace', 'path': '/extra/foo', 'value': 'bar1'}] self.ironic.node.update.assert_called_once_with(self.uuid, patch) self.assertIs(mock.sentinel.node, self.node_info.node()) def test_replace_field_not_found(self): self.ironic.node.update.return_value = mock.sentinel.node self.assertRaises(KeyError, self.node_info.replace_field, '/extra/foo', lambda v: v + '1') def test_replace_field_with_default(self): self.ironic.node.update.return_value = mock.sentinel.node self.node_info.replace_field('/extra/foo', lambda v: v + [42], default=[]) patch = [{'op': 'add', 'path': '/extra/foo', 'value': [42]}] self.ironic.node.update.assert_called_once_with(self.uuid, patch) self.assertIs(mock.sentinel.node, self.node_info.node()) def test_replace_field_same_value(self): self.ironic.node.update.return_value = mock.sentinel.node self.node.extra['foo'] = 'bar' self.node_info.replace_field('/extra/foo', lambda v: v) self.assertFalse(self.ironic.node.update.called) def test_patch_port(self): self.ironic.port.update.return_value = mock.sentinel.port self.node_info.patch_port(self.ports['mac0'], ['patch']) 
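# the port fixture maps address 'mac0' to uuid '0', so the update is issued by uuid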
self.ironic.port.update.assert_called_once_with('0', ['patch']) self.assertIs(mock.sentinel.port, self.node_info.ports()['mac0']) def test_patch_port_by_mac(self): self.ironic.port.update.return_value = mock.sentinel.port self.node_info.patch_port('mac0', ['patch']) self.ironic.port.update.assert_called_once_with('0', ['patch']) self.assertIs(mock.sentinel.port, self.node_info.ports()['mac0']) def test_delete_port(self): self.node_info.delete_port(self.ports['mac0']) self.ironic.port.delete.assert_called_once_with('0') self.assertEqual(['mac1'], list(self.node_info.ports())) def test_delete_port_by_mac(self): self.node_info.delete_port('mac0') self.ironic.port.delete.assert_called_once_with('0') self.assertEqual(['mac1'], list(self.node_info.ports())) @mock.patch.object(node_cache.LOG, 'warning', autospec=True) def test_create_ports(self, mock_warn): ports = [ 'mac2', {'mac': 'mac3', 'client_id': '42', 'pxe': False}, {'mac': 'mac4', 'pxe': True} ] self.node_info.create_ports(ports) self.assertEqual({'mac0', 'mac1', 'mac2', 'mac3', 'mac4'}, set(self.node_info.ports())) create_calls = [ mock.call(node_uuid=self.uuid, address='mac2', extra={}, pxe_enabled=True), mock.call(node_uuid=self.uuid, address='mac3', extra={'client-id': '42'}, pxe_enabled=False), mock.call(node_uuid=self.uuid, address='mac4', extra={}, pxe_enabled=True), ] self.assertEqual(create_calls, self.ironic.port.create.call_args_list) # No conflicts - cache was not cleared - no calls to port.list self.assertFalse(mock_warn.called) self.assertFalse(self.ironic.port.list.called) @mock.patch.object(node_cache.LOG, 'info', autospec=True) def test__create_port(self, mock_info): uuid = uuidutils.generate_uuid() address = 'mac1' self.ironic.port.create.return_value = mock.Mock(uuid=uuid, address=address) self.node_info._create_port(address, client_id='42') self.ironic.port.create.assert_called_once_with( node_uuid=self.uuid, address='mac1', client_id='42') mock_info.assert_called_once_with( mock.ANY, {'uuid': uuid, 'mac': address, 'attrs': {'client_id': '42'}}, node_info=self.node_info) @mock.patch.object(node_cache.LOG, 'warning', autospec=True) def test_create_ports_with_conflicts(self, mock_warn): self.ironic.port.create.return_value = mock.Mock( uuid='fake', address='mac') ports = [ 'mac', {'mac': 'mac0'}, 'mac1', {'mac': 'mac2', 'client_id': '42', 'pxe': False}, ] self.node_info.create_ports(ports) create_calls = [ mock.call(node_uuid=self.uuid, address='mac', extra={}, pxe_enabled=True), mock.call(node_uuid=self.uuid, address='mac2', extra={'client-id': '42'}, pxe_enabled=False), ] self.assertEqual(create_calls, self.ironic.port.create.call_args_list) mock_warn.assert_called_once_with(mock.ANY, ['mac0', 'mac1'], node_info=self.node_info) class TestNodeCacheGetByPath(test_base.NodeTest): def setUp(self): super(TestNodeCacheGetByPath, self).setUp() self.node = mock.Mock(spec=['uuid', 'properties'], properties={'answer': 42}, uuid=self.uuid) self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node) def test_get_by_path(self): self.assertEqual(self.uuid, self.node_info.get_by_path('/uuid')) self.assertEqual(self.uuid, self.node_info.get_by_path('uuid')) self.assertEqual(42, self.node_info.get_by_path('/properties/answer')) self.assertRaises(KeyError, self.node_info.get_by_path, '/foo') self.assertRaises(KeyError, self.node_info.get_by_path, '/extra/foo') @mock.patch.object(node_cache, '_get_lock', autospec=True) class TestLock(test_base.NodeTest): def test_acquire(self, get_lock_mock): node_info = 
node_cache.NodeInfo(self.uuid) self.assertFalse(node_info._locked) get_lock_mock.assert_called_once_with(self.uuid) self.assertFalse(get_lock_mock.return_value.acquire.called) self.assertTrue(node_info.acquire_lock()) self.assertTrue(node_info._locked) self.assertTrue(node_info.acquire_lock()) self.assertTrue(node_info._locked) get_lock_mock.return_value.acquire.assert_called_once_with(True) def test_release(self, get_lock_mock): node_info = node_cache.NodeInfo(self.uuid) node_info.acquire_lock() self.assertTrue(node_info._locked) node_info.release_lock() self.assertFalse(node_info._locked) node_info.release_lock() self.assertFalse(node_info._locked) get_lock_mock.return_value.acquire.assert_called_once_with(True) get_lock_mock.return_value.release.assert_called_once_with() def test_acquire_non_blocking(self, get_lock_mock): node_info = node_cache.NodeInfo(self.uuid) self.assertFalse(node_info._locked) get_lock_mock.return_value.acquire.side_effect = iter([False, True]) self.assertFalse(node_info.acquire_lock(blocking=False)) self.assertFalse(node_info._locked) self.assertTrue(node_info.acquire_lock(blocking=False)) self.assertTrue(node_info._locked) self.assertTrue(node_info.acquire_lock(blocking=False)) self.assertTrue(node_info._locked) get_lock_mock.return_value.acquire.assert_called_with(False) self.assertEqual(2, get_lock_mock.return_value.acquire.call_count) @mock.patch.object(node_cache, 'add_node', autospec=True) @mock.patch.object(ir_utils, 'get_client', autospec=True) class TestNodeCreate(test_base.NodeTest): def setUp(self): super(TestNodeCreate, self).setUp() self.mock_client = mock.Mock() def test_default_create(self, mock_get_client, mock_add_node): mock_get_client.return_value = self.mock_client self.mock_client.node.create.return_value = self.node node_cache.create_node('fake') self.mock_client.node.create.assert_called_once_with(driver='fake') mock_add_node.assert_called_once_with( self.node.uuid, istate.States.enrolling, ironic=self.mock_client) def test_create_with_args(self, mock_get_client, mock_add_node): mock_get_client.return_value = self.mock_client self.mock_client.node.create.return_value = self.node node_cache.create_node('agent_ipmitool', ironic=self.mock_client) self.assertFalse(mock_get_client.called) self.mock_client.node.create.assert_called_once_with( driver='agent_ipmitool') mock_add_node.assert_called_once_with( self.node.uuid, istate.States.enrolling, ironic=self.mock_client) def test_create_client_error(self, mock_get_client, mock_add_node): mock_get_client.return_value = self.mock_client self.mock_client.node.create.side_effect = ( node_cache.exceptions.InvalidAttribute) node_cache.create_node('fake') mock_get_client.assert_called_once_with() self.mock_client.node.create.assert_called_once_with(driver='fake') self.assertFalse(mock_add_node.called) class TestNodeCacheListNode(test_base.NodeTest): def setUp(self): super(TestNodeCacheListNode, self).setUp() self.uuid2 = uuidutils.generate_uuid() session = db.get_writer_session() with session.begin(): db.Node(uuid=self.uuid, started_at=datetime.datetime(1, 1, 2)).save(session) db.Node(uuid=self.uuid2, started_at=datetime.datetime(1, 1, 1), finished_at=datetime.datetime(1, 1, 3)).save(session) # NOTE: node(self.uuid).started_at > node(self.uuid2).started_at, and # node_cache.get_node_list orders results strictly, newest first def test_list_node(self): nodes = node_cache.get_node_list() self.assertEqual([self.uuid, self.uuid2], [node.uuid for node in nodes]) def test_list_node_limit(self):
nodes = node_cache.get_node_list(limit=1) self.assertEqual([self.uuid], [node.uuid for node in nodes]) def test_list_node_marker(self): # get nodes started_at after node(self.uuid) nodes = node_cache.get_node_list(marker=self.uuid) self.assertEqual([self.uuid2], [node.uuid for node in nodes]) def test_list_node_wrong_marker(self): self.assertRaises(utils.Error, node_cache.get_node_list, marker='foo-bar') class TestNodeInfoVersionId(test_base.NodeStateTest): def test_get(self): self.node_info._version_id = None self.assertEqual(self.db_node.version_id, self.node_info.version_id) def test_get_missing_uuid(self): self.node_info.uuid = 'foo' self.node_info._version_id = None def func(): return self.node_info.version_id six.assertRaisesRegex(self, utils.NotFoundInCacheError, '.*', func) def test_set(self): with db.ensure_transaction() as session: self.node_info._set_version_id(uuidutils.generate_uuid(), session) row = db.model_query(db.Node).get(self.node_info.uuid) self.assertEqual(self.node_info.version_id, row.version_id) def test_set_race(self): with db.ensure_transaction() as session: row = db.model_query(db.Node, session=session).get( self.node_info.uuid) row.update({'version_id': uuidutils.generate_uuid()}) row.save(session) six.assertRaisesRegex(self, utils.NodeStateRaceCondition, 'Node state mismatch', self.node_info._set_state, istate.States.finished) class TestNodeInfoState(test_base.NodeStateTest): def test_get(self): self.node_info._state = None self.assertEqual(self.db_node.state, self.node_info.state) def test_set(self): self.node_info._set_state(istate.States.finished) row = db.model_query(db.Node).get(self.node_info.uuid) self.assertEqual(self.node_info.state, row.state) def test_set_invalid_state(self): six.assertRaisesRegex(self, oslo_db.exception.DBError, 'constraint failed', self.node_info._set_state, 'foo') def test_commit(self): current_time = timeutils.utcnow() self.node_info.started_at = self.node_info.finished_at = current_time self.node_info.error = "Boo!" 
self.node_info.commit() row = db.model_query(db.Node).get(self.node_info.uuid) self.assertEqual(self.node_info.started_at, row.started_at) self.assertEqual(self.node_info.finished_at, row.finished_at) self.assertEqual(self.node_info.error, row.error) class TestNodeInfoStateFsm(test_base.NodeStateTest): def test__get_fsm(self): self.node_info._fsm = None fsm = self.node_info._get_fsm() self.assertEqual(self.node_info.state, fsm.current_state) def test__get_fsm_invalid_state(self): self.node_info._fsm = None self.node_info._state = 'foo' six.assertRaisesRegex(self, automaton.exceptions.NotFound, '.*undefined state.*', self.node_info._get_fsm) def test__fsm_ctx_set_state(self): with self.node_info._fsm_ctx() as fsm: fsm.process_event(istate.Events.wait) self.assertEqual(self.node_info.state, istate.States.starting) self.assertEqual(self.node_info.state, istate.States.waiting) def test__fsm_ctx_set_same_state(self): version_id = self.node_info.version_id with self.node_info._fsm_ctx() as fsm: fsm.initialize(self.node_info.state) self.assertEqual(version_id, self.node_info.version_id) def test__fsm_ctx_illegal_event(self): with self.node_info._fsm_ctx() as fsm: six.assertRaisesRegex(self, automaton.exceptions.NotFound, 'no defined transition', fsm.process_event, istate.Events.finish) self.assertEqual(self.node_info.state, istate.States.starting) def test__fsm_ctx_generic_exception(self): class CustomException(Exception): pass def func(fsm): fsm.process_event(istate.Events.wait) raise CustomException('Oops') with self.node_info._fsm_ctx() as fsm: self.assertRaises(CustomException, func, fsm) self.assertEqual(self.node_info.state, istate.States.waiting) def test_fsm_event(self): self.node_info.fsm_event(istate.Events.wait) self.assertEqual(self.node_info.state, istate.States.waiting) def test_fsm_illegal_event(self): six.assertRaisesRegex(self, utils.NodeStateInvalidEvent, 'no defined transition', self.node_info.fsm_event, istate.Events.finish) self.assertEqual(self.node_info.state, istate.States.starting) def test_fsm_illegal_strict_event(self): six.assertRaisesRegex(self, utils.NodeStateInvalidEvent, 'no defined transition', self.node_info.fsm_event, istate.Events.finish, strict=True) self.assertIn('no defined transition', self.node_info.error) self.assertEqual(self.node_info.state, istate.States.error) class TestFsmEvent(test_base.NodeStateTest): def test_event_before(self): @node_cache.fsm_event_before(istate.Events.wait) def function(node_info): self.assertEqual(node_info.state, istate.States.waiting) node_info.fsm_event(istate.Events.process) function(self.node_info) self.assertEqual(self.node_info.state, istate.States.processing) def test_event_after(self): @node_cache.fsm_event_after(istate.Events.process) def function(node_info): node_info.fsm_event(istate.Events.wait) self.assertEqual(node_info.state, istate.States.waiting) function(self.node_info) self.assertEqual(self.node_info.state, istate.States.processing) @mock.patch.object(node_cache, 'LOG', autospec=True) def test_triggers_fsm_error_transition_no_errors(self, log_mock): class CustomException(Exception): pass @node_cache.triggers_fsm_error_transition(no_errors=(CustomException,)) def function(node_info): self.assertEqual(node_info.state, istate.States.starting) raise CustomException('Oops') function(self.node_info) log_msg = ('Not processing error event for the exception: ' '%(exc)s raised by %(func)s') log_mock.debug.assert_called_with(log_msg, mock.ANY, node_info=mock.ANY) self.assertEqual(self.node_info.state, 
istate.States.starting) def test_triggers_fsm_error_transition_no_errors_empty(self): class CustomException(Exception): pass @node_cache.triggers_fsm_error_transition(no_errors=()) def function(node_info): self.assertEqual(node_info.state, istate.States.starting) raise CustomException('Oops!') # assert an error event was performed self.assertRaises(CustomException, function, self.node_info) self.assertEqual(self.node_info.state, istate.States.error) def test_triggers_fsm_error_transition_no_errors_with_error(self): class CustomException(Exception): pass @node_cache.triggers_fsm_error_transition(errors=(CustomException,)) def function(node_info): self.assertEqual(node_info.state, istate.States.starting) raise CustomException('Oops') # assert a listed error triggers an error event self.assertRaises(CustomException, function, self.node_info) self.assertEqual(self.node_info.state, istate.States.error) def test_triggers_fsm_error_transition_errors_masked(self): class CustomException(Exception): pass @node_cache.triggers_fsm_error_transition(errors=()) def function(node_info): self.assertEqual(node_info.state, istate.States.starting) raise CustomException('Oops') # assert no error event was triggered self.assertRaises(CustomException, function, self.node_info) self.assertEqual(self.node_info.state, istate.States.starting) def test_unlock(self): @node_cache.release_lock def func(node_info): self.assertTrue(node_info._locked) self.node_info.acquire_lock(blocking=True) with mock.patch.object(self.node_info, 'release_lock', autospec=True) as release_lock_mock: func(self.node_info) release_lock_mock.assert_called_once_with() def test_unlock_unlocked(self): @node_cache.release_lock def func(node_info): self.assertFalse(node_info._locked) self.node_info.release_lock() with mock.patch.object(self.node_info, 'release_lock', autospec=True) as release_lock_mock: func(self.node_info) self.assertEqual(0, release_lock_mock.call_count) @mock.patch.object(node_cache, 'triggers_fsm_error_transition', autospec=True) @mock.patch.object(node_cache, 'fsm_event_after', autospec=True) def test_fsm_transition(self, fsm_event_after_mock, trigger_mock): @node_cache.fsm_transition(istate.Events.finish) def func(): pass fsm_event_after_mock.assert_called_once_with(istate.Events.finish) trigger_mock.assert_called_once_with() @mock.patch.object(node_cache, 'triggers_fsm_error_transition', autospec=True) @mock.patch.object(node_cache, 'fsm_event_before', autospec=True) def test_nonreentrant_fsm_transition(self, fsm_event_before_mock, trigger_mock): @node_cache.fsm_transition(istate.Events.abort, reentrant=False) def func(): pass fsm_event_before_mock.assert_called_once_with(istate.Events.abort, strict=True) trigger_mock.assert_called_once_with() @mock.patch.object(node_cache, 'add_node', autospec=True) @mock.patch.object(node_cache, 'NodeInfo', autospec=True) class TestStartIntrospection(test_base.NodeTest): def prepare_mocks(fn): @six.wraps(fn) def inner(self, NodeMock, *args): method_mock = mock.Mock() NodeMock.return_value = self.node_info self.node_info.fsm_event = method_mock fn(self, method_mock, *args) method_mock.assert_called_once_with(istate.Events.start) return inner @prepare_mocks def test_node_in_db_ok_state(self, fsm_event_mock, add_node_mock): def side_effect(*args): self.node_info._state = 'foo' fsm_event_mock.side_effect = side_effect node_cache.start_introspection(self.node.uuid) add_node_mock.assert_called_once_with(self.node_info.uuid, 'foo') @prepare_mocks def test_node_in_db_invalid_state(self,
fsm_event_mock, add_node_mock): fsm_event_mock.side_effect = utils.NodeStateInvalidEvent('Oops!') six.assertRaisesRegex(self, utils.NodeStateInvalidEvent, 'Oops!', node_cache.start_introspection, self.node_info.uuid) self.assertFalse(add_node_mock.called) @prepare_mocks def test_node_in_db_race_condition(self, fsm_event_mock, add_node_mock): fsm_event_mock.side_effect = utils.NodeStateRaceCondition() six.assertRaisesRegex(self, utils.NodeStateRaceCondition, '.*', node_cache.start_introspection, self.node_info.uuid) self.assertFalse(add_node_mock.called) @prepare_mocks def test_error_fsm_event(self, fsm_event_mock, add_node_mock): fsm_event_mock.side_effect = utils.Error('Oops!') six.assertRaisesRegex(self, utils.Error, 'Oops!', node_cache.start_introspection, self.node_info.uuid) self.assertFalse(add_node_mock.called) @prepare_mocks def test_node_not_in_db(self, fsm_event_mock, add_node_mock): fsm_event_mock.side_effect = utils.NotFoundInCacheError('Oops!') node_cache.start_introspection(self.node_info.uuid) add_node_mock.assert_called_once_with(self.node_info.uuid, istate.States.starting) @prepare_mocks def test_custom_exc_fsm_event(self, fsm_event_mock, add_node_mock): class CustomError(Exception): pass fsm_event_mock.side_effect = CustomError('Oops!') six.assertRaisesRegex(self, CustomError, 'Oops!', node_cache.start_introspection, self.node_info.uuid) self.assertFalse(add_node_mock.called) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_pci_devices.py0000666000175100017510000001154513241323457030005 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
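# Editor's aside before the tests: they pin down the behaviour of
# pci_devices._parse_pci_alias_entry(), which evidently maps JSON entries
# from the [pci_devices]/alias option to {(vendor_id, product_id): name},
# logging and skipping malformed entries. Below is a minimal self-contained
# sketch of that contract, inferred from the assertions in this file; it is
# illustrative only, and the name parse_pci_alias_sketch is made up, not the
# plugin's actual code.
import json


def parse_pci_alias_sketch(alias_entries):
    """Map JSON alias entries to {(vendor_id, product_id): name}."""
    aliases = {}
    for entry in alias_entries:
        try:
            parsed = json.loads(entry)
            aliases[(parsed['vendor_id'], parsed['product_id'])] = (
                parsed['name'])
        except (ValueError, KeyError):
            # malformed JSON or missing keys: the real plugin logs an error
            # and skips the entry, as the tests below assert
            continue
    return aliases

# Usage (mirrors test_parse_pci_alias_entry):
# parse_pci_alias_sketch(['{"vendor_id": "foo1", "product_id": "bar1",'
#                         ' "name": "baz1"}'])
# => {('foo1', 'bar1'): 'baz1'}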
import mock from ironic_inspector import node_cache from ironic_inspector.plugins import base from ironic_inspector.plugins import pci_devices from ironic_inspector.test import base as test_base class TestPciDevicesHook(test_base.NodeTest): hook = pci_devices.PciDevicesHook() def test_parse_pci_alias_entry(self): pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' ' "name": "baz1"}', '{"vendor_id": "foo2", "product_id": "bar2",' ' "name": "baz2"}'] valid_pci_entry = {("foo1", "bar1"): "baz1", ("foo2", "bar2"): "baz2"} base.CONF.set_override('alias', pci_alias, 'pci_devices') parsed_pci_entry = pci_devices._parse_pci_alias_entry() self.assertEqual(valid_pci_entry, parsed_pci_entry) def test_parse_pci_alias_entry_no_entries(self): pci_alias = [] base.CONF.set_override('alias', pci_alias, 'pci_devices') parsed_pci_alias = pci_devices._parse_pci_alias_entry() self.assertFalse(parsed_pci_alias) @mock.patch('ironic_inspector.plugins.pci_devices.LOG') def test_parse_pci_alias_entry_invalid_json(self, mock_oslo_log): pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' ' "name": "baz1"}', '{"invalid" = "entry"}'] base.CONF.set_override('alias', pci_alias, 'pci_devices') valid_pci_alias = {("foo1", "bar1"): "baz1"} parsed_pci_alias = pci_devices._parse_pci_alias_entry() self.assertEqual(valid_pci_alias, parsed_pci_alias) mock_oslo_log.error.assert_called_once() @mock.patch('ironic_inspector.plugins.pci_devices.LOG') def test_parse_pci_alias_entry_invalid_keys(self, mock_oslo_log): pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' ' "name": "baz1"}', '{"invalid": "keys"}'] base.CONF.set_override('alias', pci_alias, 'pci_devices') valid_pci_alias = {("foo1", "bar1"): "baz1"} parsed_pci_alias = pci_devices._parse_pci_alias_entry() self.assertEqual(valid_pci_alias, parsed_pci_alias) mock_oslo_log.error.assert_called_once() @mock.patch.object(hook, 'aliases', {("1234", "5678"): "pci_dev1", ("9876", "5432"): "pci_dev2"}) @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', autospec=True) def test_before_update(self, mock_update_props): self.data['pci_devices'] = [ {"vendor_id": "1234", "product_id": "5678"}, {"vendor_id": "1234", "product_id": "5678"}, {"vendor_id": "1234", "product_id": "7890"}, {"vendor_id": "9876", "product_id": "5432"} ] expected_pci_devices_count = {"pci_dev1": 2, "pci_dev2": 1} self.hook.before_update(self.data, self.node_info) mock_update_props.assert_called_once_with(self.node_info, **expected_pci_devices_count) @mock.patch('ironic_inspector.plugins.pci_devices.LOG') @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', autospec=True) def test_before_update_no_pci_info_from_ipa(self, mock_update_props, mock_oslo_log): pci_alias = ['{"vendor_id": "foo1", "product_id": "bar1",' ' "name": "baz1"}'] base.CONF.set_override('alias', pci_alias, 'pci_devices') self.hook.before_update(self.data, self.node_info) mock_oslo_log.warning.assert_called_once() self.assertFalse(mock_update_props.called) @mock.patch.object(pci_devices, '_parse_pci_alias_entry') @mock.patch('ironic_inspector.plugins.pci_devices.LOG') @mock.patch.object(node_cache.NodeInfo, 'update_capabilities', autospec=True) def test_before_update_no_match(self, mock_update_props, mock_oslo_log, mock_parse_pci_alias): self.data['pci_devices'] = [ {"vendor_id": "1234", "product_id": "5678"}, {"vendor_id": "1234", "product_id": "7890"}, ] mock_parse_pci_alias.return_value = {("9876", "5432"): "pci_dev"} self.hook.before_update(self.data, self.node_info) 
self.assertFalse(mock_update_props.called) self.assertFalse(mock_oslo_log.info.called) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_introspect.py0000666000175100017510000004346413241323457026166 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import time import fixtures from ironicclient import exceptions import mock from oslo_config import cfg from ironic_inspector.common import ironic as ir_utils from ironic_inspector import introspect from ironic_inspector import introspection_state as istate from ironic_inspector import node_cache from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector.test import base as test_base from ironic_inspector import utils CONF = cfg.CONF class BaseTest(test_base.NodeTest): def setUp(self): super(BaseTest, self).setUp() introspect._LAST_INTROSPECTION_TIME = 0 self.node.power_state = 'power off' self.ports = [mock.Mock(address=m) for m in self.macs] self.ports_dict = collections.OrderedDict((p.address, p) for p in self.ports) self.node_info = mock.Mock(uuid=self.uuid, options={}) self.node_info.ports.return_value = self.ports_dict self.node_info.node.return_value = self.node driver_fixture = self.useFixture(fixtures.MockPatchObject( pxe_filter, 'driver', autospec=True)) driver_mock = driver_fixture.mock.return_value self.sync_filter_mock = driver_mock.sync def _prepare(self, client_mock): cli = client_mock.return_value cli.node.get.return_value = self.node cli.node.validate.return_value = mock.Mock(power={'result': True}) return cli @mock.patch.object(node_cache, 'start_introspection', autospec=True) @mock.patch.object(ir_utils, 'get_client', autospec=True) class TestIntrospect(BaseTest): def test_ok(self, client_mock, start_mock): cli = self._prepare(client_mock) start_mock.return_value = self.node_info introspect.introspect(self.node.uuid) cli.node.get.assert_called_once_with(self.uuid) cli.node.validate.assert_called_once_with(self.uuid) start_mock.assert_called_once_with(self.uuid, bmc_address=self.bmc_address, ironic=cli) self.node_info.ports.assert_called_once_with() self.node_info.add_attribute.assert_called_once_with('mac', self.macs) self.sync_filter_mock.assert_called_with(cli) cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') self.node_info.acquire_lock.assert_called_once_with() self.node_info.release_lock.assert_called_once_with() def test_loopback_bmc_address(self, client_mock, start_mock): self.node.driver_info['ipmi_address'] = '127.0.0.1' cli = self._prepare(client_mock) start_mock.return_value = self.node_info introspect.introspect(self.node.uuid) cli.node.get.assert_called_once_with(self.uuid) cli.node.validate.assert_called_once_with(self.uuid) start_mock.assert_called_once_with(self.uuid, bmc_address=None, ironic=cli) self.node_info.ports.assert_called_once_with() self.node_info.add_attribute.assert_called_once_with('mac', self.macs) 
self.sync_filter_mock.assert_called_with(cli) cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') self.node_info.acquire_lock.assert_called_once_with() self.node_info.release_lock.assert_called_once_with() def test_ok_ilo_and_drac(self, client_mock, start_mock): cli = self._prepare(client_mock) start_mock.return_value = self.node_info for name in ('ilo_address', 'drac_host'): self.node.driver_info = {name: self.bmc_address} introspect.introspect(self.node.uuid) start_mock.assert_called_with(self.uuid, bmc_address=self.bmc_address, ironic=cli) def test_power_failure(self, client_mock, start_mock): cli = self._prepare(client_mock) cli.node.set_boot_device.side_effect = exceptions.BadRequest() cli.node.set_power_state.side_effect = exceptions.BadRequest() start_mock.return_value = self.node_info introspect.introspect(self.node.uuid) cli.node.get.assert_called_once_with(self.uuid) start_mock.assert_called_once_with(self.uuid, bmc_address=self.bmc_address, ironic=cli) cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') start_mock.return_value.finished.assert_called_once_with( introspect.istate.Events.error, error=mock.ANY) self.node_info.acquire_lock.assert_called_once_with() self.node_info.release_lock.assert_called_once_with() def test_unexpected_error(self, client_mock, start_mock): cli = self._prepare(client_mock) start_mock.return_value = self.node_info self.sync_filter_mock.side_effect = RuntimeError() introspect.introspect(self.node.uuid) cli.node.get.assert_called_once_with(self.uuid) start_mock.assert_called_once_with(self.uuid, bmc_address=self.bmc_address, ironic=cli) self.assertFalse(cli.node.set_boot_device.called) start_mock.return_value.finished.assert_called_once_with( introspect.istate.Events.error, error=mock.ANY) self.node_info.acquire_lock.assert_called_once_with() self.node_info.release_lock.assert_called_once_with() def test_no_macs(self, client_mock, start_mock): cli = self._prepare(client_mock) self.node_info.ports.return_value = [] start_mock.return_value = self.node_info introspect.introspect(self.node.uuid) self.node_info.ports.assert_called_once_with() start_mock.assert_called_once_with(self.uuid, bmc_address=self.bmc_address, ironic=cli) self.assertFalse(self.node_info.add_attribute.called) self.assertFalse(self.sync_filter_mock.called) cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') def test_no_lookup_attrs(self, client_mock, start_mock): cli = self._prepare(client_mock) self.node_info.ports.return_value = [] start_mock.return_value = self.node_info self.node_info.attributes = {} introspect.introspect(self.uuid) self.node_info.ports.assert_called_once_with() self.node_info.finished.assert_called_once_with( introspect.istate.Events.error, error=mock.ANY) self.assertEqual(0, self.sync_filter_mock.call_count) self.assertEqual(0, cli.node.set_power_state.call_count) self.node_info.acquire_lock.assert_called_once_with() self.node_info.release_lock.assert_called_once_with() def test_no_lookup_attrs_with_node_not_found_hook(self, client_mock, start_mock): CONF.set_override('node_not_found_hook', 'example', 'processing') cli = self._prepare(client_mock) self.node_info.ports.return_value = [] start_mock.return_value = self.node_info self.node_info.attributes = {} 
introspect.introspect(self.uuid) self.node_info.ports.assert_called_once_with() self.assertFalse(self.node_info.finished.called) cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') def test_failed_to_get_node(self, client_mock, start_mock): cli = client_mock.return_value cli.node.get.side_effect = exceptions.NotFound() self.assertRaisesRegex(utils.Error, 'Node %s was not found' % self.uuid, introspect.introspect, self.uuid) cli.node.get.side_effect = exceptions.BadRequest() self.assertRaisesRegex(utils.Error, '%s: Bad Request' % self.uuid, introspect.introspect, self.uuid) self.assertEqual(0, self.node_info.ports.call_count) self.assertEqual(0, self.sync_filter_mock.call_count) self.assertEqual(0, cli.node.set_power_state.call_count) self.assertFalse(start_mock.called) self.assertFalse(self.node_info.acquire_lock.called) def test_failed_to_validate_node(self, client_mock, start_mock): cli = client_mock.return_value cli.node.get.return_value = self.node cli.node.validate.return_value = mock.Mock(power={'result': False, 'reason': 'oops'}) self.assertRaisesRegex( utils.Error, 'Failed validation of power interface', introspect.introspect, self.uuid) cli.node.validate.assert_called_once_with(self.uuid) self.assertEqual(0, self.node_info.ports.call_count) self.assertEqual(0, self.sync_filter_mock.call_count) self.assertEqual(0, cli.node.set_power_state.call_count) self.assertFalse(start_mock.called) self.assertFalse(self.node_info.acquire_lock.called) def test_wrong_provision_state(self, client_mock, start_mock): self.node.provision_state = 'active' cli = client_mock.return_value cli.node.get.return_value = self.node self.assertRaisesRegex( utils.Error, 'Invalid provision state for introspection: "active"', introspect.introspect, self.uuid) self.assertEqual(0, self.node_info.ports.call_count) self.assertEqual(0, self.sync_filter_mock.call_count) self.assertEqual(0, cli.node.set_power_state.call_count) self.assertFalse(start_mock.called) self.assertFalse(self.node_info.acquire_lock.called) @mock.patch.object(time, 'time') def test_introspection_delay(self, time_mock, client_mock, start_mock): time_mock.return_value = 42 introspect._LAST_INTROSPECTION_TIME = 40 CONF.set_override('introspection_delay', 10) cli = self._prepare(client_mock) start_mock.return_value = self.node_info introspect.introspect(self.uuid) self.sleep_fixture.mock.assert_called_once_with(8) cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') # updated to the current time.time() self.assertEqual(42, introspect._LAST_INTROSPECTION_TIME) @mock.patch.object(time, 'time') def test_introspection_delay_not_needed(self, time_mock, client_mock, start_mock): time_mock.return_value = 100 introspect._LAST_INTROSPECTION_TIME = 40 CONF.set_override('introspection_delay', 10) cli = self._prepare(client_mock) start_mock.return_value = self.node_info introspect.introspect(self.uuid) self.sleep_fixture.mock.assert_not_called() cli.node.set_boot_device.assert_called_once_with(self.uuid, 'pxe', persistent=False) cli.node.set_power_state.assert_called_once_with(self.uuid, 'reboot') # updated to the current time.time() self.assertEqual(100, introspect._LAST_INTROSPECTION_TIME) @mock.patch.object(node_cache, 'get_node', autospec=True) @mock.patch.object(ir_utils, 'get_client', autospec=True) class TestAbort(BaseTest): def setUp(self):
super(TestAbort, self).setUp() self.node_info.started_at = None self.node_info.finished_at = None # NOTE(milan): node_info.finished() is a mock, so it makes no fsm_event call of its own self.fsm_calls = [ mock.call(istate.Events.abort, strict=False), ] def test_ok(self, client_mock, get_mock): cli = self._prepare(client_mock) get_mock.return_value = self.node_info self.node_info.acquire_lock.return_value = True self.node_info.started_at = time.time() self.node_info.finished_at = None introspect.abort(self.node.uuid) get_mock.assert_called_once_with(self.uuid, ironic=cli, locked=False) self.node_info.acquire_lock.assert_called_once_with(blocking=False) self.sync_filter_mock.assert_called_once_with(cli) cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') self.node_info.finished.assert_called_once_with( introspect.istate.Events.abort_end, error='Canceled by operator') self.node_info.fsm_event.assert_has_calls(self.fsm_calls) def test_node_not_found(self, client_mock, get_mock): cli = self._prepare(client_mock) exc = utils.Error('Not found.', code=404) get_mock.side_effect = exc self.assertRaisesRegex(utils.Error, str(exc), introspect.abort, self.uuid) self.assertEqual(0, self.sync_filter_mock.call_count) self.assertEqual(0, cli.node.set_power_state.call_count) self.assertEqual(0, self.node_info.finished.call_count) self.assertEqual(0, self.node_info.fsm_event.call_count) def test_node_locked(self, client_mock, get_mock): cli = self._prepare(client_mock) get_mock.return_value = self.node_info self.node_info.acquire_lock.return_value = False self.node_info.started_at = time.time() self.assertRaisesRegex(utils.Error, 'Node is locked, please, ' 'retry later', introspect.abort, self.uuid) self.assertEqual(0, self.sync_filter_mock.call_count) self.assertEqual(0, cli.node.set_power_state.call_count) self.assertEqual(0, self.node_info.finished.call_count) self.assertEqual(0, self.node_info.fsm_event.call_count) def test_firewall_update_exception(self, client_mock, get_mock): cli = self._prepare(client_mock) get_mock.return_value = self.node_info self.node_info.acquire_lock.return_value = True self.node_info.started_at = time.time() self.node_info.finished_at = None self.sync_filter_mock.side_effect = Exception('Boom') introspect.abort(self.uuid) get_mock.assert_called_once_with(self.uuid, ironic=cli, locked=False) self.node_info.acquire_lock.assert_called_once_with(blocking=False) self.sync_filter_mock.assert_called_once_with(cli) cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') self.node_info.finished.assert_called_once_with( introspect.istate.Events.abort_end, error='Canceled by operator') self.node_info.fsm_event.assert_has_calls(self.fsm_calls) def test_node_power_off_exception(self, client_mock, get_mock): cli = self._prepare(client_mock) get_mock.return_value = self.node_info self.node_info.acquire_lock.return_value = True self.node_info.started_at = time.time() self.node_info.finished_at = None cli.node.set_power_state.side_effect = Exception('BadaBoom') introspect.abort(self.uuid) get_mock.assert_called_once_with(self.uuid, ironic=cli, locked=False) self.node_info.acquire_lock.assert_called_once_with(blocking=False) self.sync_filter_mock.assert_called_once_with(cli) cli.node.set_power_state.assert_called_once_with(self.uuid, 'off') self.node_info.finished.assert_called_once_with( introspect.istate.Events.abort_end, error='Canceled by operator') self.node_info.fsm_event.assert_has_calls(self.fsm_calls)
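# Editor's sketch of the control flow the TestAbort cases above pin down,
# inferred from their assertions. This is a hypothetical helper, not the
# actual ironic_inspector.introspect.abort: the lock is taken non-blocking,
# and a failing PXE-filter sync or power-off is swallowed so the node still
# ends up finished with the 'Canceled by operator' error (the real code
# raises utils.Error and uses istate.Events, simplified here).


def _abort_flow_sketch(node_info, ironic, filter_driver):
    if not node_info.acquire_lock(blocking=False):
        raise RuntimeError('Node is locked, please, retry later')
    node_info.fsm_event('abort', strict=False)  # istate.Events.abort above
    try:
        filter_driver.sync(ironic)  # best effort: see test_firewall_update_exception
    except Exception:
        pass
    try:
        ironic.node.set_power_state(node_info.uuid, 'off')  # best effort too
    except Exception:
        pass
    node_info.finished('abort_end', error='Canceled by operator')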
ironic-inspector-7.2.0/ironic_inspector/test/unit/test_pxe_filter.py0000666000175100017510000002773513241323457026140 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from automaton import exceptions as automaton_errors from eventlet import semaphore import fixtures from futurist import periodics import mock from oslo_config import cfg import six import stevedore from ironic_inspector.common import ironic as ir_utils from ironic_inspector.pxe_filter import base as pxe_filter from ironic_inspector.pxe_filter import interface from ironic_inspector.test import base as test_base CONF = cfg.CONF class TestDriverManager(test_base.BaseTest): def setUp(self): super(TestDriverManager, self).setUp() pxe_filter._DRIVER_MANAGER = None stevedore_driver_fixture = self.useFixture(fixtures.MockPatchObject( stevedore.driver, 'DriverManager', autospec=True)) self.stevedore_driver_mock = stevedore_driver_fixture.mock def test_default(self): driver_manager = pxe_filter._driver_manager() self.stevedore_driver_mock.assert_called_once_with( pxe_filter._STEVEDORE_DRIVER_NAMESPACE, name='iptables', invoke_on_load=True ) self.assertIsNotNone(driver_manager) self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) def test_pxe_filter_name(self): CONF.set_override('driver', 'foo', 'pxe_filter') driver_manager = pxe_filter._driver_manager() self.stevedore_driver_mock.assert_called_once_with( pxe_filter._STEVEDORE_DRIVER_NAMESPACE, 'foo', invoke_on_load=True ) self.assertIsNotNone(driver_manager) self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) def test_default_existing_driver_manager(self): pxe_filter._DRIVER_MANAGER = True driver_manager = pxe_filter._driver_manager() self.stevedore_driver_mock.assert_not_called() self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) def test_manage_firewall(self): # FIXME(milan): to be removed after the transition period of # deprecating the firewall option group # NOTE(milan) the default filter driver is iptables # this should revert it to noop CONF.set_override('manage_firewall', False, 'iptables') driver_manager = pxe_filter._driver_manager() self.stevedore_driver_mock.assert_called_once_with( pxe_filter._STEVEDORE_DRIVER_NAMESPACE, name='noop', invoke_on_load=True) self.assertIsNotNone(driver_manager) self.assertIs(pxe_filter._DRIVER_MANAGER, driver_manager) class TestDriverManagerLoading(test_base.BaseTest): def setUp(self): super(TestDriverManagerLoading, self).setUp() pxe_filter._DRIVER_MANAGER = None @mock.patch.object(pxe_filter, 'NoopFilter', autospec=True) def test_pxe_filter_driver_loads(self, noop_driver_cls): CONF.set_override('driver', 'noop', 'pxe_filter') driver_manager = pxe_filter._driver_manager() noop_driver_cls.assert_called_once_with() self.assertIs(noop_driver_cls.return_value, driver_manager.driver) def test_invalid_filter_driver(self): CONF.set_override('driver', 'foo', 'pxe_filter') six.assertRaisesRegex(self, stevedore.exception.NoMatches, 'foo', pxe_filter._driver_manager) self.assertIsNone(pxe_filter._DRIVER_MANAGER) class 
BaseFilterBaseTest(test_base.BaseTest): def setUp(self): super(BaseFilterBaseTest, self).setUp() self.mock_lock = mock.MagicMock(spec=semaphore.BoundedSemaphore) self.mock_bounded_semaphore = self.useFixture( fixtures.MockPatchObject(semaphore, 'BoundedSemaphore')).mock self.mock_bounded_semaphore.return_value = self.mock_lock self.driver = pxe_filter.NoopFilter() def assert_driver_is_locked(self): """Assert the driver is currently locked and wasn't locked before.""" self.driver.lock.__enter__.assert_called_once_with() self.driver.lock.__exit__.assert_not_called() def assert_driver_was_locked_once(self): """Assert the driver was locked exactly once before.""" self.driver.lock.__enter__.assert_called_once_with() self.driver.lock.__exit__.assert_called_once_with(None, None, None) def assert_driver_was_not_locked(self): """Assert the driver was not locked""" self.mock_lock.__enter__.assert_not_called() self.mock_lock.__exit__.assert_not_called() class TestLockedDriverEvent(BaseFilterBaseTest): def setUp(self): super(TestLockedDriverEvent, self).setUp() self.mock_fsm_reset_on_error = self.useFixture( fixtures.MockPatchObject(self.driver, 'fsm_reset_on_error')).mock self.expected_args = (None,) self.expected_kwargs = {'foo': None} self.mock_fsm = self.useFixture( fixtures.MockPatchObject(self.driver, 'fsm')).mock (self.driver.fsm_reset_on_error.return_value. __enter__.return_value) = self.mock_fsm def test_locked_driver_event(self): event = 'foo' @pxe_filter.locked_driver_event(event) def fun(driver, *args, **kwargs): self.assertIs(self.driver, driver) self.assertEqual(self.expected_args, args) self.assertEqual(self.expected_kwargs, kwargs) self.assert_driver_is_locked() self.assert_driver_was_not_locked() fun(self.driver, *self.expected_args, **self.expected_kwargs) self.mock_fsm_reset_on_error.assert_called_once_with() self.mock_fsm.process_event.assert_called_once_with(event) self.assert_driver_was_locked_once() class TestBaseFilterFsmPrecautions(BaseFilterBaseTest): def setUp(self): super(TestBaseFilterFsmPrecautions, self).setUp() self.mock_fsm = self.useFixture( fixtures.MockPatchObject(pxe_filter.NoopFilter, 'fsm')).mock # NOTE(milan): overriding driver so that the patch ^ is applied self.mock_bounded_semaphore.reset_mock() self.driver = pxe_filter.NoopFilter() self.mock_reset = self.useFixture( fixtures.MockPatchObject(self.driver, 'reset')).mock def test___init__(self): self.assertIs(self.mock_lock, self.driver.lock) self.mock_bounded_semaphore.assert_called_once_with() self.assertIs(self.mock_fsm, self.driver.fsm) self.mock_fsm.initialize.assert_called_once_with( start_state=pxe_filter.States.uninitialized) def test_fsm_reset_on_error(self): with self.driver.fsm_reset_on_error() as fsm: self.assertIs(self.mock_fsm, fsm) self.mock_reset.assert_not_called() def test_fsm_automaton_error(self): def fun(): with self.driver.fsm_reset_on_error(): raise automaton_errors.NotFound('Oops!') self.assertRaisesRegex(pxe_filter.InvalidFilterDriverState, '.*NoopFilter.*Oops!', fun) self.mock_reset.assert_not_called() def test_fsm_reset_on_error_ctx_custom_error(self): class MyError(Exception): pass def fun(): with self.driver.fsm_reset_on_error(): raise MyError('Oops!') self.assertRaisesRegex(MyError, 'Oops!', fun) self.mock_reset.assert_called_once_with() class TestBaseFilterInterface(BaseFilterBaseTest): def setUp(self): super(TestBaseFilterInterface, self).setUp() self.mock_get_client = self.useFixture( fixtures.MockPatchObject(ir_utils, 'get_client')).mock self.mock_ironic = mock.Mock() 
self.mock_get_client.return_value = self.mock_ironic self.mock_periodic = self.useFixture( fixtures.MockPatchObject(periodics, 'periodic')).mock self.mock_reset = self.useFixture( fixtures.MockPatchObject(self.driver, 'reset')).mock self.mock_log = self.useFixture( fixtures.MockPatchObject(pxe_filter, 'LOG')).mock self.driver.fsm_reset_on_error = self.useFixture( fixtures.MockPatchObject(self.driver, 'fsm_reset_on_error')).mock def test_init_filter(self): self.driver.init_filter() self.mock_log.debug.assert_called_once_with( 'Initializing the PXE filter driver %s', self.driver) self.mock_reset.assert_not_called() def test_sync(self): self.driver.sync(self.mock_ironic) self.mock_log.debug.assert_called_once_with( 'Syncing the PXE filter driver %s', self.driver) self.mock_reset.assert_not_called() def test_tear_down_filter(self): self.assert_driver_was_not_locked() self.driver.tear_down_filter() self.assert_driver_was_locked_once() self.mock_reset.assert_called_once_with() def test_get_periodic_sync_task(self): sync_mock = self.useFixture( fixtures.MockPatchObject(self.driver, 'sync')).mock self.driver.get_periodic_sync_task() self.mock_periodic.assert_called_once_with(spacing=15, enabled=True) self.mock_periodic.return_value.call_args[0][0]() sync_mock.assert_called_once_with(self.mock_get_client.return_value) def test_get_periodic_sync_task_invalid_state(self): sync_mock = self.useFixture( fixtures.MockPatchObject(self.driver, 'sync')).mock sync_mock.side_effect = pxe_filter.InvalidFilterDriverState('Oops!') self.driver.get_periodic_sync_task() self.mock_periodic.assert_called_once_with(spacing=15, enabled=True) self.assertRaisesRegex(periodics.NeverAgain, 'Oops!', self.mock_periodic.return_value.call_args[0][0]) def test_get_periodic_sync_task_custom_error(self): class MyError(Exception): pass sync_mock = self.useFixture( fixtures.MockPatchObject(self.driver, 'sync')).mock sync_mock.side_effect = MyError('Oops!') self.driver.get_periodic_sync_task() self.mock_periodic.assert_called_once_with(spacing=15, enabled=True) self.assertRaisesRegex( MyError, 'Oops!', self.mock_periodic.return_value.call_args[0][0]) def test_get_periodic_sync_task_disabled(self): CONF.set_override('sync_period', 0, 'pxe_filter') self.driver.get_periodic_sync_task() self.mock_periodic.assert_called_once_with(spacing=float('inf'), enabled=False) def test_get_periodic_sync_task_custom_spacing(self): CONF.set_override('sync_period', 4224, 'pxe_filter') self.driver.get_periodic_sync_task() self.mock_periodic.assert_called_once_with(spacing=4224, enabled=True) class TestDriverReset(BaseFilterBaseTest): def setUp(self): super(TestDriverReset, self).setUp() self.mock_fsm = self.useFixture( fixtures.MockPatchObject(self.driver, 'fsm')).mock def test_reset(self): self.driver.reset() self.assert_driver_was_not_locked() self.mock_fsm.process_event.assert_called_once_with( pxe_filter.Events.reset) class TestDriver(test_base.BaseTest): def setUp(self): super(TestDriver, self).setUp() self.mock_driver = mock.Mock(spec=interface.FilterDriver) self.mock__driver_manager = self.useFixture( fixtures.MockPatchObject(pxe_filter, '_driver_manager')).mock self.mock__driver_manager.return_value.driver = self.mock_driver def test_driver(self): ret = pxe_filter.driver() self.assertIs(self.mock_driver, ret) self.mock__driver_manager.assert_called_once_with() ironic-inspector-7.2.0/ironic_inspector/test/unit/test_dnsmasq_pxe_filter.py0000666000175100017510000003564613241323457027666 0ustar zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import os import fixtures from ironicclient import exc as ironic_exc import mock from oslo_config import cfg import six from ironic_inspector.common import ironic as ir_utils from ironic_inspector import node_cache from ironic_inspector.pxe_filter import dnsmasq from ironic_inspector.test import base as test_base CONF = cfg.CONF class DnsmasqTestBase(test_base.BaseTest): def setUp(self): super(DnsmasqTestBase, self).setUp() self.driver = dnsmasq.DnsmasqFilter() class TestDnsmasqDriverAPI(DnsmasqTestBase): def setUp(self): super(TestDnsmasqDriverAPI, self).setUp() self.mock__execute = self.useFixture( fixtures.MockPatchObject(dnsmasq, '_execute')).mock self.driver._sync = mock.Mock() self.driver._tear_down = mock.Mock() self.mock__purge_dhcp_hostsdir = self.useFixture( fixtures.MockPatchObject(dnsmasq, '_purge_dhcp_hostsdir')).mock self.mock_ironic = mock.Mock() get_client_mock = self.useFixture( fixtures.MockPatchObject(ir_utils, 'get_client')).mock get_client_mock.return_value = self.mock_ironic self.start_command = '/far/boo buzz -V --ack 42' CONF.set_override('dnsmasq_start_command', self.start_command, 'dnsmasq_pxe_filter') self.stop_command = '/what/ever' CONF.set_override('dnsmasq_stop_command', self.stop_command, 'dnsmasq_pxe_filter') def test_init_filter(self): self.driver.init_filter() self.mock__purge_dhcp_hostsdir.assert_called_once_with() self.driver._sync.assert_called_once_with(self.mock_ironic) self.mock__execute.assert_called_once_with(self.start_command) def test_sync(self): self.driver.init_filter() # NOTE(milan) init_filter performs an initial sync self.driver._sync.reset_mock() self.driver.sync(self.mock_ironic) self.driver._sync.assert_called_once_with(self.mock_ironic) def test_tear_down_filter(self): mock_reset = self.useFixture( fixtures.MockPatchObject(self.driver, 'reset')).mock self.driver.init_filter() self.driver.tear_down_filter() mock_reset.assert_called_once_with() def test_reset(self): self.driver.init_filter() # NOTE(milan) init_filter calls _base_cmd self.mock__execute.reset_mock() self.driver.reset() self.mock__execute.assert_called_once_with( self.stop_command, ignore_errors=True) class TestExclusiveWriteOrPass(test_base.BaseTest): def setUp(self): super(TestExclusiveWriteOrPass, self).setUp() self.mock_open = self.useFixture(fixtures.MockPatchObject( six.moves.builtins, 'open', new=mock.mock_open())).mock self.mock_fd = self.mock_open.return_value self.mock_fcntl = self.useFixture(fixtures.MockPatchObject( dnsmasq.fcntl, 'flock', autospec=True)).mock self.path = '/foo/bar/baz' self.buf = 'spam' self.fcntl_lock_call = mock.call( self.mock_fd, dnsmasq.fcntl.LOCK_EX | dnsmasq.fcntl.LOCK_NB) self.fcntl_unlock_call = mock.call(self.mock_fd, dnsmasq.fcntl.LOCK_UN) self.mock_log = self.useFixture(fixtures.MockPatchObject( dnsmasq.LOG, 'debug')).mock self.mock_sleep = self.useFixture(fixtures.MockPatchObject( dnsmasq.time, 'sleep')).mock def test_write(self): wrote = dnsmasq._exclusive_write_or_pass(self.path, self.buf) 
self.assertEqual(True, wrote) self.mock_open.assert_called_once_with(self.path, 'w', 1) self.mock_fcntl.assert_has_calls( [self.fcntl_lock_call, self.fcntl_unlock_call]) self.mock_fd.write.assert_called_once_with(self.buf) self.mock_log.assert_not_called() def test_write_would_block(self): err = IOError('Oops!') err.errno = os.errno.EWOULDBLOCK # lock/unlock paired calls self.mock_fcntl.side_effect = [ # first try err, None, # second try None, None] wrote = dnsmasq._exclusive_write_or_pass(self.path, self.buf) self.assertEqual(True, wrote) self.mock_open.assert_called_once_with(self.path, 'w', 1) self.mock_fcntl.assert_has_calls( 2 * [self.fcntl_lock_call, self.fcntl_unlock_call]) self.mock_fd.write.assert_called_once_with(self.buf) self.mock_log.assert_called_once_with( '%s locked; will try again (later)', self.path) self.mock_sleep.assert_called_once_with( dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS_DELAY) def test_write_would_block_too_many_times(self): self.useFixture(fixtures.MonkeyPatch( 'ironic_inspector.pxe_filter.dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS', 1)) err = IOError('Oops!') err.errno = os.errno.EWOULDBLOCK self.mock_fcntl.side_effect = [err, None] wrote = dnsmasq._exclusive_write_or_pass(self.path, self.buf) self.assertEqual(False, wrote) self.mock_open.assert_called_once_with(self.path, 'w', 1) self.mock_fcntl.assert_has_calls( [self.fcntl_lock_call, self.fcntl_unlock_call]) self.mock_fd.write.assert_not_called() retry_log_call = mock.call('%s locked; will try again (later)', self.path) failed_log_call = mock.call( 'Failed to write the exclusively-locked path: %(path)s for ' '%(attempts)s times', { 'attempts': dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS, 'path': self.path }) self.mock_log.assert_has_calls([retry_log_call, failed_log_call]) self.mock_sleep.assert_called_once_with( dnsmasq._EXCLUSIVE_WRITE_ATTEMPTS_DELAY) def test_write_custom_ioerror(self): err = IOError('Oops!') err.errno = os.errno.EBADF self.mock_fcntl.side_effect = [err, None] self.assertRaisesRegex( IOError, 'Oops!', dnsmasq._exclusive_write_or_pass, self.path, self.buf) self.mock_open.assert_called_once_with(self.path, 'w', 1) self.mock_fcntl.assert_has_calls( [self.fcntl_lock_call, self.fcntl_unlock_call]) self.mock_fd.write.assert_not_called() self.mock_log.assert_not_called() class TestMACHandlers(test_base.BaseTest): def setUp(self): super(TestMACHandlers, self).setUp() self.mock_listdir = self.useFixture( fixtures.MockPatchObject(os, 'listdir')).mock self.mock_stat = self.useFixture( fixtures.MockPatchObject(os, 'stat')).mock self.mock_remove = self.useFixture( fixtures.MockPatchObject(os, 'remove')).mock self.mac = 'ff:ff:ff:ff:ff:ff' self.dhcp_hostsdir = '/far' CONF.set_override('dhcp_hostsdir', self.dhcp_hostsdir, 'dnsmasq_pxe_filter') self.mock_join = self.useFixture( fixtures.MockPatchObject(os.path, 'join')).mock self.mock_join.return_value = "%s/%s" % (self.dhcp_hostsdir, self.mac) self.mock__exclusive_write_or_pass = self.useFixture( fixtures.MockPatchObject(dnsmasq, '_exclusive_write_or_pass')).mock def test__whitelist_mac(self): dnsmasq._whitelist_mac(self.mac) self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac) self.mock__exclusive_write_or_pass.assert_called_once_with( self.mock_join.return_value, '%s\n' % self.mac) def test__blacklist_mac(self): dnsmasq._blacklist_mac(self.mac) self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac) self.mock__exclusive_write_or_pass.assert_called_once_with( self.mock_join.return_value, '%s,ignore\n' %
self.mac) def test__get_blacklist(self): self.mock_listdir.return_value = [self.mac] self.mock_stat.return_value.st_size = len('%s,ignore\n' % self.mac) ret = dnsmasq._get_blacklist() self.assertEqual({self.mac}, ret) self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir) self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac) self.mock_stat.assert_called_once_with(self.mock_join.return_value) def test__get_no_blacklist(self): self.mock_listdir.return_value = [self.mac] self.mock_stat.return_value.st_size = len('%s\n' % self.mac) ret = dnsmasq._get_blacklist() self.assertEqual(set(), ret) self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir) self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac) self.mock_stat.assert_called_once_with(self.mock_join.return_value) def test__purge_dhcp_hostsdir(self): self.mock_listdir.return_value = [self.mac] dnsmasq._purge_dhcp_hostsdir() self.mock_listdir.assert_called_once_with(self.dhcp_hostsdir) self.mock_join.assert_called_once_with(self.dhcp_hostsdir, self.mac) self.mock_remove.assert_called_once_with('%s/%s' % (self.dhcp_hostsdir, self.mac)) def test_disabled__purge_dhcp_hostsdir(self): CONF.set_override('purge_dhcp_hostsdir', False, 'dnsmasq_pxe_filter') dnsmasq._purge_dhcp_hostsdir() self.mock_listdir.assert_not_called() self.mock_join.assert_not_called() self.mock_remove.assert_not_called() class TestSync(DnsmasqTestBase): def setUp(self): super(TestSync, self).setUp() self.mock__get_blacklist = self.useFixture( fixtures.MockPatchObject(dnsmasq, '_get_blacklist')).mock self.mock__whitelist_mac = self.useFixture( fixtures.MockPatchObject(dnsmasq, '_whitelist_mac')).mock self.mock__blacklist_mac = self.useFixture( fixtures.MockPatchObject(dnsmasq, '_blacklist_mac')).mock self.mock_ironic = mock.Mock() self.mock_utcnow = self.useFixture( fixtures.MockPatchObject(dnsmasq.timeutils, 'utcnow')).mock self.timestamp_start = datetime.datetime.utcnow() self.timestamp_end = (self.timestamp_start + datetime.timedelta(seconds=42)) self.mock_utcnow.side_effect = [self.timestamp_start, self.timestamp_end] self.mock_log = self.useFixture( fixtures.MockPatchObject(dnsmasq, 'LOG')).mock get_client_mock = self.useFixture( fixtures.MockPatchObject(ir_utils, 'get_client')).mock get_client_mock.return_value = self.mock_ironic self.mock_active_macs = self.useFixture( fixtures.MockPatchObject(node_cache, 'active_macs')).mock self.ironic_macs = {'new_mac', 'active_mac'} self.active_macs = {'active_mac'} self.blacklist_macs = {'gone_mac', 'active_mac'} self.mock__get_blacklist.return_value = self.blacklist_macs self.mock_ironic.port.list.return_value = [ mock.Mock(address=address) for address in self.ironic_macs] self.mock_active_macs.return_value = self.active_macs def test__sync(self): self.driver._sync(self.mock_ironic) self.mock__whitelist_mac.assert_has_calls([mock.call('active_mac'), mock.call('gone_mac')], any_order=True) self.mock__blacklist_mac.assert_has_calls([mock.call('new_mac')], any_order=True) self.mock_ironic.port.list.assert_called_once_with(limit=0, fields=['address']) self.mock_active_macs.assert_called_once_with() self.mock__get_blacklist.assert_called_once_with() self.mock_log.debug.assert_has_calls([ mock.call('Syncing the driver'), mock.call('The dnsmasq PXE filter was synchronized (took %s)', self.timestamp_end - self.timestamp_start) ]) @mock.patch('time.sleep', lambda _x: None) def test__sync_with_port_list_retries(self): self.mock_ironic.port.list.side_effect = [ ironic_exc.ConnectionRefused('boom'), 
[mock.Mock(address=address) for address in self.ironic_macs] ] self.driver._sync(self.mock_ironic) self.mock__whitelist_mac.assert_has_calls([mock.call('active_mac'), mock.call('gone_mac')], any_order=True) self.mock__blacklist_mac.assert_has_calls([mock.call('new_mac')], any_order=True) self.mock_ironic.port.list.assert_called_with(limit=0, fields=['address']) self.mock_active_macs.assert_called_once_with() self.mock__get_blacklist.assert_called_once_with() self.mock_log.debug.assert_has_calls([ mock.call('Syncing the driver'), mock.call('The dnsmasq PXE filter was synchronized (took %s)', self.timestamp_end - self.timestamp_start) ]) class Test_Execute(test_base.BaseTest): def setUp(self): super(Test_Execute, self).setUp() self.mock_execute = self.useFixture( fixtures.MockPatchObject(dnsmasq.processutils, 'execute') ).mock CONF.set_override('rootwrap_config', '/path/to/rootwrap.conf') self.rootwrap_cmd = dnsmasq._ROOTWRAP_COMMAND.format( rootwrap_config=CONF.rootwrap_config) self.useFixture(fixtures.MonkeyPatch( 'ironic_inspector.pxe_filter.dnsmasq._ROOTWRAP_COMMAND', self.rootwrap_cmd)) self.command = 'foobar baz' def test__execute(self): dnsmasq._execute(self.command) self.mock_execute.assert_called_once_with( self.command, run_as_root=True, shell=True, check_exit_code=True, root_helper=self.rootwrap_cmd) def test__execute_ignoring_errors(self): dnsmasq._execute(self.command, ignore_errors=True) self.mock_execute.assert_called_once_with( self.command, run_as_root=True, shell=True, check_exit_code=False, root_helper=self.rootwrap_cmd) def test__execute_empty(self): dnsmasq._execute() self.mock_execute.assert_not_called() ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_standard.py0000666000175100017510000004360713241323457027334 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
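# The classes below exercise the standard processing hooks shipped with
# ironic-inspector. Every hook implements the plugin interface from
# ironic_inspector.plugins.base: before_processing() runs on the raw
# ramdisk data before a node is matched, while before_update() runs on a
# matched node and may patch it. As an illustrative sketch only (a
# hypothetical ExampleHook, not part of this tree), a minimal hook would
# look roughly like:
#
#     from ironic_inspector.plugins import base
#
#     class ExampleHook(base.ProcessingHook):
#         def before_processing(self, introspection_data, **kwargs):
#             # normalize or validate the raw ramdisk data here
#             introspection_data.setdefault('example', True)
#
#         def before_update(self, introspection_data, node_info, **kwargs):
#             # patch the matched node with a JSON-patch list
#             node_info.patch([{'op': 'add', 'path': '/extra/example',
#                               'value': introspection_data['example']}])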
import mock from oslo_config import cfg from oslo_utils import units import six from ironic_inspector import node_cache from ironic_inspector.plugins import base from ironic_inspector.plugins import standard as std_plugins from ironic_inspector import process from ironic_inspector.test import base as test_base from ironic_inspector import utils CONF = cfg.CONF @mock.patch('ironic_inspector.common.ironic.get_client', new=mock.Mock()) class TestSchedulerHook(test_base.NodeTest): def setUp(self): super(TestSchedulerHook, self).setUp() self.hook = std_plugins.SchedulerHook() self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node) def test_hook_loadable_by_name(self): CONF.set_override('processing_hooks', 'scheduler', 'processing') ext = base.processing_hooks_manager()['scheduler'] self.assertIsInstance(ext.obj, std_plugins.SchedulerHook) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_ok(self, mock_patch): patch = [ {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, {'path': '/properties/memory_mb', 'value': '12288', 'op': 'add'}, ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patch, mock_patch) @mock.patch.object(node_cache.NodeInfo, 'patch') def test_no_overwrite(self, mock_patch): CONF.set_override('overwrite_existing', False, 'processing') self.node.properties = { 'memory_mb': '4096', 'cpu_arch': 'i686' } patch = [ {'path': '/properties/cpus', 'value': '4', 'op': 'add'}, ] self.hook.before_update(self.data, self.node_info) self.assertCalledWithPatch(patch, mock_patch) class TestValidateInterfacesHookLoad(test_base.NodeTest): def test_hook_loadable_by_name(self): CONF.set_override('processing_hooks', 'validate_interfaces', 'processing') ext = base.processing_hooks_manager()['validate_interfaces'] self.assertIsInstance(ext.obj, std_plugins.ValidateInterfacesHook) class TestValidateInterfacesHookBeforeProcessing(test_base.NodeTest): def setUp(self): super(TestValidateInterfacesHookBeforeProcessing, self).setUp() self.hook = std_plugins.ValidateInterfacesHook() def test_no_interfaces(self): self.assertRaisesRegex(utils.Error, 'Hardware inventory is empty or missing', self.hook.before_processing, {}) self.assertRaisesRegex(utils.Error, 'Hardware inventory is empty or missing', self.hook.before_processing, {'inventory': {}}) del self.inventory['interfaces'] self.assertRaisesRegex(utils.Error, 'interfaces key is missing or empty', self.hook.before_processing, self.data) def test_only_pxe(self): self.hook.before_processing(self.data) self.assertEqual(self.pxe_interfaces, self.data['interfaces']) self.assertEqual([self.pxe_mac], self.data['macs']) self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_only_pxe_mac_format(self): self.data['boot_interface'] = self.pxe_mac self.hook.before_processing(self.data) self.assertEqual(self.pxe_interfaces, self.data['interfaces']) self.assertEqual([self.pxe_mac], self.data['macs']) self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_only_pxe_not_found(self): self.data['boot_interface'] = 'aa:bb:cc:dd:ee:ff' self.assertRaisesRegex(utils.Error, 'No suitable interfaces', self.hook.before_processing, self.data) def test_only_pxe_no_boot_interface(self): del self.data['boot_interface'] self.hook.before_processing(self.data) self.active_interfaces[self.pxe_iface_name]['pxe'] = False self.all_interfaces[self.pxe_iface_name]['pxe'] = False self.assertEqual(self.active_interfaces, 
self.data['interfaces']) self.assertEqual(sorted(i['mac'] for i in self.active_interfaces.values()), sorted(self.data['macs'])) self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_only_active(self): CONF.set_override('add_ports', 'active', 'processing') self.hook.before_processing(self.data) self.assertEqual(self.active_interfaces, self.data['interfaces']) self.assertEqual(sorted(i['mac'] for i in self.active_interfaces.values()), sorted(self.data['macs'])) self.assertEqual(self.all_interfaces, self.data['all_interfaces']) def test_all(self): CONF.set_override('add_ports', 'all', 'processing') self.hook.before_processing(self.data) self.assertEqual(self.all_interfaces, self.data['interfaces']) self.assertEqual(sorted(i['mac'] for i in self.all_interfaces.values()), sorted(self.data['macs'])) self.assertEqual(self.all_interfaces, self.data['all_interfaces']) @mock.patch.object(node_cache.NodeInfo, 'create_ports') def test_disabled_bad_conf(self, mock_create_port): CONF.set_override('add_ports', 'disabled', 'processing') CONF.set_override('keep_ports', 'added', 'processing') self.assertRaisesRegex(utils.Error, 'Configuration error:', self.hook.__init__) mock_create_port.assert_not_called() @mock.patch.object(node_cache.NodeInfo, 'create_ports') def test_disabled(self, mock_create_port): CONF.set_override('add_ports', 'disabled', 'processing') CONF.set_override('keep_ports', 'all', 'processing') self.hook.before_processing(self.data) self.assertEqual(self.active_interfaces, self.data['interfaces']) mock_create_port.assert_not_called() def test_malformed_interfaces(self): self.inventory['interfaces'] = [ # no name {'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1'}, # empty {}, ] self.assertRaisesRegex(utils.Error, 'No interfaces supplied', self.hook.before_processing, self.data) def test_skipped_interfaces(self): CONF.set_override('add_ports', 'all', 'processing') self.inventory['interfaces'] = [ # local interface (by name) {'name': 'lo', 'mac_address': '11:11:11:11:11:11', 'ipv4_address': '1.1.1.1'}, # local interface (by IP address) {'name': 'em1', 'mac_address': '22:22:22:22:22:22', 'ipv4_address': '127.0.0.1'}, # no MAC provided {'name': 'em3', 'ipv4_address': '2.2.2.2'}, # malformed MAC provided {'name': 'em4', 'mac_address': 'foobar', 'ipv4_address': '2.2.2.2'}, ] self.assertRaisesRegex(utils.Error, 'No suitable interfaces found', self.hook.before_processing, self.data) @mock.patch.object(node_cache.NodeInfo, 'delete_port', autospec=True) @mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True) class TestValidateInterfacesHookBeforeUpdateDeletion(test_base.NodeTest): def setUp(self): super(TestValidateInterfacesHookBeforeUpdateDeletion, self).setUp() self.hook = std_plugins.ValidateInterfacesHook() self.interfaces_to_create = sorted(self.valid_interfaces.values(), key=lambda i: i['mac']) self.existing_ports = [mock.Mock(spec=['address', 'uuid'], address=a) for a in (self.macs[1], '44:44:44:44:44:44')] self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node, ports=self.existing_ports) def test_keep_all(self, mock_create_ports, mock_delete_port): self.hook.before_update(self.data, self.node_info) # NOTE(dtantsur): dictionary ordering is not defined mock_create_ports.assert_called_once_with(self.node_info, mock.ANY) self.assertEqual(self.interfaces_to_create, sorted(mock_create_ports.call_args[0][1], key=lambda i: i['mac'])) self.assertFalse(mock_delete_port.called) def test_keep_present(self, mock_create_ports, 
mock_delete_port): CONF.set_override('keep_ports', 'present', 'processing') self.data['all_interfaces'] = self.all_interfaces self.hook.before_update(self.data, self.node_info) mock_create_ports.assert_called_once_with(self.node_info, mock.ANY) self.assertEqual(self.interfaces_to_create, sorted(mock_create_ports.call_args[0][1], key=lambda i: i['mac'])) mock_delete_port.assert_called_once_with(self.node_info, self.existing_ports[1]) def test_keep_added(self, mock_create_ports, mock_delete_port): CONF.set_override('keep_ports', 'added', 'processing') self.data['macs'] = [self.pxe_mac] self.hook.before_update(self.data, self.node_info) mock_create_ports.assert_called_once_with(self.node_info, mock.ANY) self.assertEqual(self.interfaces_to_create, sorted(mock_create_ports.call_args[0][1], key=lambda i: i['mac'])) mock_delete_port.assert_any_call(self.node_info, self.existing_ports[0]) mock_delete_port.assert_any_call(self.node_info, self.existing_ports[1]) @mock.patch.object(node_cache.NodeInfo, 'patch_port', autospec=True) @mock.patch.object(node_cache.NodeInfo, 'create_ports', autospec=True) class TestValidateInterfacesHookBeforeUpdatePXEEnabled(test_base.NodeTest): def setUp(self): super(TestValidateInterfacesHookBeforeUpdatePXEEnabled, self).setUp() self.hook = std_plugins.ValidateInterfacesHook() # Note(milan) assumes the ordering of self.macs from test_base.NodeTest # where the first item '11:22:33:44:55:66' is the MAC of the # self.pxe_iface_name 'eth1', the "real" PXE interface sorted_interfaces = sorted(self.valid_interfaces.values(), key=lambda i: i['mac']) self.existing_ports = [ mock.Mock(spec=['address', 'uuid', 'pxe_enabled'], address=iface['mac'], pxe_enabled=True) for iface in sorted_interfaces ] self.node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=0, node=self.node, ports=self.existing_ports) def test_fix_pxe_enabled(self, mock_create_ports, mock_patch_port): self.hook.before_update(self.data, self.node_info) # Note(milan) there are just 2 self.valid_interfaces, 'eth1' and 'ib0' # eth1 is the PXE booting interface and eth1.mac < ib0.mac mock_patch_port.assert_called_once_with( self.node_info, self.existing_ports[1], [{'op': 'replace', 'path': '/pxe_enabled', 'value': False}]) def test_no_overwrite(self, mock_create_ports, mock_patch_port): CONF.set_override('overwrite_existing', False, 'processing') self.hook.before_update(self.data, self.node_info) self.assertFalse(mock_patch_port.called) class TestRootDiskSelection(test_base.NodeTest): def setUp(self): super(TestRootDiskSelection, self).setUp() self.hook = std_plugins.RootDiskSelectionHook() self.inventory['disks'] = [ {'model': 'Model 1', 'size': 20 * units.Gi, 'name': '/dev/sdb'}, {'model': 'Model 2', 'size': 5 * units.Gi, 'name': '/dev/sda'}, {'model': 'Model 3', 'size': 10 * units.Gi, 'name': '/dev/sdc'}, {'model': 'Model 4', 'size': 4 * units.Gi, 'name': '/dev/sdd'}, {'model': 'Too Small', 'size': 1 * units.Gi, 'name': '/dev/sde'}, ] self.matched = self.inventory['disks'][2].copy() self.node_info = mock.Mock(spec=node_cache.NodeInfo, _state='foo', uuid=self.uuid, **{'node.return_value': self.node}) def test_no_hints(self): del self.data['root_disk'] self.hook.before_update(self.data, self.node_info) self.assertEqual(0, self.data['local_gb']) self.assertNotIn('root_disk', self.data) self.node_info.update_properties.assert_called_once_with(local_gb='0') def test_no_hints_no_overwrite(self): CONF.set_override('overwrite_existing', False, 'processing') del self.data['root_disk'] self.hook.before_update(self.data, 
self.node_info) self.assertEqual(0, self.data['local_gb']) self.assertNotIn('root_disk', self.data) self.assertFalse(self.node_info.update_properties.called) def test_no_inventory(self): self.node.properties['root_device'] = {'model': 'foo'} del self.data['inventory'] del self.data['root_disk'] self.assertRaisesRegex(utils.Error, 'Hardware inventory is empty or missing', self.hook.before_update, self.data, self.node_info) self.assertNotIn('local_gb', self.data) self.assertNotIn('root_disk', self.data) self.assertFalse(self.node_info.update_properties.called) def test_no_disks(self): self.node.properties['root_device'] = {'size': 10} self.inventory['disks'] = [] six.assertRaisesRegex(self, utils.Error, 'No disks satisfied root device hints', self.hook.before_update, self.data, self.node_info) self.assertNotIn('local_gb', self.data) self.assertFalse(self.node_info.update_properties.called) def test_one_matches(self): self.node.properties['root_device'] = {'size': 10} self.hook.before_update(self.data, self.node_info) self.assertEqual(self.matched, self.data['root_disk']) self.assertEqual(9, self.data['local_gb']) self.node_info.update_properties.assert_called_once_with(local_gb='9') def test_local_gb_without_spacing(self): CONF.set_override('disk_partitioning_spacing', False, 'processing') self.node.properties['root_device'] = {'size': 10} self.hook.before_update(self.data, self.node_info) self.assertEqual(self.matched, self.data['root_disk']) self.assertEqual(10, self.data['local_gb']) self.node_info.update_properties.assert_called_once_with(local_gb='10') def test_all_match(self): self.node.properties['root_device'] = {'size': 10, 'model': 'Model 3'} self.hook.before_update(self.data, self.node_info) self.assertEqual(self.matched, self.data['root_disk']) self.assertEqual(9, self.data['local_gb']) self.node_info.update_properties.assert_called_once_with(local_gb='9') def test_one_fails(self): self.node.properties['root_device'] = {'size': 10, 'model': 'Model 42'} del self.data['root_disk'] self.assertRaisesRegex(utils.Error, 'No disks satisfied root device hints', self.hook.before_update, self.data, self.node_info) self.assertNotIn('local_gb', self.data) self.assertNotIn('root_disk', self.data) self.assertFalse(self.node_info.update_properties.called) def test_size_string(self): self.node.properties['root_device'] = {'size': '10'} self.hook.before_update(self.data, self.node_info) self.assertEqual(self.matched, self.data['root_disk']) self.assertEqual(9, self.data['local_gb']) self.node_info.update_properties.assert_called_once_with(local_gb='9') def test_size_invalid(self): for bad_size in ('foo', None, {}): self.node.properties['root_device'] = {'size': bad_size} self.assertRaisesRegex(utils.Error, 'No disks could be found', self.hook.before_update, self.data, self.node_info) self.assertNotIn('local_gb', self.data) self.assertFalse(self.node_info.update_properties.called) class TestRamdiskError(test_base.InventoryTest): def setUp(self): super(TestRamdiskError, self).setUp() self.msg = 'BOOM' self.bmc_address = '1.2.3.4' self.data['error'] = self.msg def test_no_logs(self): self.assertRaisesRegex(utils.Error, self.msg, process.process, self.data) ironic-inspector-7.2.0/ironic_inspector/test/unit/test_plugins_raid_device.py0000666000175100017510000001134313241323457027762 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from ironic_inspector import node_cache from ironic_inspector.plugins import base from ironic_inspector.plugins import raid_device from ironic_inspector.test import base as test_base class TestRaidDeviceDetection(test_base.NodeTest): hook = raid_device.RaidDeviceDetection() def test_loadable_by_name(self): base.CONF.set_override('processing_hooks', 'raid_device', 'processing') ext = base.processing_hooks_manager()['raid_device'] self.assertIsInstance(ext.obj, raid_device.RaidDeviceDetection) def test_missing_local_gb(self): introspection_data = {} self.hook.before_processing(introspection_data) self.assertEqual(1, introspection_data['local_gb']) def test_local_gb_not_changed(self): introspection_data = {'local_gb': 42} self.hook.before_processing(introspection_data) self.assertEqual(42, introspection_data['local_gb']) class TestRaidDeviceDetectionUpdate(test_base.NodeTest): hook = raid_device.RaidDeviceDetection() @mock.patch.object(node_cache.NodeInfo, 'patch') def _check(self, data, patch, mock_patch): self.hook.before_processing(data) self.hook.before_update(data, self.node_info) self.assertCalledWithPatch(patch, mock_patch) def test_no_previous_block_devices(self): introspection_data = {'inventory': { 'disks': [ {'name': '/dev/sda', 'serial': 'foo'}, {'name': '/dev/sdb', 'serial': 'bar'}, ] }} expected = [{'op': 'add', 'path': '/extra/block_devices', 'value': {'serials': ['foo', 'bar']}}] self._check(introspection_data, expected) def test_no_previous_block_devices_old_ramdisk(self): introspection_data = {'block_devices': {'serials': ['foo', 'bar']}} expected = [{'op': 'add', 'path': '/extra/block_devices', 'value': introspection_data['block_devices']}] self._check(introspection_data, expected) def test_root_device_found(self): self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} introspection_data = {'inventory': { 'disks': [ {'name': '/dev/sda', 'serial': 'foo'}, {'name': '/dev/sdb', 'serial': 'baz'}, ] }} expected = [{'op': 'remove', 'path': '/extra/block_devices'}, {'op': 'add', 'path': '/properties/root_device', 'value': {'serial': 'baz'}}] self._check(introspection_data, expected) def test_root_device_found_old_ramdisk(self): self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} introspection_data = {'block_devices': {'serials': ['foo', 'baz']}} expected = [{'op': 'remove', 'path': '/extra/block_devices'}, {'op': 'add', 'path': '/properties/root_device', 'value': {'serial': 'baz'}}] self._check(introspection_data, expected) def test_root_device_already_exposed(self): self.node.properties['root_device'] = {'serial': 'foo'} introspection_data = {'inventory': { 'disks': [ {'name': '/dev/sda', 'serial': 'foo'}, {'name': '/dev/sdb', 'serial': 'baz'}, ] }} self._check(introspection_data, []) def test_multiple_new_devices(self): self.node.extra['block_devices'] = {'serials': ['foo', 'bar']} introspection_data = {'inventory': { 'disks': [ {'name': '/dev/sda', 'serial': 'foo'}, {'name': '/dev/sdb', 'serial': 'baz'}, {'name': '/dev/sdc', 'serial': 'qux'}, ] }} self._check(introspection_data, []) def test_no_new_devices(self): self.node.extra['block_devices'] 
= {'serials': ['foo', 'bar']} introspection_data = {'inventory': { 'disks': [ {'name': '/dev/sda', 'serial': 'foo'}, {'name': '/dev/sdb', 'serial': 'bar'}, ] }} self._check(introspection_data, []) def test_no_block_devices_from_ramdisk(self): introspection_data = {} self._check(introspection_data, []) ironic-inspector-7.2.0/ironic_inspector/test/functional.py0000666000175100017510000007323113241323457024113 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet eventlet.monkey_patch() import contextlib # noqa import copy import datetime import json import os import tempfile import time import unittest import fixtures import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import timeutils from oslo_utils import uuidutils import pytz import requests import six from six.moves import urllib from ironic_inspector.cmd import all as inspector_cmd from ironic_inspector.common import ironic as ir_utils from ironic_inspector.common import swift from ironic_inspector import db from ironic_inspector import dbsync from ironic_inspector import introspection_state as istate from ironic_inspector import main from ironic_inspector import node_cache from ironic_inspector import rules from ironic_inspector.test import base from ironic_inspector.test.unit import test_rules CONF = """ [ironic] auth_type=none endpoint_override=http://url [pxe_filter] driver = noop [DEFAULT] debug = True introspection_delay = 0 auth_strategy=noauth [database] connection = sqlite:///%(db_file)s [processing] processing_hooks=$default_processing_hooks,lldp_basic """ DEFAULT_SLEEP = 2 TEST_CONF_FILE = None def get_test_conf_file(): global TEST_CONF_FILE if not TEST_CONF_FILE: d = tempfile.mkdtemp() TEST_CONF_FILE = os.path.join(d, 'test.conf') db_file = os.path.join(d, 'test.db') with open(TEST_CONF_FILE, 'wb') as fp: content = CONF % {'db_file': db_file} fp.write(content.encode('utf-8')) return TEST_CONF_FILE def get_error(response): return response.json()['error']['message'] def _query_string(*field_names): def outer(func): @six.wraps(func) def inner(*args, **kwargs): queries = [] for field_name in field_names: field = kwargs.pop(field_name, None) if field is not None: queries.append('%s=%s' % (field_name, field)) query_string = '&'.join(queries) if query_string: query_string = '?' 
+ query_string return func(*args, query_string=query_string, **kwargs) return inner return outer class Base(base.NodeTest): ROOT_URL = 'http://127.0.0.1:5050' IS_FUNCTIONAL = True def setUp(self): super(Base, self).setUp() rules.delete_all() self.cli_fixture = self.useFixture( fixtures.MockPatchObject(ir_utils, 'get_client')) self.cli = self.cli_fixture.mock.return_value self.cli.node.get.return_value = self.node self.cli.node.update.return_value = self.node self.cli.node.list.return_value = [self.node] self.patch = [ {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'}, {'path': '/properties/local_gb', 'value': '999', 'op': 'add'} ] self.patch_root_hints = [ {'op': 'add', 'path': '/properties/cpus', 'value': '4'}, {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'}, {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'}, {'path': '/properties/local_gb', 'value': '19', 'op': 'add'} ] self.node.power_state = 'power off' self.cfg = self.useFixture(config_fixture.Config()) conf_file = get_test_conf_file() self.cfg.set_config_files([conf_file]) def tearDown(self): super(Base, self).tearDown() node_cache._delete_node(self.uuid) def call(self, method, endpoint, data=None, expect_error=None, api_version=None): if data is not None: data = json.dumps(data) endpoint = self.ROOT_URL + endpoint headers = {'X-Auth-Token': 'token'} if api_version: headers[main._VERSION_HEADER] = '%d.%d' % api_version res = getattr(requests, method.lower())(endpoint, data=data, headers=headers) if expect_error: self.assertEqual(expect_error, res.status_code) else: if res.status_code >= 400: msg = ('%(meth)s %(url)s failed with code %(code)s: %(msg)s' % {'meth': method.upper(), 'url': endpoint, 'code': res.status_code, 'msg': get_error(res)}) raise AssertionError(msg) return res def call_introspect(self, uuid, **kwargs): endpoint = '/v1/introspection/%s' % uuid return self.call('post', endpoint, **kwargs) def call_get_status(self, uuid, **kwargs): return self.call('get', '/v1/introspection/%s' % uuid, **kwargs).json() @_query_string('marker', 'limit') def call_get_statuses(self, query_string='', **kwargs): path = '/v1/introspection' return self.call('get', path + query_string, **kwargs).json() def call_abort_introspect(self, uuid, **kwargs): return self.call('post', '/v1/introspection/%s/abort' % uuid, **kwargs) def call_reapply(self, uuid, **kwargs): return self.call('post', '/v1/introspection/%s/data/unprocessed' % uuid, **kwargs) def call_continue(self, data, **kwargs): return self.call('post', '/v1/continue', data=data, **kwargs).json() def call_add_rule(self, data, **kwargs): return self.call('post', '/v1/rules', data=data, **kwargs).json() def call_list_rules(self, **kwargs): return self.call('get', '/v1/rules', **kwargs).json()['rules'] def call_delete_rules(self, **kwargs): self.call('delete', '/v1/rules', **kwargs) def call_delete_rule(self, uuid, **kwargs): self.call('delete', '/v1/rules/' + uuid, **kwargs) def call_get_rule(self, uuid, **kwargs): return self.call('get', '/v1/rules/' + uuid, **kwargs).json() def _fake_status(self, finished=mock.ANY, state=mock.ANY, error=mock.ANY, started_at=mock.ANY, finished_at=mock.ANY, links=mock.ANY): return {'uuid': self.uuid, 'finished': finished, 'error': error, 'state': state, 'finished_at': finished_at, 'started_at': started_at, 'links': [{u'href': u'%s/v1/introspection/%s' % (self.ROOT_URL, self.uuid), u'rel': 
                              u'self'}]}

    def check_status(self, status, finished, state, error=None):
        self.assertEqual(
            self._fake_status(finished=finished,
                              state=state,
                              finished_at=finished and mock.ANY or None,
                              error=error),
            status
        )
        curr_time = datetime.datetime.fromtimestamp(
            time.time(), tz=pytz.timezone(time.tzname[0]))
        started_at = timeutils.parse_isotime(status['started_at'])
        self.assertLess(started_at, curr_time)
        if finished:
            finished_at = timeutils.parse_isotime(status['finished_at'])
            self.assertLess(started_at, finished_at)
            self.assertLess(finished_at, curr_time)
        else:
            self.assertIsNone(status['finished_at'])

    def db_row(self):
        """Return the database row matching self.uuid."""
        return db.model_query(db.Node).get(self.uuid)


class Test(Base):
    def test_bmc(self):
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.cli.node.update.assert_called_with(self.uuid, mock.ANY)
        self.assertCalledWithPatch(self.patch, self.cli.node.update)
        self.cli.port.create.assert_called_once_with(
            node_uuid=self.uuid, address='11:22:33:44:55:66', extra={},
            pxe_enabled=True)

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

    def test_port_creation_update_and_deletion(self):
        cfg.CONF.set_override('add_ports', 'active', 'processing')
        cfg.CONF.set_override('keep_ports', 'added', 'processing')

        uuid_to_delete = uuidutils.generate_uuid()
        uuid_to_update = uuidutils.generate_uuid()
        # Two ports already exist: one with incorrect pxe_enabled, the other
        # should be deleted.
        self.cli.node.list_ports.return_value = [
            mock.Mock(address=self.macs[1], uuid=uuid_to_update,
                      node_uuid=self.uuid, extra={}, pxe_enabled=True),
            mock.Mock(address='foobar', uuid=uuid_to_delete,
                      node_uuid=self.uuid, extra={}, pxe_enabled=True),
        ]
        # Two more ports are created, one with client_id. Make sure the
        # returned object has the same properties as requested in create().
        self.cli.port.create.side_effect = mock.Mock

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.cli.node.update.assert_called_with(self.uuid, mock.ANY)
        self.assertCalledWithPatch(self.patch, self.cli.node.update)
        calls = [
            mock.call(node_uuid=self.uuid, address=self.macs[0],
                      extra={}, pxe_enabled=True),
            mock.call(node_uuid=self.uuid, address=self.macs[2],
                      extra={'client-id': self.client_id},
                      pxe_enabled=False),
        ]
        self.cli.port.create.assert_has_calls(calls, any_order=True)
        self.cli.port.delete.assert_called_once_with(uuid_to_delete)
        self.cli.port.update.assert_called_once_with(
            uuid_to_update,
            [{'op': 'replace', 'path': '/pxe_enabled', 'value': False}])

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

    def test_introspection_statuses(self):
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        # NOTE(zhenguo): only test finished=False here, as we don't know
        # other nodes' status in this thread.
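        # The assertions below exercise the status listing API's pagination
        # contract as the surrounding calls demonstrate it: '?limit=N' caps
        # the page size and '?marker=<uuid>' names the last row of the
        # previous page, so a client walks the whole listing by feeding the
        # final UUID of each page back in as the next marker.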
statuses = self.call_get_statuses().get('introspection') self.assertIn(self._fake_status(finished=False), statuses) # check we've got 1 status with a limit of 1 statuses = self.call_get_statuses(limit=1).get('introspection') self.assertEqual(1, len(statuses)) all_statuses = self.call_get_statuses().get('introspection') marker_statuses = self.call_get_statuses( marker=self.uuid, limit=1).get('introspection') marker_index = all_statuses.index(self.call_get_status(self.uuid)) # marker is the last row on previous page self.assertEqual(all_statuses[marker_index+1:marker_index+2], marker_statuses) self.call_continue(self.data) eventlet.greenthread.sleep(DEFAULT_SLEEP) status = self.call_get_status(self.uuid) self.check_status(status, finished=True, state=istate.States.finished) # fetch all statuses and db nodes to assert pagination statuses = self.call_get_statuses().get('introspection') nodes = db.model_query(db.Node).order_by( db.Node.started_at.desc()).all() # assert ordering self.assertEqual([node.uuid for node in nodes], [status_.get('uuid') for status_ in statuses]) # assert pagination half = len(nodes) // 2 marker = nodes[half].uuid statuses = self.call_get_statuses(marker=marker).get('introspection') self.assertEqual([node.uuid for node in nodes[half + 1:]], [status_.get('uuid') for status_ in statuses]) # assert status links work self.assertEqual([self.call_get_status(status_.get('uuid')) for status_ in statuses], [self.call('GET', urllib.parse.urlparse( status_.get('links')[0].get('href')).path).json() for status_ in statuses]) def test_rules_api(self): res = self.call_list_rules() self.assertEqual([], res) rule = { 'conditions': [ {'op': 'eq', 'field': 'memory_mb', 'value': 1024}, ], 'actions': [{'action': 'fail', 'message': 'boom'}], 'description': 'Cool actions' } res = self.call_add_rule(rule) self.assertTrue(res['uuid']) rule['uuid'] = res['uuid'] rule['links'] = res['links'] rule['conditions'] = [ test_rules.BaseTest.condition_defaults(rule['conditions'][0]), ] self.assertEqual(rule, res) res = self.call('get', rule['links'][0]['href']).json() self.assertEqual(rule, res) res = self.call_list_rules() self.assertEqual(rule['links'], res[0].pop('links')) self.assertEqual([{'uuid': rule['uuid'], 'description': 'Cool actions'}], res) res = self.call_get_rule(rule['uuid']) self.assertEqual(rule, res) self.call_delete_rule(rule['uuid']) res = self.call_list_rules() self.assertEqual([], res) links = rule.pop('links') del rule['uuid'] for _ in range(3): self.call_add_rule(rule) res = self.call_list_rules() self.assertEqual(3, len(res)) self.call_delete_rules() res = self.call_list_rules() self.assertEqual([], res) self.call('get', links[0]['href'], expect_error=404) self.call('delete', links[0]['href'], expect_error=404) def test_introspection_rules(self): self.node.extra['bar'] = 'foo' rules = [ { 'conditions': [ {'field': 'memory_mb', 'op': 'eq', 'value': 12288}, {'field': 'local_gb', 'op': 'gt', 'value': 998}, {'field': 'local_gb', 'op': 'lt', 'value': 1000}, {'field': 'local_gb', 'op': 'matches', 'value': '[0-9]+'}, {'field': 'cpu_arch', 'op': 'contains', 'value': '[0-9]+'}, {'field': 'root_disk.wwn', 'op': 'is-empty'}, {'field': 'inventory.interfaces[*].ipv4_address', 'op': 'contains', 'value': r'127\.0\.0\.1', 'invert': True, 'multiple': 'all'}, {'field': 'i.do.not.exist', 'op': 'is-empty'}, ], 'actions': [ {'action': 'set-attribute', 'path': '/extra/foo', 'value': 'bar'} ] }, { 'conditions': [ {'field': 'memory_mb', 'op': 'ge', 'value': 100500}, ], 'actions': [ {'action': 
             'set-attribute', 'path': '/extra/bar', 'value': 'foo'},
                    {'action': 'fail', 'message': 'boom'}
                ]
            }
        ]
        for rule in rules:
            self.call_add_rule(rule)

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.call_continue(self.data)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.cli.node.update.assert_any_call(
            self.uuid,
            [{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}])

    def test_conditions_scheme_actions_path(self):
        rules = [
            {
                'conditions': [
                    {'field': 'node://properties.local_gb', 'op': 'eq',
                     'value': 40},
                    {'field': 'node://driver_info.ipmi_address', 'op': 'eq',
                     'value': self.bmc_address},
                ],
                'actions': [
                    {'action': 'set-attribute', 'path': '/extra/foo',
                     'value': 'bar'}
                ]
            },
            {
                'conditions': [
                    {'field': 'data://inventory.cpu.count', 'op': 'eq',
                     'value': self.data['inventory']['cpu']['count']},
                ],
                'actions': [
                    {'action': 'set-attribute',
                     'path': '/driver_info/ipmi_address',
                     'value': '{data[inventory][bmc_address]}'}
                ]
            }
        ]
        for rule in rules:
            self.call_add_rule(rule)

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.call_continue(self.data)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.cli.node.update.assert_any_call(
            self.uuid,
            [{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}])
        self.cli.node.update.assert_any_call(
            self.uuid,
            [{'op': 'add', 'path': '/driver_info/ipmi_address',
              'value': self.data['inventory']['bmc_address']}])

    def test_root_device_hints(self):
        self.node.properties['root_device'] = {'size': 20}

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.assertCalledWithPatch(self.patch_root_hints,
                                   self.cli.node.update)
        self.cli.port.create.assert_called_once_with(
            node_uuid=self.uuid, address='11:22:33:44:55:66', extra={},
            pxe_enabled=True)

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

    def test_abort_introspection(self):
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        res = self.call_abort_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.assertEqual(202, res.status_code)
        status = self.call_get_status(self.uuid)
        self.assertTrue(status['finished'])
        self.assertEqual('Canceled by operator', status['error'])

        # NOTE(mkovacik): we only check that this call doesn't pass OK; it
        # might fail either with a race condition (hard to test) that
        # yields a 'Node already finished.' error, or with an
        # attribute-based look-up error from some pre-processing hooks,
        # because node_info.finished() deletes the look-up attributes only
        # after releasing the node lock
        self.call('post', '/v1/continue', self.data, expect_error=400)

    @mock.patch.object(swift, 'store_introspection_data', autospec=True)
    @mock.patch.object(swift, 'get_introspection_data', autospec=True)
    def test_stored_data_processing(self, get_mock, store_mock):
        cfg.CONF.set_override('store_data', 'swift', 'processing')
        # ramdisk data copy
        # please mind the data is changed during processing
        ramdisk_data = json.dumps(copy.deepcopy(self.data))
        get_mock.return_value = ramdisk_data

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        status = self.call_get_status(self.uuid)
        inspect_started_at = timeutils.parse_isotime(status['started_at'])
        self.check_status(status, finished=True, state=istate.States.finished)

        res = self.call_reapply(self.uuid)
        self.assertEqual(202, res.status_code)
        self.assertEqual('', res.text)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

        # checks the started_at updated in DB is correct
        reapply_started_at = timeutils.parse_isotime(status['started_at'])
        self.assertLess(inspect_started_at, reapply_started_at)

        # reapply request data
        get_mock.assert_called_once_with(self.uuid,
                                         suffix='UNPROCESSED')

        # store ramdisk data, store processing result data, store
        # reapply processing result data; the ordering isn't
        # guaranteed as storing the ramdisk data runs in a background
        # thread; however, the last call always has to be the reapply
        # processing result data
        store_ramdisk_call = mock.call(mock.ANY, self.uuid,
                                       suffix='UNPROCESSED')
        store_processing_call = mock.call(mock.ANY, self.uuid,
                                          suffix=None)
        self.assertEqual(3, len(store_mock.call_args_list))
        self.assertIn(store_ramdisk_call,
                      store_mock.call_args_list[0:2])
        self.assertIn(store_processing_call,
                      store_mock.call_args_list[0:2])
        self.assertEqual(store_processing_call,
                         store_mock.call_args_list[2])

        # second reapply call
        get_mock.return_value = ramdisk_data
        res = self.call_reapply(self.uuid)
        self.assertEqual(202, res.status_code)
        self.assertEqual('', res.text)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        # reapply saves the result
        self.assertEqual(4, len(store_mock.call_args_list))
        self.assertEqual(store_processing_call,
                         store_mock.call_args_list[-1])

    @mock.patch.object(swift, 'store_introspection_data', autospec=True)
    @mock.patch.object(swift, 'get_introspection_data', autospec=True)
    def test_edge_state_transitions(self, get_mock, store_mock):
        """Assert state transitions work as expected in edge conditions."""
        cfg.CONF.set_override('store_data', 'swift', 'processing')
        # ramdisk data copy
        # please mind the data is changed during processing
        ramdisk_data = json.dumps(copy.deepcopy(self.data))
        get_mock.return_value = ramdisk_data

        # multiple introspect calls
        self.call_introspect(self.uuid)
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        # an error -start-> starting state transition is possible
        self.call_abort_introspect(self.uuid)
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        # double abort works
        self.call_abort_introspect(self.uuid)
        status = self.call_get_status(self.uuid)
        error = status['error']
        self.check_status(status, finished=True, state=istate.States.error,
                          error=error)
        self.call_abort_introspect(self.uuid)
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.error,
                          error=error)

        # preventing stale data race condition
        # waiting -> processing is a strict state transition
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        row = self.db_row()
        row.state = istate.States.processing
        with db.ensure_transaction() as session:
            row.save(session)
        self.call_continue(self.data, expect_error=400)
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.error,
                          error=mock.ANY)
        self.assertIn('no defined transition', status['error'])

        # multiple reapply calls
        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.call_continue(self.data)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.call_reapply(self.uuid)
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished,
                          error=None)
        self.call_reapply(self.uuid)
        # assert a finished -reapply-> reapplying -> finished state
        # transition
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished,
                          error=None)

    def test_without_root_disk(self):
        del self.data['root_disk']
        self.inventory['disks'] = []
        self.patch[-1] = {'path': '/properties/local_gb',
                          'value': '0', 'op': 'add'}

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        self.cli.node.update.assert_called_with(self.uuid, mock.ANY)
        self.assertCalledWithPatch(self.patch, self.cli.node.update)
        self.cli.port.create.assert_called_once_with(
            node_uuid=self.uuid, extra={}, address='11:22:33:44:55:66',
            pxe_enabled=True)

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

    @mock.patch.object(swift, 'store_introspection_data', autospec=True)
    @mock.patch.object(swift, 'get_introspection_data', autospec=True)
    def test_lldp_plugin(self, get_mock, store_mock):
        cfg.CONF.set_override('store_data', 'swift', 'processing')
        ramdisk_data = json.dumps(copy.deepcopy(self.data))
        get_mock.return_value = ramdisk_data

        self.call_introspect(self.uuid)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)
        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
                                                              'reboot')
        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=False, state=istate.States.waiting)

        res = self.call_continue(self.data)
        self.assertEqual({'uuid': self.uuid}, res)
        eventlet.greenthread.sleep(DEFAULT_SLEEP)

        status = self.call_get_status(self.uuid)
        self.check_status(status, finished=True, state=istate.States.finished)

        # Verify that the lldp_processed data is written to swift
        # as expected by the lldp plugin
        updated_data = store_mock.call_args[0][0]
        lldp_out = updated_data['all_interfaces']['eth1']

        expected_chassis_id = "11:22:33:aa:bb:cc"
        expected_port_id = "734"
        self.assertEqual(expected_chassis_id,
lldp_out['lldp_processed']['switch_chassis_id']) self.assertEqual(expected_port_id, lldp_out['lldp_processed']['switch_port_id']) @contextlib.contextmanager def mocked_server(): conf_file = get_test_conf_file() dbsync.main(args=['--config-file', conf_file, 'upgrade']) cfg.CONF.reset() cfg.CONF.unregister_opt(dbsync.command_opt) eventlet.greenthread.spawn_n(inspector_cmd.main, args=['--config-file', conf_file]) eventlet.greenthread.sleep(1) # Wait for service to start up to 30 seconds for i in range(10): try: requests.get('http://127.0.0.1:5050/v1') except requests.ConnectionError: if i == 9: raise print('Service did not start yet') eventlet.greenthread.sleep(3) else: break # start testing yield # Make sure all processes finished executing eventlet.greenthread.sleep(1) if __name__ == '__main__': with mocked_server(): unittest.main(verbosity=2) ironic-inspector-7.2.0/ironic_inspector/test/base.py0000666000175100017510000002004013241323457022651 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import time import fixtures import futurist import mock from oslo_concurrency import lockutils from oslo_config import fixture as config_fixture from oslo_log import log from oslo_utils import units from oslo_utils import uuidutils from oslotest import base as test_base from ironic_inspector.common import i18n import ironic_inspector.conf from ironic_inspector.conf import opts as conf_opts from ironic_inspector import db from ironic_inspector import introspection_state as istate from ironic_inspector import node_cache from ironic_inspector.plugins import base as plugins_base from ironic_inspector.test.unit import policy_fixture from ironic_inspector import utils CONF = ironic_inspector.conf.CONF class BaseTest(test_base.BaseTestCase): IS_FUNCTIONAL = False def setUp(self): super(BaseTest, self).setUp() if not self.IS_FUNCTIONAL: self.init_test_conf() self.session = db.get_writer_session() engine = self.session.get_bind() db.Base.metadata.create_all(engine) engine.connect() self.addCleanup(engine.dispose) plugins_base._HOOKS_MGR = None node_cache._SEMAPHORES = lockutils.Semaphores() patch = mock.patch.object(i18n, '_', lambda s: s) patch.start() # 'p=patch' magic is due to how closures work self.addCleanup(lambda p=patch: p.stop()) utils._EXECUTOR = futurist.SynchronousExecutor(green=True) def init_test_conf(self): CONF.reset() log.register_options(CONF) self.cfg = self.useFixture(config_fixture.Config(CONF)) self.cfg.set_default('connection', "sqlite:///", group='database') self.cfg.set_default('slave_connection', None, group='database') self.cfg.set_default('max_retries', 10, group='database') conf_opts.parse_args([], default_config_files=[]) self.policy = self.useFixture(policy_fixture.PolicyFixture()) def assertPatchEqual(self, expected, actual): expected = sorted(expected, key=lambda p: p['path']) actual = sorted(actual, key=lambda p: p['path']) self.assertEqual(expected, actual) def assertCalledWithPatch(self, expected, mock_call): def _get_patch_param(call): try: if 
isinstance(call[0][1], list): return call[0][1] except IndexError: pass return call[0][0] actual = sum(map(_get_patch_param, mock_call.call_args_list), []) self.assertPatchEqual(actual, expected) class InventoryTest(BaseTest): def setUp(self): super(InventoryTest, self).setUp() # Prepare some realistic inventory # https://github.com/openstack/ironic-inspector/blob/master/HTTP-API.rst # noqa self.bmc_address = '1.2.3.4' self.macs = ( ['11:22:33:44:55:66', '66:55:44:33:22:11', '7c:fe:90:29:26:52']) self.ips = ['1.2.1.2', '1.2.1.1', '1.2.1.3'] self.inactive_mac = '12:12:21:12:21:12' self.pxe_mac = self.macs[0] self.all_macs = self.macs + [self.inactive_mac] self.pxe_iface_name = 'eth1' self.client_id = ( 'ff:00:00:00:00:00:02:00:00:02:c9:00:7c:fe:90:03:00:29:26:52') self.valid_interfaces = { self.pxe_iface_name: {'ip': self.ips[0], 'mac': self.macs[0], 'client_id': None, 'pxe': True}, 'ib0': {'ip': self.ips[2], 'mac': self.macs[2], 'client_id': self.client_id, 'pxe': False} } self.data = { 'boot_interface': '01-' + self.pxe_mac.replace(':', '-'), 'inventory': { 'interfaces': [ {'name': 'eth1', 'mac_address': self.macs[0], 'ipv4_address': self.ips[0], 'lldp': [ [1, "04112233aabbcc"], [2, "07373334"], [3, "003c"]]}, {'name': 'eth2', 'mac_address': self.inactive_mac}, {'name': 'eth3', 'mac_address': self.macs[1], 'ipv4_address': self.ips[1]}, {'name': 'ib0', 'mac_address': self.macs[2], 'ipv4_address': self.ips[2], 'client_id': self.client_id} ], 'disks': [ {'name': '/dev/sda', 'model': 'Big Data Disk', 'size': 1000 * units.Gi}, {'name': '/dev/sdb', 'model': 'Small OS Disk', 'size': 20 * units.Gi}, ], 'cpu': { 'count': 4, 'architecture': 'x86_64' }, 'memory': { 'physical_mb': 12288 }, 'bmc_address': self.bmc_address }, 'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk', 'size': 1000 * units.Gi, 'wwn': None}, 'interfaces': self.valid_interfaces, } self.inventory = self.data['inventory'] self.all_interfaces = { 'eth1': {'mac': self.macs[0], 'ip': self.ips[0], 'client_id': None, 'pxe': True}, 'eth2': {'mac': self.inactive_mac, 'ip': None, 'client_id': None, 'pxe': False}, 'eth3': {'mac': self.macs[1], 'ip': self.ips[1], 'client_id': None, 'pxe': False}, 'ib0': {'mac': self.macs[2], 'ip': self.ips[2], 'client_id': self.client_id, 'pxe': False} } self.active_interfaces = { name: data for (name, data) in self.all_interfaces.items() if data.get('ip') } self.pxe_interfaces = { self.pxe_iface_name: self.all_interfaces[self.pxe_iface_name] } class NodeTest(InventoryTest): def setUp(self): super(NodeTest, self).setUp() self.uuid = uuidutils.generate_uuid() fake_node = { 'driver': 'pxe_ipmitool', 'driver_info': {'ipmi_address': self.bmc_address}, 'properties': {'cpu_arch': 'i386', 'local_gb': 40}, 'uuid': self.uuid, 'power_state': 'power on', 'provision_state': 'inspecting', 'extra': {}, 'instance_uuid': None, 'maintenance': False } mock_to_dict = mock.Mock(return_value=fake_node) self.node = mock.Mock(**fake_node) self.node.to_dict = mock_to_dict self.ports = [] self.node_info = node_cache.NodeInfo( uuid=self.uuid, started_at=datetime.datetime(1, 1, 1), node=self.node, ports=self.ports) self.node_info.node = mock.Mock(return_value=self.node) self.sleep_fixture = self.useFixture( fixtures.MockPatchObject(time, 'sleep', autospec=True)) class NodeStateTest(NodeTest): def setUp(self): super(NodeStateTest, self).setUp() self.node_info._version_id = uuidutils.generate_uuid() self.node_info._state = istate.States.starting self.db_node = db.Node(uuid=self.node_info.uuid, 
version_id=self.node_info._version_id, state=self.node_info._state, started_at=self.node_info.started_at, finished_at=self.node_info.finished_at, error=self.node_info.error) self.db_node.save(self.session) ironic-inspector-7.2.0/test-requirements.txt0000666000175100017510000000117413241323457021305 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. coverage!=4.4,>=4.0 # Apache-2.0 doc8>=0.6.0 # Apache-2.0 flake8-import-order>=0.13 # LGPLv3 hacking>=1.0.0 # Apache-2.0 mock>=2.0.0 # BSD sphinx!=1.6.6,>=1.6.2 # BSD openstackdocstheme>=1.18.1 # Apache-2.0 os-testr>=1.0.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 pydot3>=1.0.8 # MIT License ironic-inspector-7.2.0/playbooks/0000775000175100017510000000000013241324014017030 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/playbooks/legacy/0000775000175100017510000000000013241324014020274 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/0000775000175100017510000000000013241324014030216 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/run.yaml0000666000175100017510000001623013241323457031724 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-tempest-dsvm-ironic-inspector-discovery from old job gate-tempest-dsvm-ironic-inspector-discovery-ubuntu-xenial tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True" # Standardize VM size for each supported ramdisk case "tinyipa" in 'tinyipa') export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=384" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa" ;; 'tinyipa256') export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=256" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa" ;; 'coreos') export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=1280" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=coreos" ;; # if using a ramdisk without a known good value, use the devstack # default by not exporting any value for IRONIC_VM_SPECS_RAM esac EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export DEVSTACK_GATE_TEMPEST_REGEX="InspectorDiscoveryTest" EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export PROJECTS="openstack/ironic-inspector $PROJECTS" export PROJECTS="openstack/python-ironic-inspector-client $PROJECTS" export DEVSTACK_GATE_IRONIC_INSPECTOR=1 export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic-inspector git://git.openstack.org/openstack/ironic-inspector" export 
          IRONIC_INSPECTOR_AUTO_DISCOVERY=1
          if [ "$IRONIC_INSPECTOR_AUTO_DISCOVERY" == "1" ]; then
              # discovery test requires sudo for iptables and virsh
              export DEVSTACK_GATE_REMOVE_STACK_SUDO=0
              # enable enroll hook
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK=enroll"
              # we are deleting the node from ironic to simulate node discovery,
              # so inspector has to sync its cache asap
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_CLEAN_UP_PERIOD=5"
          fi
          # Make IPXE configuration consistent between Mitaka and Master
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_IPXE_ENABLED=True"
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_RAMDISK_ELEMENT=ironic-agent"
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_DHCP_FILTER=dnsmasq"
          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-vars-early
          # use tempest plugin
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/ironic-tempest-plugin'"
          export TEMPEST_CONCURRENCY=1
          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          export PROJECTS="openstack/ironic $PROJECTS"
          export PROJECTS="openstack/ironic-lib $PROJECTS"
          export PROJECTS="openstack/ironic-python-agent $PROJECTS"
          export PROJECTS="openstack/ironic-tempest-plugin $PROJECTS"
          export PROJECTS="openstack/python-ironicclient $PROJECTS"
          export PROJECTS="openstack/pyghmi $PROJECTS"
          export PROJECTS="openstack/virtualbmc $PROJECTS"
          export PYTHONUNBUFFERED=true
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_IRONIC=1
          export DEVSTACK_GATE_NEUTRON=1
          export DEVSTACK_GATE_VIRT_DRIVER=ironic
          export DEVSTACK_GATE_CONFIGDRIVE=1
          export DEVSTACK_GATE_IRONIC_DRIVER=ipmi
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEFAULT_DEPLOY_INTERFACE=direct"
          export BRANCH_OVERRIDE="{{ branch_override | default('default') }}"
          if [ "$BRANCH_OVERRIDE" != "default" ] ; then
              export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
          fi
          if [[ "$ZUUL_BRANCH" != "stable/ocata" && "$BRANCH_OVERRIDE" != "stable/ocata" ]]; then
              export DEVSTACK_GATE_TLSPROXY=1
          fi
          # direct deploy requires Swift temporary URLs
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_ENABLE_TEMPURLS=True"
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"SWIFT_TEMPURL_KEY=secretkey"
          if [ "wholedisk" == "wholedisk" ] ; then
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=True"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=0"
          else
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=False"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=1"
          fi
          if [ -n "" ] ; then
              export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=1
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=True"
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"USE_SUBNETPOOL=False"
          else
              export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=0
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=False"
          fi
          if [ "bios" == "uefi" ] ; then
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BOOT_MODE=uefi"
          fi
          export DEVSTACK_PROJECT_FROM_GIT=""
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=1"

          # Ensure the ironic-vars-EARLY file exists
          touch ironic-vars-early
          # Pull in the EARLY variables injected by the optional builders
          source ironic-vars-early

          export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic git://git.openstack.org/openstack/ironic"

          # Ensure the ironic-EXTRA-vars file exists
          touch ironic-extra-vars
          # Pull in the EXTRA variables injected by the optional builders
          source ironic-extra-vars

          cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
          ./safe-devstack-vm-gate-wrap.sh
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'
ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/post.yaml0000666000175100017510000000063313241323457032105 0ustar zuulzuul00000000000000- hosts: primary
  tasks:

    - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
      synchronize:
        src: '{{ ansible_user_dir }}/workspace/'
        dest: '{{ zuul.executor.log_root }}'
        mode: pull
        copy_links: true
        verify_host: true
        rsync_opts:
          - --include=/logs/**
          - --include=*/
          - --exclude=*
          - --prune-empty-dirs
ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-tempest-dsvm-python3/0000775000175100017510000000000013241324014027613 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-tempest-dsvm-python3/run.yaml0000666000175100017510000001132413241323457031320 0ustar zuulzuul00000000000000- hosts: all
  name: ironic-inspector-tempest-dsvm-python3
  tasks:

    - name: Ensure workspace directory
      file:
        path: '{{ ansible_user_dir }}/workspace'
        state: directory

    - shell:
        cmd: |
          set -e
          set -x
          cat > clonemap.yaml << EOF
          clonemap:
            - name: openstack-infra/devstack-gate
              dest: devstack-gate
          EOF
          /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
              git://git.openstack.org \
              openstack-infra/devstack-gate
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export DEVSTACK_GATE_USE_PYTHON3=True
          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          cat << 'EOF' >>"/tmp/dg-local.conf"
          [[local|localrc]]
          disable_service s-account
          disable_service s-container
          disable_service s-object
          disable_service s-proxy
          EOF
        executable: /bin/bash
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export DEVSTACK_GATE_TEMPEST_REGEX="Inspector"
          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-vars-early
          # use tempest plugin
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/ironic-tempest-plugin'"
          export TEMPEST_CONCURRENCY=1
          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          cat << 'EOF' >> ironic-extra-vars
          export IRONIC_INSPECTOR_AUTO_DISCOVERY=1
          if [ "$IRONIC_INSPECTOR_AUTO_DISCOVERY" == "1" ]; then
              # discovery test requires sudo for iptables and virsh
              export DEVSTACK_GATE_REMOVE_STACK_SUDO=0
              # enable enroll hook
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK=enroll"
              # we are deleting the node from ironic to simulate node discovery,
              # so inspector has to sync its cache asap
              export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_CLEAN_UP_PERIOD=5"
          fi
          # PXE Filter Driver
          export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_DHCP_FILTER=iptables"
          EOF
        chdir: '{{ ansible_user_dir }}/workspace'
      environment: '{{ zuul | zuul_legacy_vars }}'

    - shell:
        cmd: |
          set -e
          set -x
          export PYTHONUNBUFFERED=true
          export DEVSTACK_GATE_TEMPEST=1
          export DEVSTACK_GATE_IRONIC=1
          export DEVSTACK_GATE_IRONIC_INSPECTOR=1
          export DEVSTACK_GATE_NEUTRON=1
          export DEVSTACK_GATE_VIRT_DRIVER=ironic
          export DEVSTACK_GATE_CONFIGDRIVE=1
          export BRANCH_OVERRIDE="{{ branch_override | default('default') }}"
          if [ "$BRANCH_OVERRIDE"
!= "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi export PROJECTS="openstack/ironic openstack/ironic-inspector $PROJECTS" export PROJECTS="openstack/ironic-tempest-plugin $PROJECTS" export DEVSTACK_LOCAL_CONFIG="enable_plugin ironic git://git.openstack.org/openstack/ironic" export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic-inspector https://git.openstack.org/openstack/ironic-inspector" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_AUTOMATED_CLEAN_ENABLED=False" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BUILD_DEPLOY_RAMDISK=False" # IPA requires at least 1 GiB of RAM export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=1024"$'\n'"IRONIC_VM_COUNT=1" # Ensure the ironic-vars-EARLY file exists touch ironic-vars-early # Pull in the EARLY variables injected by the optional builders source ironic-vars-early # Ensure the ironic-EXTRA-vars file exists touch ironic-extra-vars # Pull in the EXTRA variables injected by the optional builders source ironic-extra-vars cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-tempest-dsvm-python3/post.yaml0000666000175100017510000000063313241323457031502 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-grenade-dsvm/0000775000175100017510000000000013241324014026135 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-grenade-dsvm/run.yaml0000666000175100017510000001776013241323457027654 0ustar zuulzuul00000000000000- hosts: all name: Autoconverted job legacy-grenade-dsvm-ironic-inspector from old job gate-grenade-dsvm-ironic-inspector-ubuntu-xenial tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack-infra/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ git://git.openstack.org \ openstack-infra/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-vars-early # Set this early so that we do not have to be as careful with builder ordering in jobs. 
export GRENADE_PLUGINRC="enable_grenade_plugin ironic https://git.openstack.org/openstack/ironic" EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export PROJECTS="openstack-dev/grenade $PROJECTS" export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_OS_TEST_TIMEOUT=2400 export DEVSTACK_GATE_TEMPEST_BAREMETAL_BUILD_TIMEOUT=1200 export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BUILD_DEPLOY_RAMDISK=False" export DEVSTACK_GATE_TLSPROXY=0 EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export GRENADE_PLUGINRC+=$'\n'"enable_grenade_plugin ironic-inspector https://git.openstack.org/openstack/ironic-inspector" EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True" # Standardize VM size for each supported ramdisk case "tinyipa" in 'tinyipa') export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=384" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa" ;; 'tinyipa256') export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=256" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa" ;; 'coreos') export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=1280" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=coreos" ;; # if using a ramdisk without a known good value, use the devstack # default by not exporting any value for IRONIC_VM_SPECS_RAM esac EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-vars-early # use tempest plugin export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/ironic-tempest-plugin'" export TEMPEST_CONCURRENCY=1 EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | cat << 'EOF' >> ironic-extra-vars export PROJECTS="openstack/ironic-inspector $PROJECTS" export PROJECTS="openstack/python-ironic-inspector-client $PROJECTS" export DEVSTACK_GATE_IRONIC_INSPECTOR=1 export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic-inspector git://git.openstack.org/openstack/ironic-inspector" export IRONIC_INSPECTOR_AUTO_DISCOVERY={inspector-auto-discovery} if [ "$IRONIC_INSPECTOR_AUTO_DISCOVERY" == "1" ]; then # discovery test requires sudo for iptables and virsh export DEVSTACK_GATE_REMOVE_STACK_SUDO=0 # enable enroll hook export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK=enroll" # we are deleting node from ironic for simulate node discovery, # so inspector has to sync cache asap export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_CLEAN_UP_PERIOD=5" fi # Make IPXE configuration consistent between Mitaka and Master export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_IPXE_ENABLED=True" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_RAMDISK_ELEMENT=ironic-agent" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_DHCP_FILTER=iptables" EOF chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PROJECTS="openstack/ironic $PROJECTS" export PROJECTS="openstack/ironic-lib $PROJECTS" export PROJECTS="openstack/ironic-python-agent $PROJECTS" export PROJECTS="openstack/ironic-tempest-plugin $PROJECTS" export PROJECTS="openstack/python-ironicclient $PROJECTS" export PROJECTS="openstack/pyghmi 
$PROJECTS" export PROJECTS="openstack/virtualbmc $PROJECTS" export PYTHONUNBUFFERED=true export DEVSTACK_GATE_TEMPEST=1 export DEVSTACK_GATE_IRONIC=1 export DEVSTACK_GATE_NEUTRON=1 export DEVSTACK_GATE_VIRT_DRIVER=ironic export DEVSTACK_GATE_CONFIGDRIVE=1 # TODO(dtantsur): change to ipmi in Rocky export DEVSTACK_GATE_IRONIC_DRIVER=pxe_ipmitool export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEFAULT_DEPLOY_INTERFACE=iscsi" export BRANCH_OVERRIDE="{{ branch_override | default('default') }}" if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi if [[ "$ZUUL_BRANCH" != "stable/ocata" && "$BRANCH_OVERRIDE" != "stable/ocata" ]]; then export DEVSTACK_GATE_TLSPROXY=1 fi if [ "partition" == "wholedisk" ] ; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=True" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=0" else export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=False" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=1" fi if [ -n "" ] ; then export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=1 export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=True" export DEVSTACK_LOCAL_CONFIG+=$'\n'"USE_SUBNETPOOL=False" else export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=0 export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=False" fi if [ "bios" == "uefi" ] ; then export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BOOT_MODE=uefi" fi export DEVSTACK_PROJECT_FROM_GIT="" export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=7" # Ensure the ironic-vars-EARLY file exists touch ironic-vars-early # Pull in the EARLY variables injected by the optional builders source ironic-vars-early export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic git://git.openstack.org/openstack/ironic" # Ensure the ironic-EXTRA-vars file exists touch ironic-extra-vars # Pull in the EXTRA variables injected by the optional builders source ironic-extra-vars cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' ironic-inspector-7.2.0/playbooks/legacy/ironic-inspector-grenade-dsvm/post.yaml0000666000175100017510000000063313241323457030024 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs ironic-inspector-7.2.0/setup.cfg0000666000175100017510000001002613241324014016647 0ustar zuulzuul00000000000000[metadata] name = ironic-inspector summary = Hardware introspection for OpenStack Bare Metal description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = https://docs.openstack.org/ironic-inspector/latest/ license = Apache-2 classifier = Environment :: Console Environment :: OpenStack Intended Audience :: System Administrators Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = ironic_inspector [entry_points] console_scripts = ironic-inspector = ironic_inspector.cmd.all:main 
ironic-inspector-dbsync = ironic_inspector.dbsync:main ironic-inspector-rootwrap = oslo_rootwrap.cmd:main ironic_inspector.hooks.processing = scheduler = ironic_inspector.plugins.standard:SchedulerHook validate_interfaces = ironic_inspector.plugins.standard:ValidateInterfacesHook ramdisk_error = ironic_inspector.plugins.standard:RamdiskErrorHook root_disk_selection = ironic_inspector.plugins.standard:RootDiskSelectionHook example = ironic_inspector.plugins.example:ExampleProcessingHook extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook local_link_connection = ironic_inspector.plugins.local_link_connection:GenericLocalLinkConnectionHook lldp_basic = ironic_inspector.plugins.lldp_basic:LLDPBasicProcessingHook pci_devices = ironic_inspector.plugins.pci_devices:PciDevicesHook ironic_inspector.hooks.node_not_found = example = ironic_inspector.plugins.example:example_not_found_hook enroll = ironic_inspector.plugins.discovery:enroll_node_not_found_hook ironic_inspector.rules.conditions = eq = ironic_inspector.plugins.rules:EqCondition lt = ironic_inspector.plugins.rules:LtCondition gt = ironic_inspector.plugins.rules:GtCondition le = ironic_inspector.plugins.rules:LeCondition ge = ironic_inspector.plugins.rules:GeCondition ne = ironic_inspector.plugins.rules:NeCondition in-net = ironic_inspector.plugins.rules:NetCondition matches = ironic_inspector.plugins.rules:MatchesCondition contains = ironic_inspector.plugins.rules:ContainsCondition is-empty = ironic_inspector.plugins.rules:EmptyCondition ironic_inspector.rules.actions = example = ironic_inspector.plugins.example:ExampleRuleAction fail = ironic_inspector.plugins.rules:FailAction set-attribute = ironic_inspector.plugins.rules:SetAttributeAction set-capability = ironic_inspector.plugins.rules:SetCapabilityAction extend-attribute = ironic_inspector.plugins.rules:ExtendAttributeAction ironic_inspector.pxe_filter = dnsmasq = ironic_inspector.pxe_filter.dnsmasq:DnsmasqFilter iptables = ironic_inspector.pxe_filter.iptables:IptablesFilter noop = ironic_inspector.pxe_filter.base:NoopFilter oslo.config.opts = ironic_inspector = ironic_inspector.conf.opts:list_opts oslo.config.opts.defaults = ironic_inspector = ironic_inspector.conf.opts:set_config_defaults oslo.policy.enforcer = ironic_inspector = ironic_inspector.policy:get_oslo_policy_enforcer oslo.policy.policies = ironic_inspector.api = ironic_inspector.policy:list_policies [compile_catalog] directory = ironic_inspector/locale domain = ironic_inspector [update_catalog] domain = ironic-inspector output_dir = ironic_inspector/locale input_file = ironic_inspector/locale/ironic_inspector.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = ironic_inspector/locale/ironic_inspector.pot [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source warning-is-error = 1 [pbr] autodoc_index_modules = True autodoc_exclude_modules = ironic_inspector.migrations.* ironic_inspector.test.* ironic.common.i18n api_doc_dir = contributor/api [egg_info] tag_build = tag_date = 0 ironic-inspector-7.2.0/doc/0000775000175100017510000000000013241324014015572 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/0000775000175100017510000000000013241324014017072 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/conf.py0000666000175100017510000000664413241323457020417 0ustar 
zuulzuul00000000000000# -*- coding: utf-8 -*- # # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'oslo_config.sphinxext', 'oslo_config.sphinxconfiggen'] try: import openstackdocstheme extensions.append('openstackdocstheme') except ImportError: openstackdocstheme = None repository_name = 'openstack/ironic-inspector' bug_project = 'ironic-inspector' bug_tag = '' html_last_updated_fmt = '%Y-%m-%d %H:%M' wsme_protocols = ['restjson'] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ironic Inspector' copyright = u'OpenStack Foundation' config_generator_config_file = '../../config-generator.conf' sample_config_basename = '_static/ironic-inspector' policy_generator_config_file = '../../policy-generator.conf' sample_policy_basename = '_static/ironic-inspector' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #from ironic import version as ironic_version # The full version, including alpha/beta/rc tags. #release = ironic_version.version_info.release_string() # The short X.Y version. #version = ironic_version.version_info.version_string() # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['ironic.'] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # NOTE(cinerama): mock out nova modules so docs can build without warnings #import mock #import sys #MOCK_MODULES = ['nova', 'nova.compute', 'nova.context'] #for module in MOCK_MODULES: # sys.modules[module] = mock.Mock() # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. if openstackdocstheme is not None: html_theme = 'openstackdocs' else: html_theme = 'default' #html_theme_path = ["."] #html_theme = '_theme' #html_static_path = ['_static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). 
latex_documents = [ ( 'index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual' ), ] # -- Options for seqdiag ------------------------------------------------------ seqdiag_html_image_format = "SVG" ironic-inspector-7.2.0/doc/source/images/0000775000175100017510000000000013241324014020337 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/images/states.svg0000666000175100017510000004111113241323457022375 0ustar zuulzuul00000000000000 [states.svg: "Ironic Inspector states" state machine diagram; states: aborting, enrolling, error, finished, processing, reapplying, starting, waiting; transitions labeled with the abort, abort_end, error, finish, process, reapply, start, timeout and wait events] ironic-inspector-7.2.0/doc/source/configuration/0000775000175100017510000000000013241324014021741 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/configuration/policy.rst0000666000175100017510000000034513241323457024010 0ustar zuulzuul00000000000000======== Policies ======== The following is an overview of all available policies in **ironic inspector**. For a sample configuration file, refer to :doc:`sample-policy`. .. show-policy:: :config-file: policy-generator.conf ironic-inspector-7.2.0/doc/source/configuration/index.rst0000666000175100017510000000056413241323457023613 0ustar zuulzuul00000000000000Configuration Guide =================== The ironic-inspector service operation is defined by a configuration file. An overview of the configuration file options follows. .. toctree:: :maxdepth: 1 Ironic Inspector Configuration Options Sample Ironic Inspector Configuration Policies Sample policy file ironic-inspector-7.2.0/doc/source/configuration/ironic-inspector.rst0000666000175100017510000000017513241323457025771 0ustar zuulzuul00000000000000 --------------------- ironic-inspector.conf --------------------- .. show-options:: :config-file: config-generator.conf ironic-inspector-7.2.0/doc/source/configuration/sample-policy.rst0000666000175100017510000000074613241323457025264 0ustar zuulzuul00000000000000======================= Ironic Inspector Policy ======================= The following is a sample **ironic-inspector** policy file, autogenerated from Ironic Inspector when this documentation is built. To avoid issues, make sure your version of **ironic-inspector** matches that of the example policy file. The sample policy can also be downloaded as a :download:`file `. .. literalinclude:: /_static/ironic-inspector.policy.yaml.sample ironic-inspector-7.2.0/doc/source/configuration/sample-config.rst0000666000175100017510000000107513241323457025226 0ustar zuulzuul00000000000000====================================== Ironic Inspector Configuration Options ====================================== The following is a sample Ironic Inspector configuration for adaptation and use. 
It is auto-generated from Ironic Inspector when this documentation is built, so if you find issues with an option, please compare your version of Ironic Inspector with the version of this documentation. The sample configuration can also be downloaded as a :download:`file `. .. literalinclude:: /_static/ironic-inspector.conf.sample ironic-inspector-7.2.0/doc/source/index.rst0000666000175100017510000000053513241323457020752 0ustar zuulzuul00000000000000.. include:: ../../README.rst Using Ironic Inspector ====================== .. toctree:: :maxdepth: 2 install/index configuration/index user/index admin/index Contributor Docs ================ .. toctree:: :maxdepth: 2 contributor/index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ironic-inspector-7.2.0/doc/source/user/0000775000175100017510000000000013241324014020050 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/user/http-api.rst0000666000175100017510000002732213241323457022352 0ustar zuulzuul00000000000000HTTP API -------- .. _http_api: By default **ironic-inspector** listens on ``0.0.0.0:5050``; the port can be changed in the configuration. The protocol is JSON over HTTP. Start Introspection ~~~~~~~~~~~~~~~~~~~ ``POST /v1/introspection/<node_id>`` initiate hardware introspection for node ``<node_id>``. All power management configuration for this node needs to be done prior to calling the endpoint. Requires X-Auth-Token header with Keystone token for authentication. Response: * 202 - accepted introspection request * 400 - bad request * 401, 403 - missing or invalid authentication * 404 - node cannot be found Get Introspection Status ~~~~~~~~~~~~~~~~~~~~~~~~ ``GET /v1/introspection/<node_id>`` get hardware introspection status. Requires X-Auth-Token header with Keystone token for authentication. Response: * 200 - OK * 400 - bad request * 401, 403 - missing or invalid authentication * 404 - node cannot be found Response body: JSON dictionary with keys: * ``finished`` (boolean) whether introspection is finished (``true`` on introspection completion or if it ends because of an error) * ``state`` state of the introspection * ``error`` error string or ``null``; ``Canceled by operator`` in case introspection was aborted * ``uuid`` node UUID * ``started_at`` a UTC ISO8601 timestamp * ``finished_at`` a UTC ISO8601 timestamp or ``null`` * ``links`` containing a self URL Get All Introspection Statuses ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``GET /v1/introspection`` get all hardware introspection statuses. Requires X-Auth-Token header with Keystone token for authentication. The returned status list is sorted by the ``started_at, uuid`` attribute pair, newer items first, and is paginated with these query string fields: * ``marker`` the UUID of the last node returned previously * ``limit`` default, max: ``CONF.api_max_limit`` Response: * 200 - OK * 400 - bad request * 401, 403 - missing or invalid authentication Response body: a JSON object containing a list of status objects:: { 'introspection': [ { 'finished': false, 'state': 'waiting', 'error': null, ... }, ... 
] } Each status object contains these keys: * ``finished`` (boolean) whether introspection is finished (``true`` on introspection completion or if it ends because of an error) * ``state`` state of the introspection * ``error`` error string or ``null``; ``Canceled by operator`` in case introspection was aborted * ``uuid`` node UUID * ``started_at`` a UTC ISO8601 timestamp * ``finished_at`` a UTC ISO8601 timestamp or ``null`` Abort Running Introspection ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``POST /v1/introspection/<node_id>/abort`` abort running introspection. Requires X-Auth-Token header with Keystone token for authentication. Response: * 202 - accepted * 400 - bad request * 401, 403 - missing or invalid authentication * 404 - node cannot be found * 409 - inspector has locked this node for processing Get Introspection Data ~~~~~~~~~~~~~~~~~~~~~~ ``GET /v1/introspection/<node_id>/data`` get stored data from successful introspection. Requires X-Auth-Token header with Keystone token for authentication. Response: * 200 - OK * 400 - bad request * 401, 403 - missing or invalid authentication * 404 - data cannot be found or data storage not configured Response body: JSON dictionary with introspection data .. note:: We do not provide any backward compatibility guarantees regarding the format and contents of the stored data. Notably, it depends on the ramdisk used and plugins enabled both in the ramdisk and in inspector itself. Reapply introspection on stored data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``POST /v1/introspection/<node_id>/data/unprocessed`` to trigger introspection on stored unprocessed data. No data is allowed to be sent along with the request. Requires X-Auth-Token header with Keystone token for authentication. Requires enabling the Swift store in the processing section of the configuration file. Response: * 202 - accepted * 400 - bad request or store not configured * 401, 403 - missing or invalid authentication * 404 - node not found for Node ID * 409 - inspector has locked this node for processing Introspection Rules ~~~~~~~~~~~~~~~~~~~ See :ref:`rules <introspection_rules>` for details. All these API endpoints require X-Auth-Token header with Keystone token for authentication. * ``POST /v1/rules`` create a new introspection rule. Request body: JSON dictionary with keys: * ``conditions`` rule conditions, see :ref:`rules <introspection_rules>` * ``actions`` rule actions, see :ref:`rules <introspection_rules>` * ``description`` (optional) human-readable description * ``uuid`` (optional) rule UUID, autogenerated if missing Response * 200 - OK for API version < 1.6 * 201 - OK for API version 1.6 and higher * 400 - bad request Response body: JSON dictionary with introspection rule representation (the same as above with UUID filled in). * ``GET /v1/rules`` list all introspection rules. Response * 200 - OK Response body: JSON dictionary with key ``rules`` - list of short rule representations. Short rule representation is a JSON dictionary with keys: * ``uuid`` rule UUID * ``description`` human-readable description * ``links`` list of HTTP links, use one with ``rel=self`` to get the full rule details * ``DELETE /v1/rules`` delete all introspection rules. Response * 204 - OK * ``GET /v1/rules/<uuid>`` get one introspection rule by its ``<uuid>``. Response * 200 - OK * 404 - not found Response body: JSON dictionary with introspection rule representation (see ``POST /v1/rules`` above). * ``DELETE /v1/rules/<uuid>`` delete one introspection rule by its ``<uuid>``. Response * 204 - OK * 404 - not found Ramdisk Callback ~~~~~~~~~~~~~~~~ ..
_ramdisk_callback: ``POST /v1/continue`` internal endpoint for the ramdisk to post back discovered data. Should not be used for anything other than implementing the ramdisk. Request body: JSON dictionary with at least these keys: * ``inventory`` full `hardware inventory`_ from the ironic-python-agent with at least the following keys: * ``memory`` memory information containing at least key ``physical_mb`` - physical memory size as reported by dmidecode, * ``cpu`` CPU information containing at least keys ``count`` (CPU count) and ``architecture`` (CPU architecture, e.g. ``x86_64``), * ``bmc_address`` IP address of the node's BMC, * ``interfaces`` list of dictionaries with the following keys: * ``name`` interface name, * ``ipv4_address`` IPv4 address of the interface, * ``mac_address`` MAC (physical) address of the interface. * ``client_id`` InfiniBand Client-ID, ``None`` for Ethernet. * ``disks`` list of disk block devices containing at least ``name`` and ``size`` (in bytes) keys. In case ``disks`` are not provided **ironic-inspector** assumes that this is a disk-less node. * ``root_disk`` default deployment root disk as calculated by the ironic-python-agent algorithm. .. note:: The **ironic-inspector** default plugin ``root_disk_selection`` may change ``root_disk`` based on root device hints if the node specifies hints via the ``root_device`` key in its properties. See `Specifying the disk for deployment root device hints`_ for more details. * ``boot_interface`` MAC address of the NIC that the machine PXE booted from either in standard format ``11:22:33:44:55:66`` or in *PXELinux* ``BOOTIF`` format ``01-11-22-33-44-55-66``. Strictly speaking, this key is optional, but some features will not work as expected if it is not provided. Optionally the following keys might be provided: * ``error`` an error that happened during the ramdisk run, interpreted by the ``ramdisk_error`` plugin. * ``logs`` base64-encoded logs from the ramdisk. .. note:: This list highly depends on the enabled plugins; provided above are the expected keys for the default set of plugins. See :ref:`plugins <introspection_plugins>` for details. .. note:: This endpoint is not expected to be versioned, though versioning will work on it. Response: * 200 - OK * 400 - bad request * 403 - node is not on introspection * 404 - node cannot be found or multiple nodes found Response body: JSON dictionary with ``uuid`` key. .. _hardware inventory: https://docs.openstack.org/ironic-python-agent/latest/admin/how_it_works.html#hardware-inventory .. _Specifying the disk for deployment root device hints: https://docs.openstack.org/ironic/latest/install/advanced.html#specifying-the-disk-for-deployment-root-device-hints Error Response ~~~~~~~~~~~~~~ If an error happens during request processing, **Ironic Inspector** returns a response with an appropriate HTTP code set, e.g. 400 for a bad request or 404 when something was not found (usually a node in cache or a node in ironic). The following JSON body is returned:: { "error": { "message": "Full error message" } } This body may be extended in the future to include more error-specific details. API Versioning ~~~~~~~~~~~~~~ The API supports optional API versioning. You can query for the minimum and maximum API versions supported by the server. You can also declare a required API version in your requests, so that the server rejects requests of an unsupported version. .. note:: Versioning was introduced in **Ironic Inspector 2.1.0**. 
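For instance, a client can pin a request to a specific version by sending the ``X-OpenStack-Ironic-Inspector-API-Version`` header described below. A minimal sketch, assuming the default endpoint and with ``<node_id>`` standing in for a real node UUID::

    $ curl -H "X-OpenStack-Ironic-Inspector-API-Version: 1.12" \
           -H "X-Auth-Token: $TOKEN" \
           http://127.0.0.1:5050/v1/introspection/<node_id>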
All versions must be supplied as a string in the form ``X.Y``, where ``X`` is a major version and is always ``1`` for now, and ``Y`` is a minor version. * If the ``X-OpenStack-Ironic-Inspector-API-Version`` header is sent with a request, the server will check if it supports this version. HTTP error 406 will be returned for an unsupported API version. * All HTTP responses contain ``X-OpenStack-Ironic-Inspector-API-Minimum-Version`` and ``X-OpenStack-Ironic-Inspector-API-Maximum-Version`` headers with the minimum and maximum API versions supported by the server. .. note:: The maximum is the server API version used by default. API Discovery ~~~~~~~~~~~~~ The API supports API discovery. You can query different parts of the API to discover what other endpoints are available. * ``GET /`` List API Versions Response: * 200 - OK Response body: JSON dictionary containing a list of ``versions``, each version contains: * ``status`` Either CURRENT or SUPPORTED * ``id`` The version identifier * ``links`` A list of links to this version endpoint containing: * ``href`` The URL * ``rel`` The relationship between the version and the href * ``GET /v1`` List API v1 resources Response: * 200 - OK Response body: JSON dictionary containing a list of ``resources``, each resource contains: * ``name`` The name of this resource * ``links`` A list of links to this resource containing: * ``href`` The URL * ``rel`` The relationship between the resource and the href Version History ^^^^^^^^^^^^^^^ * **1.0** version of API at the moment of introducing versioning. * **1.1** adds endpoint to retrieve stored introspection data. * **1.2** endpoints for manipulating introspection rules. * **1.3** endpoint for canceling running introspection. * **1.4** endpoint for reapplying the introspection over stored data. * **1.5** support for Ironic node names. * **1.6** the endpoint for creating rules returns 201 instead of 200 on success. * **1.7** UUID, started_at, finished_at in the introspection status API. * **1.8** support for listing all introspection statuses. * **1.9** deactivates setting IPMI credentials; if IPMI credentials are requested, the API returns an HTTP 400 response. * **1.10** adds node state to the GET /v1/introspection/<node_id> and GET /v1/introspection API response data. * **1.11** adds ``invert`` and ``multiple`` fields to the rules response data. * **1.12** this version indicates that support for setting IPMI credentials was completely removed from API (all versions). ironic-inspector-7.2.0/doc/source/user/usage.rst0000666000175100017510000003637513241323457021730 0ustar zuulzuul00000000000000Usage ----- .. _usage_guide: Refer to :ref:`api <http_api>` for information on the HTTP API. Refer to the `client documentation`_ for information on how to use the CLI and the Python library. .. _client documentation: https://docs.openstack.org/python-ironic-inspector-client/latest/ Using from Ironic API ~~~~~~~~~~~~~~~~~~~~~ Ironic Kilo introduced support for hardware introspection under the name of "inspection". **ironic-inspector** introspection is supported for some generic drivers, please refer to the `Ironic inspection documentation`_ for details. .. _Ironic inspection documentation: https://docs.openstack.org/ironic/latest/admin/inspection.html Node States ~~~~~~~~~~~ ..
_node_states: * The nodes should be moved to the ``MANAGEABLE`` provision state before introspection (requires *python-ironicclient* version 0.5.0 or newer):: openstack baremetal node manage <node> * After successful introspection and before deploying, nodes should be made available to Nova by moving them to the ``AVAILABLE`` state:: openstack baremetal node provide <node> .. note:: Due to how Nova interacts with the Ironic driver, you should wait about 1 minute for Nova to become aware of available nodes after issuing this command. Use the ``nova hypervisor-stats`` command output to check it. Introspection Rules ~~~~~~~~~~~~~~~~~~~ .. _introspection_rules: Inspector supports a simple JSON-based DSL to define rules to run during introspection. Inspector provides an API to manage such rules, and will run them automatically after running all processing hooks. A rule consists of conditions to check, and actions to run. If conditions evaluate to true on the introspection data, then actions are run on a node. Available conditions and actions are defined by plugins, and can be extended, see :ref:`contributing_link` for details. See :ref:`api <http_api>` for specific calls to define introspection rules. Conditions ^^^^^^^^^^ A condition is represented by an object with fields: ``op`` the type of comparison operation, default available operators include: * ``eq``, ``le``, ``ge``, ``ne``, ``lt``, ``gt`` - basic comparison operators; * ``in-net`` - checks that an IP address is in a given network; * ``matches`` - requires a full match against a given regular expression; * ``contains`` - requires a value to contain a given regular expression; * ``is-empty`` - checks that the field is an empty string, list, dict or ``None`` value. ``field`` a `JSON path `_ to the field in the introspection data to use in comparison. Starting with the Mitaka release, you can also apply conditions to an ironic node field. Prefix the field with a scheme (``data://`` or ``node://``) to distinguish between values from the introspection data and the node. Both schemes use JSON path:: {"field": "node://property.path", "op": "eq", "value": "val"} {"field": "data://introspection.path", "op": "eq", "value": "val"} If the scheme (node or data) is missing, the condition compares against the introspection data. ``invert`` boolean value, whether to invert the result of the comparison. ``multiple`` how to treat situations where the ``field`` query returns multiple results (e.g. the field contains a list), available options are: * ``any`` (the default) require any to match, * ``all`` require all to match, * ``first`` require the first to match. All other fields are passed to the condition plugin, e.g. numeric comparison operations require a ``value`` field to compare against. Actions ^^^^^^^ An action is represented by an object with fields: ``action`` type of action. Possible values are defined by plugins. All other fields are passed to the action plugin. Default available actions include: * ``fail`` fail introspection. Requires a ``message`` parameter for the failure message. * ``set-attribute`` sets an attribute on an Ironic node. Requires a ``path`` field, which is the path to the attribute as used by ironic (e.g. ``/properties/something``), and a ``value`` to set. * ``set-capability`` sets a capability on an Ironic node. Requires ``name`` and ``value`` fields, which are the name and the value for a new capability accordingly. An existing value for this same capability is replaced. * ``extend-attribute`` the same as ``set-attribute``, but treats the existing value as a list and appends the value to it. 
If the optional ``unique`` parameter is set to ``True``, nothing will be added if the given value is already in the list. Starting from the Mitaka release, the ``value`` field in actions supports fetching data from introspection using `python string formatting notation `_ :: {"action": "set-attribute", "path": "/driver_info/ipmi_address", "value": "{data[inventory][bmc_address]}"} Plugins ~~~~~~~ .. _introspection_plugins: **ironic-inspector** heavily relies on plugins for data processing. Even the standard functionality is largely based on plugins. Set the ``processing_hooks`` option in the configuration file to change the set of plugins to be run on introspection data. Note that order does matter in this option, especially for hooks that have dependencies on other hooks. These are plugins that are enabled by default and should not be disabled, unless you understand what you're doing: ``scheduler`` validates and updates basic hardware scheduling properties: CPU number and architecture, memory and disk size. .. note:: Diskless nodes have the disk size property ``local_gb == 0``. Always use node driver ``root_device`` hints to prevent unexpected HW failures passing silently. ``validate_interfaces`` validates network interfaces information. Creates new ports, optionally deletes ports that were not present in the introspection data. Also sets the ``pxe_enabled`` flag for the PXE-booting port and unsets it for all the other ports to avoid **nova** picking a random port to boot the node. The following plugins are enabled by default, but can be disabled if not needed: ``ramdisk_error`` reports an error if the ``error`` field is set by the ramdisk, and optionally stores logs from the ``logs`` field, see :ref:`api <http_api>` for details. ``capabilities`` detects node capabilities: CPU, boot mode, etc. See `Capabilities Detection`_ for more details. ``pci_devices`` gathers the list of all PCI devices returned by the ramdisk and compares it to those defined in the ``alias`` field(s) from the ``pci_devices`` section of the configuration file. The recognized PCI devices and their count are then stored in node properties. This information can later be used in nova flavors for node scheduling. Here are some plugins that can be additionally enabled: ``example`` example plugin logging its input and output. ``raid_device`` gathers block devices from the ramdisk and exposes the root device in multiple runs. ``extra_hardware`` stores the value of the 'data' key returned by the ramdisk as a JSON encoded string in a Swift object. The plugin will also attempt to convert the data into a format usable by introspection rules. If this is successful then the new format will be stored in the 'extra' key. The 'data' key is then deleted from the introspection data, as unless converted it's assumed unusable by introspection rules. ``local_link_connection`` Processes LLDP data returned from inspection, specifically looking for the port ID and chassis ID; if found, it configures the local link connection information on the node's Ironic ports with that data. To enable LLDP in the inventory from IPA, ``ipa-collect-lldp=1`` should be passed as a kernel parameter to the IPA ramdisk. In order to avoid processing the raw LLDP data twice, the ``lldp_basic`` plugin should also be installed and run prior to this plugin. ``lldp_basic`` Processes LLDP data returned from inspection and parses TLVs from the Basic Management (802.1AB), 802.1Q, and 802.3 sets and stores the processed data back to the Ironic inspector data in Swift. 
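As an illustration only (the exact set of hooks depends on your deployment, and the section and option names follow the sample configuration), the optional hooks above could be enabled like this, keeping ``lldp_basic`` ahead of ``local_link_connection`` as required::

    [processing]
    processing_hooks = $default_processing_hooks,extra_hardware,lldp_basic,local_link_connection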
Refer to :ref:`contributing_link` for information on how to write your own plugin. Discovery ~~~~~~~~~ Starting from Mitaka, **ironic-inspector** is able to register new nodes in Ironic. The existing ``node-not-found-hook`` handles what happens if **ironic-inspector** receives inspection data from a node it cannot identify. This can happen if a node is manually booted without registering it with Ironic first. For discovery, the configuration file option ``node_not_found_hook`` should be set to load the hook called ``enroll``. This hook will enroll the unidentified node into Ironic using the ``fake`` driver (this driver is a configurable option; set ``enroll_node_driver`` in the **ironic-inspector** configuration file to the Ironic driver you want). The ``enroll`` hook will also set the ``ipmi_address`` property on the new node, if it is available in the introspection data we received, see :ref:`ramdisk_callback <ramdisk_callback>`. Once the ``enroll`` hook is finished, **ironic-inspector** will process the introspection data in the same way it would for an identified node. It runs the processing :ref:`plugins <introspection_plugins>`, and after that it runs introspection rules, which would allow for more customisable node configuration, see :ref:`rules <introspection_rules>`. A rule to set a node's Ironic driver to the ``agent_ipmitool`` driver and populate the required driver_info for that driver would look like:: [{ "description": "Set IPMI driver_info if no credentials", "actions": [ {"action": "set-attribute", "path": "driver", "value": "agent_ipmitool"}, {"action": "set-attribute", "path": "driver_info/ipmi_username", "value": "username"}, {"action": "set-attribute", "path": "driver_info/ipmi_password", "value": "password"} ], "conditions": [ {"op": "is-empty", "field": "node://driver_info.ipmi_password"}, {"op": "is-empty", "field": "node://driver_info.ipmi_username"} ] },{ "description": "Set deploy info if not already set on node", "actions": [ {"action": "set-attribute", "path": "driver_info/deploy_kernel", "value": ""}, {"action": "set-attribute", "path": "driver_info/deploy_ramdisk", "value": ""} ], "conditions": [ {"op": "is-empty", "field": "node://driver_info.deploy_ramdisk"}, {"op": "is-empty", "field": "node://driver_info.deploy_kernel"} ] }] All nodes discovered and enrolled via the ``enroll`` hook will contain an ``auto_discovered`` flag in the introspection data. This flag makes it possible to distinguish between manually enrolled nodes and auto-discovered nodes in the introspection rules using the rule condition ``eq``:: { "description": "Enroll auto-discovered nodes with fake driver", "actions": [ {"action": "set-attribute", "path": "driver", "value": "fake"} ], "conditions": [ {"op": "eq", "field": "data://auto_discovered", "value": true} ] } Reapplying introspection on stored data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To allow correcting mistakes in introspection rules, the API provides an entry point that triggers the introspection over stored data. The data to use for processing is kept in Swift separately from the data already processed. Reapplying introspection overwrites processed data in the store. Updating the introspection data through the endpoint isn't supported yet. 
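A reapply is triggered with a bare POST to the endpoint described in :ref:`api <http_api>`, for example (a sketch assuming the default endpoint and a hypothetical node UUID)::

    $ curl -X POST -H "X-Auth-Token: $TOKEN" \
           http://127.0.0.1:5050/v1/introspection/<node_id>/data/unprocessed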
The following preconditions are checked before reapplying introspection: * no data is being sent along with the request * the Swift store is configured and enabled * introspection data is stored in Swift for the node UUID * a node record is kept in the database for the UUID * introspection is not ongoing for the node UUID Should the preconditions fail, an immediate response is given to the user: * ``400`` if the request contained data or in case the Swift store is not enabled in the configuration * ``404`` in case Ironic doesn't keep track of the node UUID * ``409`` if an introspection is already ongoing for the node If the preconditions are met, a background task is executed to carry out the processing and a ``202 Accepted`` response is returned to the endpoint user. As requested, these steps are performed in the background task: * preprocessing hooks * post processing hooks, storing the result in Swift * introspection rules These steps are avoided, based on the feature requirements: * ``node_not_found_hook`` is skipped * power operations * roll-back actions done by hooks Limitations: * there is no way to update the unprocessed data at the moment. * the unprocessed data is never cleaned from the store * the check for stored data presence is performed in the background; a missing data situation still results in a ``202`` response Capabilities Detection ~~~~~~~~~~~~~~~~~~~~~~ Starting with the Newton release, **Ironic Inspector** can optionally discover several node capabilities. A recent (Newton or newer) IPA image is required for it to work. Boot mode ^^^^^^^^^ The current boot mode (BIOS or UEFI) can be detected and recorded as the ``boot_mode`` capability in Ironic. It will make some drivers change their behaviour to account for this capability. Set the ``[capabilities]boot_mode`` configuration option to ``True`` to enable. CPU capabilities ^^^^^^^^^^^^^^^^ Several CPU flags are detected by default and recorded as the following capabilities: * ``cpu_aes`` AES instructions. * ``cpu_vt`` virtualization support. * ``cpu_txt`` TXT support. * ``cpu_hugepages`` huge pages (2 MiB) support. * ``cpu_hugepages_1g`` huge pages (1 GiB) support. It is possible to define your own rules for detecting CPU capabilities. Set the ``[capabilities]cpu_flags`` configuration option to a mapping between a CPU flag and a capability, for example:: cpu_flags = aes:cpu_aes,svm:cpu_vt,vmx:cpu_vt See the default value of this option for a more detailed example. InfiniBand support ^^^^^^^^^^^^^^^^^^ Starting with the Ocata release, **Ironic Inspector** supports detection of InfiniBand network interfaces. A recent (Ocata or newer) IPA image is required for that to work. When an InfiniBand network interface is discovered, the **Ironic Inspector** adds a ``client-id`` attribute to the ``extra`` attribute in the ironic port. The **Ironic Inspector** should be configured with ``iptables.ethoib_interfaces`` to indicate the Ethernet Over InfiniBand (EoIB) interfaces which are used for physical access to the DHCP network. For example, if the **Ironic Inspector** DHCP server is using ``br-inspector`` and ``br-inspector`` has an EoIB port, e.g. ``eth0``, the ``iptables.ethoib_interfaces`` should be set to ``eth0``. The ``iptables.ethoib_interfaces`` option allows mapping the baremetal GUID to its EoIB MAC based on the neighs files. This is needed for blocking DHCP traffic of the nodes (MACs) which are not part of the introspection. 
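For example, with the EoIB port ``eth0`` from the example above, a minimal configuration sketch would be::

    [iptables]
    ethoib_interfaces = eth0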
The format of the ``/sys/class/net/<interface>/eth/neighs`` file:: # EMAC=<EoIB MAC> IMAC=<qp number>:<lid>:<GUID> # For example: IMAC=97:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:26:52 qp number=97:fe lid=80:00:00:00:00:00:00 GUID=7c:fe:90:03:00:29:26:52 Example of content:: EMAC=02:00:02:97:00:01 IMAC=97:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:26:52 EMAC=02:00:00:61:00:02 IMAC=61:fe:80:00:00:00:00:00:00:7c:fe:90:03:00:29:24:4f ironic-inspector-7.2.0/doc/source/user/index.rst0000666000175100017510000000056613241323457021724 0ustar zuulzuul00000000000000User Guide ========== How Ironic Inspector Works -------------------------- .. toctree:: :maxdepth: 2 workflow How to use Ironic Inspector --------------------------- .. toctree:: :maxdepth: 2 usage HTTP API Reference ------------------ .. toctree:: :maxdepth: 2 http-api Troubleshooting --------------- .. toctree:: :maxdepth: 2 troubleshooting ironic-inspector-7.2.0/doc/source/user/workflow.rst0000666000175100017510000000674513241323457022464 0ustar zuulzuul00000000000000How Ironic Inspector Works ========================== Workflow -------- The usual hardware introspection flow is as follows: * The operator enrolls nodes into Ironic_, e.g. via the `openstack baremetal CLI`_ command. Power management credentials should be provided to Ironic at this step. * Nodes are put in the correct state for introspection as described in :ref:`node states <node_states>`. * The operator sends nodes for introspection using the **ironic-inspector** API or CLI (see :ref:`usage <usage_guide>`). * On receiving a node UUID, **ironic-inspector**: * validates node power credentials, current power and provisioning states, * allows access to the PXE boot service for the nodes, * issues a reboot command for the nodes, so that they boot the ramdisk. * The ramdisk collects the required information and posts it back to **ironic-inspector**. * On receiving data from the ramdisk, **ironic-inspector**: * validates received data, * finds the node in the Ironic database using its BMC address (MAC address in the case of the SSH driver), * fills missing node properties with received data and creates missing ports. .. note:: **ironic-inspector** is responsible for creating Ironic ports for some or all NICs found on the node. **ironic-inspector** is also capable of deleting ports that should not be present. There are two important configuration options that affect this behavior: ``add_ports`` and ``keep_ports`` (please refer to :doc:`the sample configuration file ` for a detailed explanation). Default values as of **ironic-inspector** 1.1.0 are ``add_ports=pxe``, ``keep_ports=all``, which means that only one port will be added, which is associated with the NIC the ramdisk PXE booted from. No ports will be deleted. This setting ensures that deploying on introspected nodes will succeed despite `Ironic bug 1405131 `_. The Ironic inspection feature by default requires different settings: ``add_ports=all``, ``keep_ports=present``, which means that ports will be created for all detected NICs, and all other ports will be deleted. Refer to the `Ironic inspection documentation`_ for details. Ironic inspector can also be configured to not create any ports. This is done by setting ``add_ports=disabled``. If setting ``add_ports`` to ``disabled``, the ``keep_ports`` option should also be set to ``all``. This will ensure no manually added ports will be deleted. .. _Ironic inspection documentation: https://docs.openstack.org/ironic/latest/admin/inspection.html * A separate API (see :ref:`usage <usage_guide>` and :ref:`api <http_api>`) can be used to query introspection results for a given node. 
* Nodes are put in the correct state for deploying as described in :ref:`node states <node_states>`. Starting the DHCP server and configuring the PXE boot environment are not part of this package and should be done separately. State machine diagram --------------------- .. _state_machine_diagram: The diagram below shows the introspection states that an **ironic-inspector** FSM goes through during the node introspection, discovery and reprocessing. The diagram also shows events that trigger state transitions. .. figure:: ../images/states.svg :width: 660px :align: center :alt: ironic-inspector state machine diagram .. _Ironic: https://wiki.openstack.org/wiki/Ironic .. _openstack baremetal CLI: https://docs.openstack.org/python-ironicclient/latest/cli/osc_plugin_cli.html ironic-inspector-7.2.0/doc/source/user/troubleshooting.rst0000666000175100017510000001427013241323457024041 0ustar zuulzuul00000000000000Troubleshooting --------------- Errors when starting introspection ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * *Invalid provision state "available"* In the Kilo release with *python-ironicclient* 0.5.0 or newer, Ironic defaults to reporting provision state ``AVAILABLE`` for newly enrolled nodes. **ironic-inspector** will refuse to conduct introspection in this state, as such nodes are supposed to be used by Nova for scheduling. See :ref:`node states <node_states>` for instructions on how to put nodes into the correct state. Introspection times out ~~~~~~~~~~~~~~~~~~~~~~~ There may be three reasons why introspection can time out after some time (defaulting to 60 minutes, altered by the ``timeout`` configuration option): #. A fatal failure in the processing chain before the node was found in the local cache. See `Troubleshooting data processing`_ for hints. #. Failure to load the ramdisk on the target node. See `Troubleshooting PXE boot`_ for hints. #. Failure during the ramdisk run. See `Troubleshooting ramdisk run`_ for hints. Troubleshooting data processing ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In this case the **ironic-inspector** logs should give a good idea what went wrong. E.g. for RDO or Fedora the following command will output the full log:: sudo journalctl -u openstack-ironic-inspector (use ``openstack-ironic-discoverd`` for version < 2.0.0). .. note:: The service name and specific command might be different for other Linux distributions (and for old versions of **ironic-inspector**). If the ``ramdisk_error`` plugin is enabled and the ``ramdisk_logs_dir`` configuration option is set, **ironic-inspector** will store logs received from the ramdisk to the ``ramdisk_logs_dir`` directory. This depends, however, on the ramdisk implementation. Troubleshooting PXE boot ^^^^^^^^^^^^^^^^^^^^^^^^ PXE booting most often becomes a problem for bare metal environments with several physical networks. If the hardware vendor provides a remote console (e.g. iDRAC for DELL), use it to connect to the machine and see what is going on. You may need to restart introspection. Another source of information is the DHCP and TFTP server logs. Their location depends on how the servers were installed and run. For RDO or Fedora use:: $ sudo journalctl -u openstack-ironic-inspector-dnsmasq (use ``openstack-ironic-discoverd-dnsmasq`` for version < 2.0.0). The last resort is the ``tcpdump`` utility. Use something like :: $ sudo tcpdump -i any port 67 or port 68 or port 69 to watch both DHCP and TFTP traffic going through your machine. Replace ``any`` with a specific network interface to check that DHCP and TFTP requests really reach it. 
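For example, assuming the inspection network is attached to ``eth1`` (substitute your own interface name)::

    $ sudo tcpdump -i eth1 port 67 or port 68 or port 69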
If you see the node not attempting PXE boot, or attempting PXE boot on the
wrong network, reboot the machine into its BIOS settings and make sure that
only one relevant NIC is allowed to PXE boot.

If you see the node attempting PXE boot using the correct NIC but failing,
make sure that:

#. the network switches configuration does not prevent PXE boot requests from
   propagating,

#. there are no additional firewall rules preventing access to port 67 on the
   machine where *ironic-inspector* and its DHCP server are installed.

If you see the node receiving a DHCP address and then failing to get the
kernel and/or ramdisk or to boot them, make sure that:

#. the TFTP server is running and accessible (use the ``tftp`` utility to
   verify),

#. no firewall rules prevent access to the TFTP port,

#. SELinux is configured properly to allow external TFTP access,

   If SELinux is neither permissive nor disabled, you should configure
   ``tftp_home_dir`` in SELinux by executing the command
   ::

       $ sudo setsebool -P tftp_home_dir 1

   See `the man page`_ for more details.

   .. _the man page: https://www.systutorials.com/docs/linux/man/8-tftpd_selinux/

#. the DHCP server is correctly set to point to the TFTP server,

#. ``pxelinux.cfg/default`` within the TFTP root contains a correct reference
   to the kernel and ramdisk.

.. note::
    If using iPXE instead of PXE, check the HTTP server logs and the iPXE
    configuration instead.

Troubleshooting ramdisk run
^^^^^^^^^^^^^^^^^^^^^^^^^^^

First, check if the ramdisk logs were stored locally as described in the
`Troubleshooting data processing`_ section. If not, ensure that the ramdisk
actually booted as described in the `Troubleshooting PXE boot`_ section.

Finally, you can try connecting to the IPA ramdisk. If you have any remote
console access to the machine, you can check the logs as they appear on the
screen. Otherwise, you can rebuild the IPA image with your SSH key to be able
to log into it. Use the `dynamic-login`_ or `devuser`_ element for a DIB-based
build, or put an ``authorized_keys`` file in ``/usr/share/oem/`` for a
CoreOS-based one.

.. _devuser: https://docs.openstack.org/diskimage-builder/latest/elements/devuser/README.html
.. _dynamic-login: https://docs.openstack.org/diskimage-builder/latest/elements/dynamic-login/README.html

Troubleshooting DNS issues on Ubuntu
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. _ubuntu-dns:

Ubuntu uses local DNS caching, so it tries localhost for DNS results first
before calling out to an external DNS server. When dnsmasq is installed and
configured for use with ironic-inspector, it can cause problems by interfering
with the local DNS cache. To fix this issue, ensure that ``/etc/resolv.conf``
points to your external DNS servers and not to ``127.0.0.1``.

On Ubuntu 14.04 this can be done by editing your
``/etc/resolvconf/resolv.conf.d/head`` and adding your nameservers there.
This will ensure they come up first when ``/etc/resolv.conf`` is regenerated.

Running Inspector in a VirtualBox environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default VirtualBox does not expose a DMI table to the guest. This prevents
ironic-inspector from being able to discover the properties of a node. In
order to run ironic-inspector on a VirtualBox guest, the host must be
configured to expose DMI data inside the guest. To do this, run the following
command on the VirtualBox host::

    VBoxManage setextradata {NodeName} "VBoxInternal/Devices/pcbios/0/Config/DmiExposeMemoryTable" 1

.. note::
    Replace ``{NodeName}`` with the name of the guest you wish to expose the
    DMI table on.
This command will need to be run once per host to enable this functionality.
ironic-inspector-7.2.0/doc/source/admin/0000775000175100017510000000000013241324014020162 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/admin/index.rst0000666000175100017510000000037113241323457022040 0ustar zuulzuul00000000000000Administrator Guide
===================

How to upgrade Ironic Inspector
-------------------------------

.. toctree::
   :maxdepth: 2

   upgrade

Dnsmasq PXE filter driver
-------------------------

.. toctree::
   :maxdepth: 2

   dnsmasq-pxe-filter
ironic-inspector-7.2.0/doc/source/admin/dnsmasq-pxe-filter.rst0000666000175100017510000001152013241323457024452 0ustar zuulzuul00000000000000.. _dnsmasq_pxe_filter:

**dnsmasq** PXE filter
======================

An inspection PXE DHCP stack is often implemented by the **dnsmasq** service.
The **dnsmasq** PXE filter implementation relies on directly configuring the
**dnsmasq** DHCP service to provide a caching PXE traffic filter of node MAC
addresses.

How it works
------------

The filter works by populating the **dnsmasq** DHCP hosts directory with a
configuration file per MAC address. Each file either enables or disables,
through the ``ignore`` directive, the DHCP service for a particular MAC
address::

    $ cat /etc/dnsmasq.d/de-ad-be-ef-de-ad
    de:ad:be:ef:de:ad,ignore
    $

The filename is used to keep track of all MAC addresses in the cache, avoiding
file parsing. The content of the file determines the MAC address access
policy.

Thanks to the ``inotify`` facility, **dnsmasq** is notified once a new file is
*created* or an existing file is *modified* in the DHCP hosts directory. Thus,
to white-list a MAC address, the filter removes the ``ignore`` directive::

    $ cat /etc/dnsmasq.d/de-ad-be-ef-de-ad
    de:ad:be:ef:de:ad
    $

The hosts directory content establishes a *cached* MAC address filter that is
kept synchronized with the **ironic** port list.

.. note::
    The **dnsmasq** inotify facility implementation doesn't react to a file
    being removed or truncated.

Configuration
-------------

The ``inotify`` facility was introduced_ to **dnsmasq** in version `2.73`.
This filter driver has been checked by **ironic-inspector** CI with
**dnsmasq** versions `>=2.76`.

.. _introduced: http://www.thekelleys.org.uk/dnsmasq/CHANGELOG

To enable the **dnsmasq** PXE filter, update the PXE filter driver name in the
**ironic-inspector** configuration file::

    [pxe_filter]
    driver = dnsmasq

The DHCP hosts directory can be specified to override the default
``/var/lib/ironic-inspector/dhcp-hostsdir``::

    [dnsmasq_pxe_filter]
    dhcp_hostsdir = /etc/ironic-inspector/dhcp-hostsdir

The filter design relies on the hosts directory being in exclusive
**ironic-inspector** control. The hosts directory should be considered a
*private cache* directory of **ironic-inspector** that **dnsmasq** polls
configuration updates from, through the ``inotify`` facility. The directory
has to be writable by **ironic-inspector** and readable by **dnsmasq**.

It is also possible to override the default (empty) **dnsmasq** start and stop
commands to, for instance, directly control the **dnsmasq** service::

    [dnsmasq_pxe_filter]
    dnsmasq_start_command = dnsmasq --conf-file /etc/ironic-inspector/dnsmasq.conf
    dnsmasq_stop_command = kill $(cat /var/run/dnsmasq.pid)

.. note::
    The commands support shell expansion. The default empty start command
    means the **dnsmasq** service won't be started upon the filter
    initialization.
Conversely, the default empty stop command means the service won't be stopped upon an (error) exit. .. note:: These commands are executed through the rootwrap_ facility, so overriding may require a filter file to be created in the ``rootwrap.d`` directory. A sample configuration to use with the **systemctl** facility might be: .. code-block:: console sudo cat > /etc/ironic-inspector/rootwrap.d/ironic-inspector-dnsmasq-systemctl.filters <`_ should always be read carefully when upgrading the ironic-inspector service. Starting with the Mitaka series, specific upgrade steps and considerations are well-documented in the release notes. Upgrades are only supported one series at a time, or within a series. Only offline (with downtime) upgrades are currently supported. When upgrading ironic-inspector, the following steps should always be taken: * Update ironic-inspector code, without restarting the service yet. * Stop the ironic-inspector service. * Run database migrations:: ironic-inspector-dbsync --config-file upgrade * Start the ironic-inspector service. * Upgrade the ironic-python-agent image used for introspection. .. note:: There is no implicit upgrade order between ironic and ironic-inspector, unless the `release notes`_ say otherwise. ironic-inspector-7.2.0/doc/source/install/0000775000175100017510000000000013241324014020540 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/install/index.rst0000666000175100017510000003327613241323457022430 0ustar zuulzuul00000000000000Install Guide ============= Install from PyPI_ (you may want to use virtualenv to isolate your environment):: pip install ironic-inspector Also there is a `DevStack `_ plugin for **ironic-inspector** - see :ref:`contributing_link` for the current status. Finally, some distributions (e.g. Fedora) provide **ironic-inspector** packaged, some of them - under its old name *ironic-discoverd*. There are several projects you can use to set up **ironic-inspector** in production. `puppet-ironic `_ provides Puppet manifests, while `bifrost `_ provides an Ansible-based standalone installer. Refer to Configuration_ if you plan on installing **ironic-inspector** manually. .. _PyPI: https://pypi.python.org/pypi/ironic-inspector .. note:: Please beware of :ref:`possible DNS issues ` when installing **ironic-inspector** on Ubuntu. Version Support Matrix ---------------------- **ironic-inspector** currently requires the Bare Metal API version ``1.11`` to be provided by **ironic**. This version is available starting with the Liberty release of **ironic**. Here is a mapping between the ironic versions and the supported ironic-inspector versions. The Standalone column shows which ironic-inspector versions can be used in standalone mode with each ironic version. The Inspection Interface column shows which ironic-inspector versions can be used with the inspection interface in each version of **ironic**. ============== ============ ==================== Ironic Version Standalone Inspection Interface ============== ============ ==================== Juno 1.0 N/A Kilo 1.0 - 2.2 1.0 - 1.1 Liberty 1.1 - 2.2.7 2.0 - 2.2.7 Mitaka 2.3 - 3.X 2.3 - 3.X Newton 3.3 - 4.X 3.3 - 4.X Ocata+ 5.0 - 5.X 5.0 - 5.X ============== ============ ==================== .. note:: ``3.X`` means there are no specific plans to deprecate support for this ironic version. This does not imply that it will be supported forever. 
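If in doubt about which Bare Metal API versions your **ironic** installation
actually provides, you can ask its API root directly. This is only an
illustrative sanity check, assuming **ironic** listens on its default port
``6385`` (replace the host with your actual endpoint)::

    $ curl -s http://<ironic host>:6385/ | python -m json.tool

The response describes the available API versions, including the minimum and
maximum supported microversions, so you can verify that ``1.11`` falls into
the supported range.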
Sample Configuration Files -------------------------- To generate a sample configuration file, run the following command from the top level of the code tree:: tox -egenconfig For a pre-generated sample configuration file, see :doc:`/configuration/sample-config`. To generate a sample policy file, run the following command from the top level of the code tree:: tox -egenpolicy For a pre-generated sample configuration file, see :doc:`/configuration/sample-policy`. Configuration ------------- Copy the sample configuration files to some permanent place (e.g. ``/etc/ironic-inspector/inspector.conf``). Fill in these minimum configuration values: * The ``keystone_authtoken`` section - credentials to use when checking user authentication. * The ``ironic`` section - credentials to use when accessing **ironic** API. * ``connection`` in the ``database`` section - SQLAlchemy connection string for the database. * ``dnsmasq_interface`` in the ``iptables`` section - interface on which ``dnsmasq`` (or another DHCP service) listens for PXE boot requests (defaults to ``br-ctlplane`` which is a sane default for **tripleo**-based installations but is unlikely to work for other cases). * if you wish to use the ``dnsmasq`` PXE/DHCP filter driver rather than the default ``iptables`` driver, see the :ref:`dnsmasq_pxe_filter` description. See comments inside :doc:`the sample configuration ` for other possible configuration options. .. note:: Configuration file contains a password and thus should be owned by ``root`` and should have access rights like ``0600``. Here is an example *inspector.conf* (adapted from a gate run):: [DEFAULT] debug = false rootwrap_config = /etc/ironic-inspector/rootwrap.conf [database] connection = mysql+pymysql://root:@127.0.0.1/ironic_inspector?charset=utf8 [pxe_filter] driver=iptables [iptables] dnsmasq_interface = br-ctlplane [ironic] os_region = RegionOne project_name = service password = username = ironic-inspector auth_url = http://127.0.0.1/identity auth_type = password [keystone_authtoken] auth_uri = http://127.0.0.1/identity project_name = service password = username = ironic-inspector auth_url = http://127.0.0.1/identity_v2_admin auth_type = password [processing] ramdisk_logs_dir = /var/log/ironic-inspector/ramdisk store_data = swift [swift] os_region = RegionOne project_name = service password = username = ironic-inspector auth_url = http://127.0.0.1/identity auth_type = password .. note:: Set ``debug = true`` if you want to see complete logs. **ironic-inspector** requires root rights for managing ``iptables``. It gets them by running ``ironic-inspector-rootwrap`` utility with ``sudo``. To allow it, copy file ``rootwrap.conf`` and directory ``rootwrap.d`` to the configuration directory (e.g. ``/etc/ironic-inspector/``) and create file ``/etc/sudoers.d/ironic-inspector-rootwrap`` with the following content:: Defaults:stack !requiretty stack ALL=(root) NOPASSWD: /usr/bin/ironic-inspector-rootwrap /etc/ironic-inspector/rootwrap.conf * .. DANGER:: Be very careful about typos in ``/etc/sudoers.d/ironic-inspector-rootwrap`` as any typo will break sudo for **ALL** users on the system. Especially, make sure there is a new line at the end of this file. .. note:: ``rootwrap.conf`` and all files in ``rootwrap.d`` must be writeable only by root. .. note:: If you store ``rootwrap.d`` in a different location, make sure to update the *filters_path* option in ``rootwrap.conf`` to reflect the change. 
If your ``rootwrap.conf`` is in a different location, then you need to update the *rootwrap_config* option in ``ironic-inspector.conf`` to point to that location. Replace ``stack`` with whatever user you'll be using to run **ironic-inspector**. Configuring IPA ~~~~~~~~~~~~~~~ ironic-python-agent_ is a ramdisk developed for **ironic** and support for **ironic-inspector** was added during the Liberty cycle. This is the default ramdisk starting with the Mitaka release. .. note:: You need at least 1.5 GiB of RAM on the machines to use IPA built with diskimage-builder_ and at least 384 MiB to use the *TinyIPA*. To build an **ironic-python-agent** ramdisk, do the following: * Get the new enough version of diskimage-builder_:: sudo pip install -U "diskimage-builder>=1.1.2" * Build the ramdisk:: disk-image-create ironic-agent fedora -o ironic-agent .. note:: Replace "fedora" with your distribution of choice. * Use the resulting files ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` in the following instructions to set PXE or iPXE. Alternatively, you can download a `prebuilt TinyIPA image `_ or use the `other builders `_. .. _diskimage-builder: https://docs.openstack.org/diskimage-builder/latest/ .. _ironic-python-agent: https://docs.openstack.org/ironic-python-agent/latest/ Configuring PXE ~~~~~~~~~~~~~~~ For the PXE boot environment, you'll need: * TFTP server running and accessible (see below for using *dnsmasq*). Ensure ``pxelinux.0`` is present in the TFTP root. Copy ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` to the TFTP root as well. * Next, setup ``$TFTPROOT/pxelinux.cfg/default`` as follows:: default introspect label introspect kernel ironic-agent.kernel append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue systemd.journald.forward_to_console=yes ipappend 3 Replace ``{IP}`` with IP of the machine (do not use loopback interface, it will be accessed by ramdisk on a booting machine). .. note:: While ``systemd.journald.forward_to_console=yes`` is not actually required, it will substantially simplify debugging if something goes wrong. You can also enable IPA debug logging by appending ``ipa-debug=1``. IPA is pluggable: you can insert introspection plugins called *collectors* into it. For example, to enable a very handy ``logs`` collector (sending ramdisk logs to **ironic-inspector**), modify the ``append`` line in ``$TFTPROOT/pxelinux.cfg/default``:: append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{IP}:5050/v1/continue ipa-inspection-collectors=default,logs systemd.journald.forward_to_console=yes .. note:: You probably want to always keep the ``default`` collector, as it provides the basic information required for introspection. * You need PXE boot server (e.g. *dnsmasq*) running on **the same** machine as **ironic-inspector**. Don't do any firewall configuration: **ironic-inspector** will handle it for you. In **ironic-inspector** configuration file set ``dnsmasq_interface`` to the interface your PXE boot server listens on. Here is an example *dnsmasq.conf*:: port=0 interface={INTERFACE} bind-interfaces dhcp-range={DHCP IP RANGE, e.g. 192.168.0.50,192.168.0.150} enable-tftp tftp-root={TFTP ROOT, e.g. /tftpboot} dhcp-boot=pxelinux.0 dhcp-sequential-ip .. note:: ``dhcp-sequential-ip`` is used because otherwise a lot of nodes booting simultaneously cause conflicts - the same IP address is suggested to several nodes. 
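Before running a real introspection, it may be worth smoke-testing the TFTP
part of this setup. A minimal check, assuming the ``tftp`` client utility is
installed and ``{IP}`` is the address the boot server listens on, could be::

    $ cd /tmp
    $ tftp {IP} -c get pxelinux.0
    $ ls -l pxelinux.0

If the file cannot be fetched, revisit the firewall, SELinux and TFTP hints in
the troubleshooting documentation before going further.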
Configuring iPXE ~~~~~~~~~~~~~~~~ iPXE allows better scaling as it primarily uses the HTTP protocol instead of slow and unreliable TFTP. You still need a TFTP server as a fallback for nodes not supporting iPXE. To use iPXE, you'll need: * TFTP server running and accessible (see above for using *dnsmasq*). Ensure ``undionly.kpxe`` is present in the TFTP root. If any of your nodes boot with UEFI, you'll also need ``ipxe.efi`` there. * You also need an HTTP server capable of serving static files. Copy ``ironic-agent.kernel`` and ``ironic-agent.initramfs`` there. * Create a file called ``inspector.ipxe`` in the HTTP root (you can name and place it differently, just don't forget to adjust the *dnsmasq.conf* example below):: #!ipxe :retry_dhcp dhcp || goto retry_dhcp :retry_boot imgfree kernel --timeout 30000 http://{IP}:8088/ironic-agent.kernel ipa-inspection-callback-url=http://{IP}>:5050/v1/continue systemd.journald.forward_to_console=yes BOOTIF=${mac} initrd=agent.ramdisk || goto retry_boot initrd --timeout 30000 http://{IP}:8088/ironic-agent.ramdisk || goto retry_boot boot .. note:: Older versions of the iPXE ROM tend to misbehave on unreliable network connection, thus we use the timeout option with retries. Just like with PXE, you can customize the list of collectors by appending the ``ipa-inspector-collectors`` kernel option. For example:: ipa-inspection-collectors=default,logs,extra_hardware * Just as with PXE, you'll need a PXE boot server. The configuration, however, will be different. Here is an example *dnsmasq.conf*:: port=0 interface={INTERFACE} bind-interfaces dhcp-range={DHCP IP RANGE, e.g. 192.168.0.50,192.168.0.150} enable-tftp tftp-root={TFTP ROOT, e.g. /tftpboot} dhcp-sequential-ip dhcp-match=ipxe,175 dhcp-match=set:efi,option:client-arch,7 dhcp-match=set:efi,option:client-arch,9 # Client is already running iPXE; move to next stage of chainloading dhcp-boot=tag:ipxe,http://{IP}:8088/inspector.ipxe # Client is PXE booting over EFI without iPXE ROM, # send EFI version of iPXE chainloader dhcp-boot=tag:efi,tag:!ipxe,ipxe.efi dhcp-boot=undionly.kpxe,localhost.localdomain,{IP} First, we configure the same common parameters as with PXE. Then we define ``ipxe`` and ``efi`` tags. Nodes already supporting iPXE are ordered to download and execute ``inspector.ipxe``. Nodes without iPXE booted with UEFI will get ``ipxe.efi`` firmware to execute, while the remaining will get ``undionly.kpxe``. Managing the **ironic-inspector** Database ------------------------------------------ **ironic-inspector** provides a command line client for managing its database. This client can be used for upgrading, and downgrading the database using `alembic `_ migrations. If this is your first time running **ironic-inspector** to migrate the database, simply run: :: ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade If you have previously run a version of **ironic-inspector** earlier than 2.2.0, the safest thing is to delete the existing SQLite database and run ``upgrade`` as shown above. However, if you want to save the existing database, to ensure your database will work with the migrations, you'll need to run an extra step before upgrading the database. You only need to do this the first time running version 2.2.0 or later. 
If you are upgrading from **ironic-inspector** version 2.1.0 or lower: :: ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf stamp --revision 578f84f38d ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade If you are upgrading from a git master install of the **ironic-inspector** after :ref:`rules ` were introduced: :: ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf stamp --revision d588418040d ironic-inspector-dbsync --config-file /etc/ironic-inspector/inspector.conf upgrade Other available commands can be discovered by running:: ironic-inspector-dbsync --help Running ------- :: ironic-inspector --config-file /etc/ironic-inspector/inspector.conf ironic-inspector-7.2.0/doc/source/.gitignore0000666000175100017510000000001613241323457021073 0ustar zuulzuul00000000000000target/ build/ironic-inspector-7.2.0/doc/source/contributor/0000775000175100017510000000000013241324014021444 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/doc/source/contributor/index.rst0000666000175100017510000000020413241323457023315 0ustar zuulzuul00000000000000.. _contributing_link: .. include:: ../../../CONTRIBUTING.rst Python API ~~~~~~~~~~ .. toctree:: :maxdepth: 1 api/autoindex ironic-inspector-7.2.0/doc/Makefile0000666000175100017510000001317013241323457017250 0ustar zuulzuul00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " xml to make Docutils-native XML files" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Heat.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Heat.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Heat" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Heat" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The xml files are in $(BUILDDIR)/xml." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." 
doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt."ironic-inspector-7.2.0/CONTRIBUTING.rst0000666000175100017510000003240513241323457017506 0ustar zuulzuul00000000000000================= How To Contribute ================= Basics ~~~~~~ * Our source code is hosted on `OpenStack GitHub`_, but please do not send pull requests there. * Please follow usual OpenStack `Gerrit Workflow`_ to submit a patch. * Update change log in README.rst on any significant change. * It goes without saying that any code change should by accompanied by unit tests. * Note the branch you're proposing changes to. ``master`` is the current focus of development, use ``stable/VERSION`` for proposing an urgent fix, where ``VERSION`` is the current stable series. E.g. at the moment of writing the stable branch is ``stable/1.0``. * Please file a launchpad_ blueprint for any significant code change and a bug for any significant bug fix. .. _OpenStack GitHub: https://github.com/openstack/ironic-inspector .. _Gerrit Workflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow .. _launchpad: https://bugs.launchpad.net/ironic-inspector Development Environment ~~~~~~~~~~~~~~~~~~~~~~~ First of all, install *tox* utility. It's likely to be in your distribution repositories under name of ``python-tox``. Alternatively, you can install it from PyPI. Next checkout and create environments:: git clone https://github.com/openstack/ironic-inspector.git cd ironic-inspector tox Repeat *tox* command each time you need to run tests. If you don't have Python interpreter of one of supported versions (currently 2.7 and 3.4), use ``-e`` flag to select only some environments, e.g. :: tox -e py27 .. note:: Support for Python 3 is highly experimental, stay with Python 2 for the production environment for now. .. note:: This command also runs tests for database migrations. By default the sqlite backend is used. For testing with mysql or postgresql, you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. Use the script ``tools/test_setup.sh`` to set the database up the same way as done in the OpenStack CI environment. .. note:: Users of Fedora <= 23 will need to run "sudo dnf --releasever=24 update python-virtualenv" to run unit tests To run the functional tests, use:: tox -e func Once you have added new state or transition into inspection state machine, you should regenerate :ref:`State machine diagram ` with:: tox -e genstates Run the service with:: .tox/py27/bin/ironic-inspector --config-file example.conf Of course you may have to modify ``example.conf`` to match your OpenStack environment. See the `install guide <../install#sample-configuration-files>`_ for information on generating or downloading an example configuration file. You can develop and test **ironic-inspector** using DevStack - see `Deploying Ironic Inspector with DevStack`_ for the current status. Deploying Ironic Inspector with DevStack ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `DevStack `_ provides a way to quickly build a full OpenStack development environment with requested components. There is a plugin for installing **ironic-inspector** in DevStack. Installing **ironic-inspector** requires a machine running Ubuntu 14.04 (or later) or Fedora 23 (or later). 
Make sure this machine is fully up to date and has the latest packages installed before beginning this process. Download DevStack:: git clone https://git.openstack.org/openstack-dev/devstack.git cd devstack Create ``local.conf`` file with minimal settings required to enable both the **ironic** and the **ironic-inspector**. You can start with the `Example local.conf`_ and extend it as needed. Example local.conf ------------------ .. literalinclude:: ../../../devstack/example.local.conf Notes ----- * Set IRONIC_INSPECTOR_BUILD_RAMDISK to True if you want to build ramdisk. Default value is False and ramdisk will be downloaded instead of building. * 1024 MiB of RAM is a minimum required for the default build of IPA based on CoreOS. If you plan to use another operating system and build IPA with diskimage-builder 2048 MiB is recommended. * Network configuration is pretty sensitive, better not to touch it without deep understanding. * This configuration disables **horizon**, **heat**, **cinder** and **tempest**, adjust it if you need these services. Start the install:: ./stack.sh Usage ----- After installation is complete, you can source ``openrc`` in your shell, and then use the OpenStack CLI to manage your DevStack:: source openrc admin demo Show DevStack screens:: screen -x stack To exit screen, hit ``CTRL-a d``. List baremetal nodes:: openstack baremetal node list Bring the node to manageable state:: openstack baremetal node manage Inspect the node:: openstack baremetal node inspect .. note:: The deploy driver used must support the inspect interface. See also the `Ironic Python Agent `_. A node can also be inspected using the following command. However, this will not affect the provision state of the node:: openstack baremetal introspection start Check inspection status:: openstack baremetal introspection status Optionally, get the inspection data:: openstack baremetal introspection data save Writing a Plugin ~~~~~~~~~~~~~~~~ * **ironic-inspector** allows you to hook code into the data processing chain after introspection. Inherit ``ProcessingHook`` class defined in ironic_inspector.plugins.base_ module and overwrite any or both of the following methods: ``before_processing(introspection_data,**)`` called before any data processing, providing the raw data. Each plugin in the chain can modify the data, so order in which plugins are loaded matters here. Returns nothing. ``before_update(introspection_data,node_info,**)`` called after node is found and ports are created, but before data is updated on a node. Please refer to the docstring for details and examples. You can optionally define the following attribute: ``dependencies`` a list of entry point names of the hooks this hook depends on. These hooks are expected to be enabled before the current hook. Make your plugin a setuptools entry point under ``ironic_inspector.hooks.processing`` namespace and enable it in the configuration file (``processing.processing_hooks`` option). * **ironic-inspector** allows plugins to override the action when node is not found in node cache. Write a callable with the following signature: ``(introspection_data,**)`` called when node is not found in cache, providing the processed data. Should return a ``NodeInfo`` class instance. Make your plugin a setuptools entry point under ``ironic_inspector.hooks.node_not_found`` namespace and enable it in the configuration file (``processing.node_not_found_hook`` option). * **ironic-inspector** allows more condition types to be added for `Introspection Rules`_. 
Inherit ``RuleConditionPlugin`` class defined in ironic_inspector.plugins.base_ module and overwrite at least the following method: ``check(node_info,field,params,**)`` called to check that condition holds for a given field. Field value is provided as ``field`` argument, ``params`` is a dictionary defined at the time of condition creation. Returns boolean value. The following methods and attributes may also be overridden: ``validate(params,**)`` called to validate parameters provided during condition creating. Default implementation requires keys listed in ``REQUIRED_PARAMS`` (and only them). ``REQUIRED_PARAMS`` contains set of required parameters used in the default implementation of ``validate`` method, defaults to ``value`` parameter. ``ALLOW_NONE`` if it's set to ``True``, missing fields will be passed as ``None`` values instead of failing the condition. Defaults to ``False``. Make your plugin a setuptools entry point under ``ironic_inspector.rules.conditions`` namespace. * **ironic-inspector** allows more action types to be added for `Introspection Rules`_. Inherit ``RuleActionPlugin`` class defined in ironic_inspector.plugins.base_ module and overwrite at least the following method: ``apply(node_info,params,**)`` called to apply the action. The following methods and attributes may also be overridden: ``validate(params,**)`` called to validate parameters provided during actions creating. Default implementation requires keys listed in ``REQUIRED_PARAMS`` (and only them). ``REQUIRED_PARAMS`` contains set of required parameters used in the default implementation of ``validate`` method, defaults to no parameters. Make your plugin a setuptools entry point under ``ironic_inspector.rules.conditions`` namespace. .. note:: ``**`` argument is needed so that we can add optional arguments without breaking out-of-tree plugins. Please make sure to include and ignore it. .. _ironic_inspector.plugins.base: https://docs.openstack.org/ironic-inspector/latest/api/ironic_inspector.plugins.base.html .. _Introspection Rules: https://docs.openstack.org/ironic-inspector/latest/usage.html#introspection-rules Making changes to the database ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to make a change to the ironic-inspector database you must update the database models found in ironic_inspector.db_ and then create a migration to reflect that change. There are two ways to create a migration which are described below, both of these generate a new migration file. In this file there is only one function: * ``upgrade`` - The function to run when ``ironic-inspector-dbsync upgrade`` is run, and should be populated with code to bring the database up to its new state from the state it was in after the last migration. For further information on creating a migration, refer to `Create a Migration Script`_ from the alembic documentation. Autogenerate ------------ This is the simplest way to create a migration. Alembic will compare the models to an up to date database, and then attempt to write a migration based on the differences. This should generate correct migrations in most cases however there are some cases when it can not detect some changes and may require manual modification, see `What does Autogenerate Detect (and what does it not detect?)`_ from the alembic documentation. :: ironic-inspector-dbsync upgrade ironic-inspector-dbsync revision -m "A short description" --autogenerate Manual ------ This will generate an empty migration file, with the correct revision information already included. 
However the upgrade function is left empty and must be manually populated in order to perform the correct actions on the database:: ironic-inspector-dbsync revision -m "A short description" .. _Create a Migration Script: http://alembic.zzzcomputing.com/en/latest/tutorial.html#create-a-migration-script .. _ironic_inspector.db: https://docs.openstack.org/ironic-inspector/latest/api/ironic_inspector.db.html .. _What does Autogenerate Detect (and what does it not detect?): http://alembic.zzzcomputing.com/en/latest/autogenerate.html#what-does-autogenerate-detect-and-what-does-it-not-detect Implementing PXE Filter Drivers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Background ---------- **inspector** in-band introspection PXE-boots the Ironic Python Agent "live" image, to inspect the baremetal server. **ironic** also PXE-boots IPA to perform tasks on a node, such as deploying an image. **ironic** uses **neutron** to provide DHCP, however **neutron** does not provide DHCP for unknown MAC addresses so **inspector** has to use its own DHCP/TFTP stack for discovery and inspection. When **ironic** and **inspector** are operating in the same L2 network, there is a potential for the two DHCPs to race, which could result in a node being deployed by **ironic** being PXE booted by **inspector**. To prevent DHCP races between the **inspector** DHCP and **ironic** DHCP, **inspector** has to be able to filter which nodes can get a DHCP lease from the **inspector** DHCP server. These filters can then be used to prevent node's enrolled in **ironic** inventory from being PXE-booted unless they are explicitly moved into the ``inspected`` state. Filter Interface ---------------- .. py:currentmodule:: ironic_inspector.pxe_filter.interface The contract between **inspector** and a PXE filter driver is described in the :class:`FilterDriver` interface. The methods a driver has to implement are: * :meth:`~FilterDriver.init_filter` called on the service start to initialize internal driver state * :meth:`~FilterDriver.sync` called both periodically and when a node starts or finishes introspection to white or blacklist its ports MAC addresses in the driver * :meth:`~FilterDriver.tear_down_filter` called on service exit to reset the internal driver state .. py:currentmodule:: ironic_inspector.pxe_filter.base The driver-specific configuration is suggested to be parsed during instantiation. There's also a convenience generic interface implementation :class:`BaseFilter` that provides base locking and initialization implementation. If required, a driver can opt-out from the periodic synchronization by overriding the :meth:`~BaseFilter.get_periodic_sync_task`. 
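To make the contract more concrete, here is a rough sketch of a custom driver
built on :class:`BaseFilter`. It is not a driver shipped with
**ironic-inspector**, and the method signatures are assumptions derived from
the interface description above, so verify them against the actual
:class:`FilterDriver` definition before relying on this:

.. code-block:: python

    # A hypothetical PXE filter driver sketch; the signatures are assumptions
    # based on the FilterDriver interface described above.
    from ironic_inspector.pxe_filter import base


    class ExampleFilter(base.BaseFilter):
        """Track synchronizations, but never filter any MAC address."""

        def init_filter(self):
            # Called on service start to initialize internal driver state.
            self._syncs = 0

        def sync(self, ironic):
            # Called periodically and when a node starts or finishes
            # introspection. A real driver would white-/blacklist port MAC
            # addresses here, using the passed-in ironic client to fetch
            # the current port list.
            self._syncs += 1

        def tear_down_filter(self):
            # Called on service exit to reset the internal driver state.
            self._syncs = 0

Such a driver would additionally have to be exposed through the appropriate
setuptools entry point so that the ``[pxe_filter] driver`` configuration
option can select it.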
ironic-inspector-7.2.0/tox.ini0000666000175100017510000000455213241323475016362 0ustar zuulzuul00000000000000[tox] envlist = py3,py27,pep8,functional [testenv] usedevelop = True install_command = pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/queens} {opts} {packages} deps = -r{toxinidir}/test-requirements.txt commands = ostestr {posargs} setenv = VIRTUAL_ENV={envdir} PYTHONDONTWRITEBYTECODE=1 TZ=UTC TESTS_DIR=./ironic_inspector/test/unit/ passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY [testenv:venv] commands = {posargs} [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --branch --omit='*test*' --source ironic_inspector --parallel-mode commands = coverage erase ostestr {posargs} coverage combine coverage report -m --omit='*test*' --fail-under 90 coverage html -d ./cover --omit='*test*' [testenv:pep8] basepython = python2.7 commands = flake8 ironic_inspector doc8 README.rst CONTRIBUTING.rst doc/source [testenv:functional] basepython = python2.7 commands = python -m ironic_inspector.test.functional [testenv:functional-py35] basepython = python3 commands = python3 -m ironic_inspector.test.functional [testenv:genconfig] envdir = {toxworkdir}/venv commands = oslo-config-generator --config-file config-generator.conf [testenv:genpolicy] sitepackages = False envdir = {toxworkdir}/venv commands = oslopolicy-sample-generator --config-file {toxinidir}/policy-generator.conf [testenv:genstates] deps = {[testenv]deps} commands = {toxinidir}/tools/states_to_dot.py -f {toxinidir}/doc/source/images/states.svg --format svg [flake8] max-complexity=15 # [H106] Don't put vim configuration in source files. # [H203] Use assertIs(Not)None to check for None. # [H204] Use assert(Not)Equal to check for equality. # [H205] Use assert(Greater|Less)(Equal) for comparison. # [H904] Delay string interpolations at logging calls. enable-extensions=H106,H203,H204,H205,H904 import-order-style = pep8 application-import-names = ironic_inspector [hacking] import_exceptions = ironicclient.exceptions,ironic_inspector.common.i18n [testenv:docs] setenv = PYTHONHASHSEED=0 sitepackages = False deps = -r{toxinidir}/test-requirements.txt commands = python setup.py build_sphinx ironic-inspector-7.2.0/rootwrap.conf0000666000175100017510000000171113241323457017565 0ustar zuulzuul00000000000000# Configuration for ironic-inspector-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/ironic-inspector/rootwrap.d,/usr/share/ironic-inspector/rootwrap # List of directories to search executables in, in case filters do not # explicitly specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, user0, user1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. 
# INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ironic-inspector-7.2.0/devstack/0000775000175100017510000000000013241324014016631 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/devstack/plugin.sh0000666000175100017510000004512413241323457020505 0ustar zuulzuul00000000000000#!/usr/bin/env bash # This package should be tested under python 3, when the job enables Python 3 enable_python3_package ironic-inspector IRONIC_INSPECTOR_DEBUG=${IRONIC_INSPECTOR_DEBUG:-True} IRONIC_INSPECTOR_DIR=$DEST/ironic-inspector IRONIC_INSPECTOR_DATA_DIR=$DATA_DIR/ironic-inspector IRONIC_INSPECTOR_BIN_DIR=$(get_python_exec_prefix) IRONIC_INSPECTOR_BIN_FILE=$IRONIC_INSPECTOR_BIN_DIR/ironic-inspector IRONIC_INSPECTOR_DBSYNC_BIN_FILE=$IRONIC_INSPECTOR_BIN_DIR/ironic-inspector-dbsync IRONIC_INSPECTOR_CONF_DIR=${IRONIC_INSPECTOR_CONF_DIR:-/etc/ironic-inspector} IRONIC_INSPECTOR_CONF_FILE=$IRONIC_INSPECTOR_CONF_DIR/inspector.conf IRONIC_INSPECTOR_CMD="$IRONIC_INSPECTOR_BIN_FILE --config-file $IRONIC_INSPECTOR_CONF_FILE" IRONIC_INSPECTOR_DHCP_CONF_FILE=$IRONIC_INSPECTOR_CONF_DIR/dnsmasq.conf IRONIC_INSPECTOR_ROOTWRAP_CONF_FILE=$IRONIC_INSPECTOR_CONF_DIR/rootwrap.conf IRONIC_INSPECTOR_ADMIN_USER=${IRONIC_INSPECTOR_ADMIN_USER:-ironic-inspector} IRONIC_INSPECTOR_AUTH_CACHE_DIR=${IRONIC_INSPECTOR_AUTH_CACHE_DIR:-/var/cache/ironic-inspector} IRONIC_INSPECTOR_DHCP_FILTER=${IRONIC_INSPECTOR_DHCP_FILTER:-iptables} if [[ -n ${IRONIC_INSPECTOR_MANAGE_FIREWALL} ]] ; then echo "IRONIC_INSPECTOR_MANAGE_FIREWALL is deprecated." >&2 echo "Please, use IRONIC_INSPECTOR_DHCP_FILTER == noop/iptables/dnsmasq instead." >&2 if [[ "$IRONIC_INSPECTOR_DHCP_FILTER" != "iptables" ]] ; then # both manage firewall and filter driver set together but driver isn't iptables echo "Inconsistent configuration: IRONIC_INSPECTOR_MANAGE_FIREWALL used while" >&2 echo "IRONIC_INSPECTOR_DHCP_FILTER == $IRONIC_INSPECTOR_DHCP_FILTER" >&2 exit 1 fi if [[ $(trueorfalse True IRONIC_INSPECTOR_MANAGE_FIREWALL) == "False" ]] ; then echo "IRONIC_INSPECTOR_MANAGE_FIREWALL == False" >&2 echo "Setting IRONIC_INSPECTOR_DHCP_FILTER=noop" >&2 IRONIC_INSPECTOR_DHCP_FILTER=noop fi fi # dnsmasq dhcp filter configuration # override the default hostsdir so devstack collects the MAC files (/etc) IRONIC_INSPECTOR_DHCP_HOSTSDIR=${IRONIC_INSPECTOR_DHCP_HOSTSDIR:-/etc/ironic-inspector/dhcp-hostsdir} IRONIC_INSPECTOR_DNSMASQ_STOP_COMMAND=${IRONIC_INSPECTOR_DNSMASQ_STOP_COMMAND:-systemctl stop devstack@ironic-inspector-dhcp} IRONIC_INSPECTOR_DNSMASQ_START_COMMAND=${IRONIC_INSPECTOR_DNSMASQ_START_COMMAND:-systemctl start devstack@ironic-inspector-dhcp} IRONIC_INSPECTOR_HOST=$HOST_IP IRONIC_INSPECTOR_PORT=5050 IRONIC_INSPECTOR_URI="http://$IRONIC_INSPECTOR_HOST:$IRONIC_INSPECTOR_PORT" IRONIC_INSPECTOR_BUILD_RAMDISK=$(trueorfalse False IRONIC_INSPECTOR_BUILD_RAMDISK) IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz} IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz} IRONIC_INSPECTOR_COLLECTORS=${IRONIC_INSPECTOR_COLLECTORS:-default,logs,pci-devices} IRONIC_INSPECTOR_RAMDISK_LOGDIR=${IRONIC_INSPECTOR_RAMDISK_LOGDIR:-$IRONIC_INSPECTOR_DATA_DIR/ramdisk-logs} IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS=${IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS:-True} IRONIC_INSPECTOR_TIMEOUT=${IRONIC_INSPECTOR_TIMEOUT:-600} 
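# NOTE: IRONIC_INSPECTOR_TIMEOUT above is the introspection timeout (in
# seconds) that configure_inspector later writes into inspector.conf as
# [DEFAULT]timeout. The clean-up period below is intentionally empty by
# default: configure_inspector only sets [DEFAULT]clean_up_period when a
# non-empty value is supplied, so the service default applies otherwise.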
IRONIC_INSPECTOR_CLEAN_UP_PERIOD=${IRONIC_INSPECTOR_CLEAN_UP_PERIOD:-} # These should not overlap with other ranges/networks IRONIC_INSPECTOR_INTERNAL_IP=${IRONIC_INSPECTOR_INTERNAL_IP:-172.24.42.254} IRONIC_INSPECTOR_INTERNAL_SUBNET_SIZE=${IRONIC_INSPECTOR_INTERNAL_SUBNET_SIZE:-24} IRONIC_INSPECTOR_DHCP_RANGE=${IRONIC_INSPECTOR_DHCP_RANGE:-172.24.42.100,172.24.42.253} IRONIC_INSPECTOR_INTERFACE=${IRONIC_INSPECTOR_INTERFACE:-br-inspector} IRONIC_INSPECTOR_INTERFACE_PHYSICAL=$(trueorfalse False IRONIC_INSPECTOR_INTERFACE_PHYSICAL) IRONIC_INSPECTOR_INTERNAL_URI="http://$IRONIC_INSPECTOR_INTERNAL_IP:$IRONIC_INSPECTOR_PORT" IRONIC_INSPECTOR_INTERNAL_IP_WITH_NET="$IRONIC_INSPECTOR_INTERNAL_IP/$IRONIC_INSPECTOR_INTERNAL_SUBNET_SIZE" # Whether DevStack will be setup for bare metal or VMs IRONIC_IS_HARDWARE=$(trueorfalse False IRONIC_IS_HARDWARE) IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK=${IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK:-""} IRONIC_INSPECTOR_OVS_PORT=${IRONIC_INSPECTOR_OVS_PORT:-brbm-inspector} GITDIR["python-ironic-inspector-client"]=$DEST/python-ironic-inspector-client GITREPO["python-ironic-inspector-client"]=${IRONIC_INSPECTOR_CLIENT_REPO:-${GIT_BASE}/openstack/python-ironic-inspector-client.git} GITBRANCH["python-ironic-inspector-client"]=${IRONIC_INSPECTOR_CLIENT_BRANCH:-master} # This is defined in ironic's devstack plugin. Redefine it just in case, and # insert "inspector" if it's missing. IRONIC_ENABLED_INSPECT_INTERFACES=${IRONIC_ENABLED_INSPECT_INTERFACES:-"inspector,no-inspect"} if [[ "$IRONIC_ENABLED_INSPECT_INTERFACES" != *inspector* ]]; then IRONIC_ENABLED_INSPECT_INTERFACES="inspector,$IRONIC_ENABLED_INSPECT_INTERFACES" fi ### Utilities function mkdir_chown_stack { if [[ ! -d "$1" ]]; then sudo mkdir -p "$1" fi sudo chown $STACK_USER "$1" } function inspector_iniset { local section=$1 local option=$2 shift 2 # value in iniset is at $4; wrapping in quotes iniset "$IRONIC_INSPECTOR_CONF_FILE" $section $option "$*" } ### Install-start-stop function install_inspector { setup_develop $IRONIC_INSPECTOR_DIR } function install_inspector_dhcp { install_package dnsmasq } function install_inspector_client { if use_library_from_git python-ironic-inspector-client; then git_clone_by_name python-ironic-inspector-client setup_dev_lib python-ironic-inspector-client else pip_install_gr python-ironic-inspector-client fi } function start_inspector { run_process ironic-inspector "$IRONIC_INSPECTOR_CMD" } function is_inspector_dhcp_required { [[ "$IRONIC_INSPECTOR_MANAGE_FIREWALL" == "True" ]] || \ [[ "${IRONIC_INSPECTOR_DHCP_FILTER:-iptables}" != "noop" ]] } function start_inspector_dhcp { # NOTE(dtantsur): USE_SYSTEMD requires an absolute path run_process ironic-inspector-dhcp \ "$(which dnsmasq) --conf-file=$IRONIC_INSPECTOR_DHCP_CONF_FILE" \ "" root } function stop_inspector { stop_process ironic-inspector } function stop_inspector_dhcp { stop_process ironic-inspector-dhcp } ### Configuration function prepare_tftp { IRONIC_INSPECTOR_IMAGE_PATH="$TOP_DIR/files/ironic-inspector" IRONIC_INSPECTOR_KERNEL_PATH="$IRONIC_INSPECTOR_IMAGE_PATH.kernel" IRONIC_INSPECTOR_INITRAMFS_PATH="$IRONIC_INSPECTOR_IMAGE_PATH.initramfs" IRONIC_INSPECTOR_CALLBACK_URI="$IRONIC_INSPECTOR_INTERNAL_URI/v1/continue" IRONIC_INSPECTOR_KERNEL_CMDLINE="ipa-inspection-callback-url=$IRONIC_INSPECTOR_CALLBACK_URI systemd.journald.forward_to_console=yes" IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE vga=normal console=tty0 console=ttyS0" IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE 
ipa-inspection-collectors=$IRONIC_INSPECTOR_COLLECTORS" IRONIC_INSPECTOR_KERNEL_CMDLINE="$IRONIC_INSPECTOR_KERNEL_CMDLINE ipa-debug=1" if [[ "$IRONIC_INSPECTOR_BUILD_RAMDISK" == "True" ]]; then if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then build_ipa_ramdisk "$IRONIC_INSPECTOR_KERNEL_PATH" "$IRONIC_INSPECTOR_INITRAMFS_PATH" fi else # download the agent image tarball if [ ! -e "$IRONIC_INSPECTOR_KERNEL_PATH" -o ! -e "$IRONIC_INSPECTOR_INITRAMFS_PATH" ]; then if [ -e "$IRONIC_DEPLOY_KERNEL" -a -e "$IRONIC_DEPLOY_RAMDISK" ]; then cp $IRONIC_DEPLOY_KERNEL $IRONIC_INSPECTOR_KERNEL_PATH cp $IRONIC_DEPLOY_RAMDISK $IRONIC_INSPECTOR_INITRAMFS_PATH else wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_INSPECTOR_KERNEL_PATH wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_INSPECTOR_INITRAMFS_PATH fi fi fi if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then cp $IRONIC_INSPECTOR_KERNEL_PATH $IRONIC_HTTP_DIR/ironic-inspector.kernel cp $IRONIC_INSPECTOR_INITRAMFS_PATH $IRONIC_HTTP_DIR cat > "$IRONIC_HTTP_DIR/ironic-inspector.ipxe" < "$IRONIC_TFTPBOOT_DIR/pxelinux.cfg/default" <> $IRONIC_INSPECTOR_DHCP_CONF_FILE } function _dnsmasq_rootwrap_ctl_tail { # cut off the command head and amend white-spaces with commas shift local bits=$* echo ${bits//\ /, } } function configure_inspector_dnsmasq_rootwrap { # turn the ctl commands into filter rules and dump the roorwrap file local stop_cmd=( $IRONIC_INSPECTOR_DNSMASQ_STOP_COMMAND ) local start_cmd=( $IRONIC_INSPECTOR_DNSMASQ_START_COMMAND ) local stop_cmd_tail=$( _dnsmasq_rootwrap_ctl_tail ${stop_cmd[@]} ) local start_cmd_tail=$( _dnsmasq_rootwrap_ctl_tail ${start_cmd[@]} ) cat > "$IRONIC_INSPECTOR_CONF_DIR/rootwrap.d/ironic-inspector-dnsmasq.filters" <$tempfile chmod 0640 $tempfile sudo chown root:root $tempfile sudo mv $tempfile /etc/sudoers.d/ironic-inspector-rootwrap inspector_iniset DEFAULT rootwrap_config $IRONIC_INSPECTOR_ROOTWRAP_CONF_FILE mkdir_chown_stack "$IRONIC_INSPECTOR_RAMDISK_LOGDIR" inspector_iniset processing ramdisk_logs_dir "$IRONIC_INSPECTOR_RAMDISK_LOGDIR" inspector_iniset processing always_store_ramdisk_logs "$IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS" if [ -n "$IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK" ]; then inspector_iniset processing node_not_found_hook "$IRONIC_INSPECTOR_NODE_NOT_FOUND_HOOK" fi inspector_iniset DEFAULT timeout $IRONIC_INSPECTOR_TIMEOUT if [ -n "$IRONIC_INSPECTOR_CLEAN_UP_PERIOD" ]; then inspector_iniset DEFAULT clean_up_period "$IRONIC_INSPECTOR_CLEAN_UP_PERIOD" fi get_or_create_service "ironic-inspector" "baremetal-introspection" "Ironic Inspector baremetal introspection service" get_or_create_endpoint "baremetal-introspection" "$REGION_NAME" \ "$IRONIC_INSPECTOR_URI" "$IRONIC_INSPECTOR_URI" "$IRONIC_INSPECTOR_URI" if is_dnsmasq_filter_required ; then configure_inspector_dnsmasq_rootwrap configure_inspector_pxe_filter_dnsmasq fi } function configure_inspector_swift { inspector_configure_auth_for swift inspector_iniset processing store_data swift } function configure_inspector_dhcp { mkdir_chown_stack "$IRONIC_INSPECTOR_CONF_DIR" if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then cat > "$IRONIC_INSPECTOR_DHCP_CONF_FILE" < "$IRONIC_INSPECTOR_DHCP_CONF_FILE" <, which is install_inspector in our case: # https://github.com/openstack-dev/devstack/blob/dec121114c3ea6f9e515a452700e5015d1e34704/lib/stack#L32 stack_install_service inspector if is_inspector_dhcp_required; then stack_install_service inspector_dhcp fi $IRONIC_INSPECTOR_DBSYNC_BIN_FILE --config-file $IRONIC_INSPECTOR_CONF_FILE upgrade # 
calls upgrade inspector for specific release upgrade_project ironic-inspector $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH start_inspector if is_inspector_dhcp_required; then start_inspector_dhcp fi # Don't succeed unless the services come up ensure_services_started ironic-inspector if is_inspector_dhcp_required; then ensure_services_started dnsmasq fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ironic-inspector-7.2.0/devstack/upgrade/settings0000666000175100017510000000024413241323457022057 0ustar zuulzuul00000000000000# Enabling Inspector grenade plug-in # Based on Ironic/devstack/grenade/settings register_project_for_upgrade ironic-inspector register_db_to_save ironic_inspector ironic-inspector-7.2.0/devstack/settings0000666000175100017510000000006613241323457020432 0ustar zuulzuul00000000000000enable_service ironic-inspector ironic-inspector-dhcp ironic-inspector-7.2.0/devstack/example.local.conf0000666000175100017510000000333313241323457022242 0ustar zuulzuul00000000000000[[local|localrc]] # Credentials # Reference: https://docs.openstack.org/devstack/latest/configuration.html ADMIN_PASSWORD=password DATABASE_PASSWORD=$ADMIN_PASSWORD RABBIT_PASSWORD=$ADMIN_PASSWORD SERVICE_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=$ADMIN_PASSWORD SWIFT_HASH=$ADMIN_PASSWORD # Enable Neutron which is required by Ironic and disable nova-network. disable_service n-net n-novnc enable_service neutron q-svc q-agt q-dhcp q-l3 q-meta # Enable Swift for agent_* drivers enable_service s-proxy s-object s-container s-account # Enable Ironic, Ironic Inspector plugins enable_plugin ironic https://github.com/openstack/ironic enable_plugin ironic-inspector https://github.com/openstack/ironic-inspector # Disable services disable_service horizon disable_service heat h-api h-api-cfn h-api-cw h-eng disable_service cinder c-sch c-api c-vol disable_service tempest # Swift temp URL's are required for agent_* drivers. SWIFT_ENABLE_TEMPURLS=True # Create 2 virtual machines to pose as Ironic's baremetal nodes. IRONIC_VM_COUNT=2 IRONIC_VM_SPECS_RAM=1024 IRONIC_VM_SPECS_DISK=10 IRONIC_BAREMETAL_BASIC_OPS=True DEFAULT_INSTANCE_TYPE=baremetal # Enable Ironic drivers. IRONIC_ENABLED_DRIVERS=fake,agent_ipmitool,pxe_ipmitool # This driver should be in the enabled list above. IRONIC_DEPLOY_DRIVER=agent_ipmitool IRONIC_BUILD_DEPLOY_RAMDISK=False IRONIC_INSPECTOR_BUILD_RAMDISK=False VIRT_DRIVER=ironic TEMPEST_ALLOW_TENANT_ISOLATION=False # By default, DevStack creates a 10.0.0.0/24 network for instances. # If this overlaps with the hosts network, you may adjust with the # following. 
NETWORK_GATEWAY=10.1.0.1 FIXED_RANGE=10.1.0.0/24 # Log all output to files LOGDAYS=1 LOGFILE=$HOME/logs/stack.sh.log IRONIC_VM_LOG_DIR=$HOME/ironic-bm-logs ironic-inspector-7.2.0/releasenotes/0000775000175100017510000000000013241324014017516 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/notes/0000775000175100017510000000000013241324014020646 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/notes/fix-mysql-6b79049fe96edae4.yaml0000666000175100017510000000032513241323457026016 0ustar zuulzuul00000000000000--- critical: - | Fixed several issues with MySQL database support: * https://bugs.launchpad.net/bugs/1501746 * https://bugs.launchpad.net/bugs/1506160 ironic-inspector-7.2.0/releasenotes/notes/pgsql-imperative-enum-dda76f150a205d0a.yaml0000666000175100017510000000041613241323457030256 0ustar zuulzuul00000000000000--- fixes: - | For PostgreSQL, the database migration command ``ironic-inspector-dbsync upgrade`` always failed (with `enum NODE_STATE does not exist `_). This is fixed and the migration now works. ironic-inspector-7.2.0/releasenotes/notes/deprecate-setting-ipmi-creds-1581ddc63b273811.yaml0000666000175100017510000000100613241323457031254 0ustar zuulzuul00000000000000--- deprecations: - | Support for setting IPMI credentials via ironic-inspector is deprecated and will be removed completely in Pike. A new API version 1.9 was introduced with this feature de-activated. For reasoning see https://bugs.launchpad.net/ironic-python-agent/+bug/1654318. other: - | The default API version is temporarily pinned to 1.8 (before deprecating setting IPMI credentials). It will be reset to the latest version again when support for setting IPMI credentials is removed. ironic-inspector-7.2.0/releasenotes/notes/policy-engine-c44828e3131e6c62.yaml0000666000175100017510000000336213241323457026371 0ustar zuulzuul00000000000000--- features: - | Adds API access policy enforcement based on **oslo.policy** rules. Similar to other OpenStack services, operators can now configure fine-grained access policies using a ``policy.yaml`` file. See `policy.yaml.sample`_ in the code tree for the list of available policies and their default rules. This file can also be generated from the code tree with the following command:: tox -egenpolicy See the `oslo.policy package documentation`_ for more information on using and configuring API access policies. .. _policy.yaml.sample: https://git.openstack.org/cgit/openstack/ironic-inspector/plain/policy.yaml.sample .. _oslo.policy package documentation: https://docs.openstack.org/oslo.policy/latest/ upgrade: - | Due to the choice of default values for API access policy rules, some API parts of the **ironic-inspector** service will become available to a wider range of users after upgrade: - general access to the whole API is by default granted to a user with either ``admin``, ``administrator`` or ``baremetal_admin`` role (previously it allowed access only to a user with ``admin`` role) - listing of current introspection statuses and showing a given introspection is by default also allowed to a user with the ``baremetal_observer`` role If these access policies are not appropriate for your deployment, override them in a ``policy.json`` file in the **ironic-inspector** configuration directory (usually ``/etc/ironic-inspector``). See the `oslo.policy package documentation`_ for more information on using and configuring API access policies. .. 
_oslo.policy package documentation: https://docs.openstack.org/oslo.policy/latest/ ironic-inspector-7.2.0/releasenotes/notes/logs-collector-logging-356e56cd70a04a2b.yaml0000666000175100017510000000007413241323457030326 0ustar zuulzuul00000000000000--- other: - Improve logging for ramdisk logs collection. ironic-inspector-7.2.0/releasenotes/notes/missing-pxe-mac-d9329dab85513460.yaml0000666000175100017510000000015413241323457026626 0ustar zuulzuul00000000000000--- fixes: - Log a warning when add_ports is set to pxe, but no PXE MAC is returned from the ramdisk. ironic-inspector-7.2.0/releasenotes/notes/enroll-hook-d8c32eba70848210.yaml0000666000175100017510000000031313241323457026123 0ustar zuulzuul00000000000000--- upgrade: - Switch required Ironic API version to '1.11', which supports 'enroll' state. features: - Add a new node_not_found hook - enroll, which allows automatically discovering Ironic nodes. ironic-inspector-7.2.0/releasenotes/notes/no-fail-on-power-off-enroll-node-e40854f6def397b8.yaml0000666000175100017510000000022113241323457032055 0ustar zuulzuul00000000000000--- fixes: - Don't fail on the final power off if the node is in the 'enroll' state. Nodes in 'enroll' state are not expected to have power credentials. ironic-inspector-7.2.0/releasenotes/notes/optional-root-disk-9b972f504b2e6262.yaml0000666000175100017510000000020213241323457027360 0ustar zuulzuul00000000000000--- features: - | Avoid failing introspection on diskless nodes. The node property ``local_gb == 0`` is set in that case. ironic-inspector-7.2.0/releasenotes/notes/pci_devices-plugin-5b93196e0e973155.yaml0000666000175100017510000000040013241323457027321 0ustar zuulzuul00000000000000--- features: - Adds new processing hook pci_devices for setting node capabilities based on PCI devices present on a node and rules in the [pci_devices] aliases configuration option. Requires "pci-devices" collector to be enabled in IPA. ironic-inspector-7.2.0/releasenotes/notes/dnsmasq-pxe-filter-37928d3fdb1e8ec3.yaml0000666000175100017510000000037213241323457027601 0ustar zuulzuul00000000000000--- features: - | Introduces the **dnsmasq** PXE filter driver. This driver takes advantage of the ``inotify`` facility to reconfigure the **dnsmasq** service in real time to implement a caching black-/white-list of port MAC addresses. ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 00000000000000ironic-inspector-7.2.0/releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.yamlironic-inspector-7.2.0/releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.y0000666000175100017510000000055313241323457033351 0ustar zuulzuul00000000000000--- upgrade: - | The default value for the configuration option "introspection_delay_drivers" was changed to ``.*``, which means that by default "introspection_delay" is now applied to all drivers. Set "introspection_delay" to 0 to disable the delay. deprecations: - | The configuration option "introspection_delay_drivers" is deprecated. ironic-inspector-7.2.0/releasenotes/notes/add_node-with-version_id-24f51e5888480aa0.yaml0000666000175100017510000000051413241323457030472 0ustar zuulzuul00000000000000--- fixes: - | A ``version_id`` is now explicitly generated during the ``node_cache.start_introspection/.add_node`` call to avoid race conditions such as in case of the `two concurrent introspection calls bug`_. .. 
_two concurrent introspection calls bug: https://bugs.launchpad.net/ironic-inspector/+bug/1719627 ironic-inspector-7.2.0/releasenotes/notes/port-creation-plugin-c0405ec646b1051d.yaml0000666000175100017510000000046413241323457027752 0ustar zuulzuul00000000000000--- upgrade: - | Port creation logic was moved from core processing code to the ``validate_interfaces`` processing hook. This may affect deployments that disable this hook or replace it with something else. Also make sure to place this hook before any hooks expecting ports to be created. ironic-inspector-7.2.0/releasenotes/notes/remove-deprecated-conf-opts-361ab0bb342f0e7e.yaml0000666000175100017510000000027013241323457031324 0ustar zuulzuul00000000000000--- upgrade: - | Removes deprecated configuration options: ``introspection_delay_drivers`` from the default section and ``log_bmc_address`` from the ``processing`` section. ironic-inspector-7.2.0/releasenotes/notes/fix-CalledProcessError-on-startup-28d9dbed85a81542.yaml0000666000175100017510000000052413241323457032427 0ustar zuulzuul00000000000000--- fixes: - | The CalledProcessError exception was raised when running the `iptables` command on start up. The issue is caused by an eventlet bug, see: https://github.com/eventlet/eventlet/issues/357 The issue affects *ironic-inspector* only if it manages the firewall - configured with the ``manage_firewall = True`` configuration option. ironic-inspector-7.2.0/releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml0000666000175100017510000000100113241323457031331 0ustar zuulzuul00000000000000--- fixes: - The ramdisk logs are now stored on all preprocessing errors, not only ones reported by the ramdisk itself. This required moving the ramdisk logs handling from the "ramdisk_error" plugin to the generic processing code. upgrade: - Handling ramdisk logs was moved out of the "ramdisk_error" plugin, so disabling it will no longer disable handling ramdisk logs. As before, you can set "ramdisk_logs_dir" option to an empty value (the default) to disable storing ramdisk logs. ironic-inspector-7.2.0/releasenotes/notes/names-82d9f84153a228ec.yaml0000666000175100017510000000031713241323457025016 0ustar zuulzuul00000000000000--- features: - Add support for using Ironic node names in API instead of UUIDs. Note that using node names in the introspection status API will require a call to Ironic to be made by the service. ironic-inspector-7.2.0/releasenotes/notes/add-lldp-basic-plugin-98aebcf43e60931b.yaml0000666000175100017510000000017213241323457030102 0ustar zuulzuul00000000000000--- features: - Add a plugin to parse raw LLDP Basic Management, 802.1, and 802.3 TLVs and store the data in Swift. ironic-inspector-7.2.0/releasenotes/notes/fix-llc-switch-id-not-mac-e2de3adc0945ee70.yaml0000666000175100017510000000066013241323457030706 0ustar zuulzuul00000000000000--- fixes: - | Fixes a bug in which the ``switch_id`` field in a port's ``local_link_connection`` can be set to a non-MAC address if the processed LLDP has a value other than a MAC address for ``ChassisID``. The bare metal API requires the ``switch_id`` field to be a MAC address, and will return an error otherwise. See `bug 1748022 `_ for details. ironic-inspector-7.2.0/releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml0000666000175100017510000000046513241323457030355 0ustar zuulzuul00000000000000--- features: - The file name for stored ramdisk logs can now be customized via the "ramdisk_logs_filename_format" option. 
upgrade: - The default file name for stored ramdisk logs was changed to contain only the node UUID (if known) and the current date and time. A proper ".tar.gz" extension is now appended. ironic-inspector-7.2.0/releasenotes/notes/add-lldp-plugin-dependency-c323412654f71b3e.yaml0000666000175100017510000000024613241323457030707 0ustar zuulzuul00000000000000--- features: - | Add a check to the ``link_local_connection`` plugin to use data stored by the ``lldp_basic`` plugin; this avoids parsing the LLDP packets twice. ironic-inspector-7.2.0/releasenotes/notes/processing-logging-e2d27bbac95a7213.yaml0000666000175100017510000000047513241323457027641 0ustar zuulzuul00000000000000--- other: - Logging during processing is now more consistent in terms of how it identifies the node. Now we try to prefix the log message with node UUID, BMC address and PXE MAC address (if available). Logging BMC addresses can be disabled via the new "log_bmc_address" option in the "processing" section. ironic-inspector-7.2.0/releasenotes/notes/ironic-lib-hints-20412a1c7fa796e0.yaml0000666000175100017510000000041713241323457027046 0ustar zuulzuul00000000000000--- features: - Adds support for using operators with the root device hints mechanism. The supported operators are ``=``, ``==``, ``!=``, ``>=``, ``<=``, ``>``, ``<``, ``s==``, ``s!=``, ``s>=``, ``s>``, ``s<=``, ``s<``, ``<in>``, ``<or>`` and ``<and>``. ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000ironic-inspector-7.2.0/releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae386a.yamlironic-inspector-7.2.0/releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae30000666000175100017510000000021413241323457033306 0ustar zuulzuul00000000000000--- features: - Adds node state to the ``GET /v1/introspection/<node_id>`` and ``GET /v1/introspection`` API response data. ironic-inspector-7.2.0/releasenotes/notes/Reapply_update_started_at-8af8cf254cdf8cde.yaml0000666000175100017510000000023313241323457031465 0ustar zuulzuul00000000000000--- fixes: - The POST /v1/introspection/<node_id>/data/unprocessed API updates the started_at time when ironic inspector begins processing the node. ironic-inspector-7.2.0/releasenotes/notes/tempest_plugin_removal-91a01f5950f543e1.yaml0000666000175100017510000000067013241323457030407 0ustar zuulzuul00000000000000--- other: - | The tempest plugin code that was in ``ironic_inspector/test/inspector_tempest_plugin/`` has been removed. Tempest plugin code has been migrated to the project `openstack/ironic-tempest-plugin `_. This was an OpenStack wide `goal for the Queens cycle `_. ironic-inspector-7.2.0/releasenotes/notes/no-logs-stored-data-6db52934c7f9a91a.yaml0000666000175100017510000000014613241323457027557 0ustar zuulzuul00000000000000--- upgrade: - Ramdisk logs are no longer part of data stored to Swift and returned by the API. ironic-inspector-7.2.0/releasenotes/notes/ksadapters-abc9edc63cafa405.yaml0000666000175100017510000000222613241323457026416 0ustar zuulzuul00000000000000--- deprecations: - | Several configuration options related to ironic API access are deprecated and will be removed in the Rocky release. 
These include: - ``[ironic]/os_region`` - use ``[ironic]/region_name`` option instead - ``[ironic]/auth_strategy`` - set ``[ironic]/auth_type`` option to ``none`` to access ironic API in noauth mode - ``[ironic]/ironic_url`` - use ``[ironic]/endpoint_override`` option to set a specific ironic API endpoint address if discovery of the ironic API endpoint is not desired or impossible (for example in standalone mode) - ``[ironic]/os_service_type`` - use ``[ironic]/service_type`` option - ``[ironic]/os_endpoint_type`` - use ``[ironic]/valid_interfaces`` option to set the ironic endpoint types that will be tried - | Several configuration options related to swift API access are deprecated and will be removed in the Rocky release. These include: - ``[swift]/os_service_type`` - use ``[swift]/service_type`` option - ``[swift]/os_endpoint_type`` - use ``[swift]/valid_interfaces`` option - ``[swift]/os_region`` - use ``[swift]/region_name`` option ironic-inspector-7.2.0/releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml0000666000175100017510000000014313241323457027146 0ustar zuulzuul00000000000000--- fixes: - Fixed "/v1/continue" to return HTTP 500 on unexpected exceptions, not HTTP 400. ironic-inspector-7.2.0/releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml0000666000175100017510000000021113241323457027063 0ustar zuulzuul00000000000000--- fixes: - Fixed a regression in the firewall code, which caused re-running introspection for an already inspected node to fail. ironic-inspector-7.2.0/releasenotes/notes/remove-policy-json-b4746d64c1511023.yaml0000666000175100017510000000074013241323457027272 0ustar zuulzuul00000000000000--- other: - | The sample configuration file located at ``example.conf`` and the sample policy file located at ``policy.yaml.sample`` were removed in this release, as they are now published with documentation. See `the sample configuration file `_ and `the sample policy file `_. ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000ironic-inspector-7.2.0/releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a9765726c405d5.yamlironic-inspector-7.2.0/releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a97650000666000175100017510000000024213241323457033775 0ustar zuulzuul00000000000000--- features: - | Querying the **ironic-inspector** rules API now also returns the ``invert`` and ``multiple`` attributes of the associated conditions. ironic-inspector-7.2.0/releasenotes/notes/set-node-to-error-when-swift-failure-3e919ecbf9db6401.yaml0000666000175100017510000000012313241323457033047 0ustar zuulzuul00000000000000fixes: - Set the node to the error state when it failed to get data from Swift. ironic-inspector-7.2.0/releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml0000666000175100017510000000027113241323457033235 0ustar zuulzuul00000000000000--- fixes: - Fix setting a non-string 'value' field for rules' actions. As a non-string value is clearly not a formatted value, a check was added to avoid an AttributeError exception. ironic-inspector-7.2.0/releasenotes/notes/no-rollback-e15bc7fee0134545.yaml0000666000175100017510000000066013241323457026165 0ustar zuulzuul00000000000000--- upgrade: - Introspection rules actions 'set-attribute', 'set-capability' and 'extend-attribute' no longer have the opposite effect on nodes that do not match a rule. 
fixes: - Dropped rollback actions from 'set-attribute', 'set-capability' and 'extend-attribute' introspection rules actions, as they were confusing, completely undocumented and broke some real world use cases (e.g. setting the driver field). ironic-inspector-7.2.0/releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml0000666000175100017510000000025113241323457027172 0ustar zuulzuul00000000000000--- fixes: - Fixes a problem which caused an unhandled TypeError exception to bubble up when inspector was attempting to convert some eDeploy data to an integer. ironic-inspector-7.2.0/releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml0000666000175100017510000000012013241323457027111 0ustar zuulzuul00000000000000--- fixes: - Fixed the "is-empty" condition to return True on missing values. ironic-inspector-7.2.0/releasenotes/notes/abort-introspection-ae5cb5a9fbacd2ac.yaml0000666000175100017510000000016313241323457030415 0ustar zuulzuul00000000000000--- features: - Introduced API "POST /v1/introspection/<node_id>/abort" for aborting the introspection process. ironic-inspector-7.2.0/releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml0000666000175100017510000000022613241323457033361 0ustar zuulzuul00000000000000--- features: - Add configuration option `processing.power_off` defaulting to True; setting it to False allows leaving nodes powered on after introspection. ironic-inspector-7.2.0/releasenotes/notes/log-info-not-found-cache-error-afbc87e80305ca5c.yaml0000666000175100017510000000033013241323457031724 0ustar zuulzuul00000000000000--- other: - The log level of the message emitted when a node is not found in the Inspector cache was changed from error to info. This was done because the not_found_hook may handle this case, so it is not necessarily an error. ironic-inspector-7.2.0/releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml0000666000175100017510000000034713241323457026356 0ustar zuulzuul00000000000000--- prelude: > Starting with this release only ironic-python-agent (IPA) is supported as an introspection ramdisk. upgrade: - Support for the old bash-based ramdisk was removed. Please switch to IPA before upgrading. ironic-inspector-7.2.0/releasenotes/notes/add-disabled-option-to-add-ports-f8c6c9b3e6797652.yaml0000666000175100017510000000020113241323457032041 0ustar zuulzuul00000000000000--- features: - | Add ``disabled`` option to ``add_ports``, so discovered nodes can be created without creating ports. ironic-inspector-7.2.0/releasenotes/notes/db-status-consistency-enhancements-f97fbaccfc81a60b.yaml0000666000175100017510000000075413241323457033204 0ustar zuulzuul00000000000000--- upgrade: - | A new state ``aborting`` was introduced to distinguish the node introspection abort precondition (being able to perform the state transition from the ``waiting`` state) from the activities necessary to abort an ongoing node introspection (power-off, set finished timestamp etc.) fixes: - | The ``node_info.finished(<event>, error=<error>)`` call now updates node state together with other status attributes in a single DB transaction. ironic-inspector-7.2.0/releasenotes/notes/multiattribute_node_lookup-17e219ba8d3e5eb0.yaml0000666000175100017510000000111013241323457031504 0ustar zuulzuul00000000000000--- features: - | Looking up nodes during introspection or discovery now supports matching on multiple attributes. For example, two nodes can use the same ``bmc_address`` and still can be distinguished by MAC addresses. upgrade: - | Uniqueness of a node ``bmc_address`` isn't enforced any more. 
- | The primary key of the ``attributes`` table is relaxed from the ``attributes.name, attributes.value`` column pair to a new column ``attributes.uuid``. fixes: - | Introspection no longer fails on nodes with the same IPMI address but different IPMI ports. ironic-inspector-7.2.0/releasenotes/notes/less-iptables-calls-759e89d103df504c.yaml0000666000175100017510000000011413241323457027553 0ustar zuulzuul00000000000000--- fixes: - Only issue iptables calls when the list of active MACs changes. ironic-inspector-7.2.0/releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml0000666000175100017510000000032113241323457030535 0ustar zuulzuul00000000000000--- features: - Support for downgrading database migrations was removed. More info about database migration/rollback can be found here: http://docs.openstack.org/openstack-ops/content/ops_upgrades-roll-back.html ././@LongLink0000000000000000000000000000015600000000000011217 Lustar 00000000000000ironic-inspector-7.2.0/releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102923938.yamlironic-inspector-7.2.0/releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102920000666000175100017510000000034113241323457032314 0ustar zuulzuul00000000000000--- features: - | Extend the introspection status returned from ``GET@/v1/introspection/<node_id>`` to contain the ``uuid``, ``started_at`` and ``finished_at`` fields. upgrade: - Add a new dependency, ``pytz``. ironic-inspector-7.2.0/releasenotes/notes/googbye-patches-args-071532024b9260bd.yaml0000666000175100017510000000017613241323457027542 0ustar zuulzuul00000000000000--- upgrade: - Removed deprecated support for passing "node_patches" and "ports_patches" arguments to processing hooks. ironic-inspector-7.2.0/releasenotes/notes/contains-matches-ee28958b08995494.yaml0000666000175100017510000000016713241323457027043 0ustar zuulzuul00000000000000--- features: - New condition plugins "contains" and "matches" allow matching a value against regular expressions. ironic-inspector-7.2.0/releasenotes/notes/fix_llc_hook_bugs-efeea008c2f792eb.yaml0000666000175100017510000000032513241323457027665 0ustar zuulzuul00000000000000--- fixes: - LLC hook now formats the chassis ID and port ID MAC addresses into Unix format as expected by ironic. - LLC hook ensures that correct port information is passed to the patch_port function. ironic-inspector-7.2.0/releasenotes/notes/pxe-enabled-cbc3287ebe3fcd49.yaml0000666000175100017510000000042613241323457026376 0ustar zuulzuul00000000000000--- features: - | Update ``pxe_enabled`` field on ports. It is set to ``True`` for the PXE-booting port and ``False`` for the remaining ports. Both newly discovered and existing ports are affected. upgrade: - | Bare metal API version `1.19` is now required. ironic-inspector-7.2.0/releasenotes/notes/fix_llc_port_assume-4ea47d26501bddc3.yaml0000666000175100017510000000012213241323457030070 0ustar zuulzuul00000000000000--- fixes: - LLC hook no longer assumes all inspected ports are added to ironic ironic-inspector-7.2.0/releasenotes/notes/node-locking-4d135ca5b93524b1.yaml0000666000175100017510000000007713241323457026246 0ustar zuulzuul00000000000000--- fixes: - Acquire a lock on a node UUID when handling it. ironic-inspector-7.2.0/releasenotes/notes/fix-deadlock-during-cleanup-bcb6b517ef299791.yaml0000666000175100017510000000015413241323457031244 0ustar zuulzuul00000000000000--- fixes: - | Fix a bug where periodic clean up failed with DBDeadlock if introspection timed out. 
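The "contains" and "matches" condition plugins mentioned in the notes above take a regular expression in their ``value`` field. As a minimal sketch (assuming a local ironic-inspector endpoint and rules API version 1.6 or later; the field path, regex and attribute values are illustrative only, not the project's own examples), such a rule could be created via the rules API::

    import json

    import requests

    # Hypothetical endpoint of a local ironic-inspector service.
    INSPECTOR = 'http://127.0.0.1:5050'

    rule = {
        'description': 'Tag HP hardware based on a regex match',
        'conditions': [
            # "matches" applies a regular expression to the field value
            {'op': 'matches',
             'field': 'inventory.system_vendor.manufacturer',
             'value': '^(HP|Hewlett-Packard)'},
        ],
        'actions': [
            {'action': 'set-attribute',
             'path': '/extra/vendor_family',
             'value': 'hp'},
        ],
    }

    res = requests.post(INSPECTOR + '/v1/rules', data=json.dumps(rule),
                        headers={'Content-Type': 'application/json'})
    res.raise_for_status()  # 201 on success with rules API 1.6 and newer
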
ironic-inspector-7.2.0/releasenotes/notes/loopback-bmc-e60d64fe74bdf142.yaml0000666000175100017510000000015513241323457026377 0ustar zuulzuul00000000000000--- fixes: - | Loopback BMC addresses (useful e.g. with virtualbmc) are no longer used for lookup. ironic-inspector-7.2.0/releasenotes/notes/flask-debug-6d2dcc2b482324dc.yaml0000666000175100017510000000022413241323457026215 0ustar zuulzuul00000000000000--- security: - Never enable Flask debug mode as it may allow remote code execution. See https://bugs.launchpad.net/bugs/1506419 for details. ironic-inspector-7.2.0/releasenotes/notes/ipa-support-7eea800306829a49.yaml0000666000175100017510000000022413241323457026110 0ustar zuulzuul00000000000000--- other: - IPA (ironic-python-agent) is now fully supported in the devstack plugin and will become the default ramdisk in the next release. ironic-inspector-7.2.0/releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml0000666000175100017510000000013513241323457027640 0ustar zuulzuul00000000000000--- upgrade: - Removed the deprecated "root_device_hint" alias for the "raid_device" hook. ironic-inspector-7.2.0/releasenotes/notes/rollback-removal-a03a989e2e9f776b.yaml0000666000175100017510000000013013241323457027231 0ustar zuulzuul00000000000000--- upgrade: - | Support for rollback actions in introspection rules was removed. ironic-inspector-7.2.0/releasenotes/notes/empty-ipmi-address-5b5ca186a066ed32.yaml0000666000175100017510000000022213241323457027466 0ustar zuulzuul00000000000000--- fixes: - | ``0.0.0.0`` and an empty string in the ``bmc_address`` inventory field are now correctly treated as a missing BMC address. ironic-inspector-7.2.0/releasenotes/notes/deprecate-rollback-dea95ac515d3189b.yaml0000666000175100017510000000025613241323457027573 0ustar zuulzuul00000000000000--- deprecations: - The rollback actions for introspection rules are deprecated. No in-tree actions are using them; 3rd-party actions should stop using them as soon as possible. ironic-inspector-7.2.0/releasenotes/notes/ipa-inventory-0a1e8d644da850ff.yaml0000666000175100017510000000130113241323457026641 0ustar zuulzuul00000000000000--- prelude: > Starting with this release, ironic-python-agent becomes the default introspection ramdisk, with the old bash-based ramdisk being deprecated. features: - Inspector no longer requires old-style "local_gb", "memory_mb", "cpus" and "cpu_arch" fields from the introspection ramdisk. They are still supported, though, for compatibility with the old ramdisk. upgrade: - The root_disk_selection processing hook will now error out if root device hints are specified on an ironic node, but ironic-python-agent is not used as an introspection ramdisk. deprecations: - Using the old bash-based ramdisk is deprecated, please switch to ironic-python-agent as soon as possible. ironic-inspector-7.2.0/releasenotes/notes/processing-data-type-check-7c914339d3ab15ba.yaml0000666000175100017510000000034113241323457031070 0ustar zuulzuul00000000000000--- fixes: - The data processing API endpoint now validates that data received from the ramdisk is actually a JSON object instead of failing with an internal error later (issue https://bugs.launchpad.net/bugs/1525876). ironic-inspector-7.2.0/releasenotes/notes/extra-hardware-swift-aeebf299b9605bb0.yaml0000666000175100017510000000010013241323457030165 0ustar zuulzuul00000000000000--- fixes: - Fixed extra_hardware plugin connection to Swift. 
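The JSON-object validation described in the ``processing-data-type-check`` note above can be exercised directly against the ramdisk callback endpoint. A minimal sketch, assuming a local test deployment (the URL is an assumption; a real ramdisk would post its full inventory)::

    import json

    import requests

    CONTINUE_URL = 'http://127.0.0.1:5050/v1/continue'

    # A JSON list is not a JSON object, so the endpoint now rejects it
    # up front with HTTP 400 instead of failing deep inside processing.
    res = requests.post(CONTINUE_URL,
                        data=json.dumps(['not', 'an', 'object']),
                        headers={'Content-Type': 'application/json'})
    print(res.status_code)  # expected: 400
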
ironic-inspector-7.2.0/releasenotes/notes/rules-invert-2585173a11db3c31.yaml0000666000175100017510000000020413241323457026240 0ustar zuulzuul00000000000000--- features: - Introspection rules conditions got a new generic "invert" parameter that inverts the result of the condition. ironic-inspector-7.2.0/releasenotes/notes/hook-deps-83a867c7af0300e4.yaml0000666000175100017510000000032413241323457025571 0ustar zuulzuul00000000000000--- features: - | Processing hooks can now define dependencies on other processing hooks. **ironic-inspector** start up fails when required hooks are not enabled before the hook that requires them. ironic-inspector-7.2.0/releasenotes/notes/fix-crash-when-use-postgresql-ac6c708f48f55c83.yaml0000666000175100017510000000021213241323457031610 0ustar zuulzuul00000000000000--- fixes: - Use only single quotes for strings inside SQL statements. Fixes a crash when PostgreSQL is used as a database backend. ironic-inspector-7.2.0/releasenotes/notes/fix-periodic-tasks-configuration-edd167f0146e60b5.yaml0000666000175100017510000000034013241323457032330 0ustar zuulzuul00000000000000--- fixes: - | Ensure the configuration options ``firewall.firewall_update_period`` and ``clean_up_period`` are applied to the ``periodic_clean_up`` and ``periodic_update`` tasks after the config file is read. ironic-inspector-7.2.0/releasenotes/notes/local_gb-250bd415684a7855.yaml0000666000175100017510000000017713241323457025315 0ustar zuulzuul00000000000000--- upgrade: - | Handling of the ``local_gb`` property was moved from the ``scheduler`` hook to ``root_disk_selection``. ironic-inspector-7.2.0/releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml0000666000175100017510000000166313241323457030465 0ustar zuulzuul00000000000000--- features: - Ironic-Inspector is now using keystoneauth and proper auth plugins instead of keystoneclient for communicating with Ironic and Swift. This makes it possible to finely tune authentication for each service independently. For each service, the keystone session is created and reused, minimizing the number of authentication requests to Keystone. upgrade: - Operators are advised to specify a proper keystoneauth plugin and its appropriate settings in the [ironic] and [swift] config sections. Backward compatibility with previous authentication options is included. Using authentication information for Ironic and Swift from the [keystone_authtoken] config section is no longer supported. deprecations: - Most of the current authentication options for either Ironic or Swift are deprecated and will be removed in a future release. Please configure keystoneauth auth plugin authentication instead. ironic-inspector-7.2.0/releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml0000666000175100017510000000060513241323457031302 0ustar zuulzuul00000000000000--- upgrade: - API "POST /v1/rules" returns a 201 response code instead of 200 on successful creation. API version was bumped to 1.6. API versions lower than 1.6 continue to return 200. - The default API version was changed from the minimum to the maximum that Inspector can support. fixes: - Fix the response code for the rule creation endpoint; it now returns 201 instead of 200 on success. ironic-inspector-7.2.0/releasenotes/notes/allow-periodics-shutdown-inspector-ac28ea5ba3224279.yaml0000666000175100017510000000015513241323457032727 0ustar zuulzuul00000000000000--- other: - | Allows a periodic task to shut down an **ironic-inspector** process upon a failure. 
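The keystoneauth pattern described in the ``keystoneauth-plugins`` note above boils down to one auth plugin plus one reused session per service. A minimal sketch using the keystoneauth1 library (all endpoint addresses and credentials are illustrative, not the service's actual configuration)::

    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    # Illustrative credentials; in the service these come from the
    # [ironic] or [swift] config section via a keystoneauth plugin.
    auth = v3.Password(auth_url='http://127.0.0.1/identity/v3',
                       username='ironic', password='secret',
                       project_name='service',
                       user_domain_name='Default',
                       project_domain_name='Default')

    # The session caches the token, so repeated requests to ironic or
    # swift do not re-authenticate against keystone each time.
    sess = session.Session(auth=auth)
    resp = sess.get('http://127.0.0.1:6385/v1/nodes')
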
ironic-inspector-7.2.0/releasenotes/notes/sphinx-docs-4d0a5886261e57bf.yaml0000666000175100017510000000076713241323457026156 0ustar zuulzuul00000000000000--- prelude: > This release includes automatic `docs` generation via Sphinx. other: - | Introduced new docs generation via `Sphinx `_ and `ReST `_. * Separate `doc` folder includes `source` and `build` * Integration with `tox `_ as `docs` target * `makefile` for manual building * `Openstack Theme `_ support ironic-inspector-7.2.0/releasenotes/notes/active_states_timeout-3e3ab110870483ec.yaml0000666000175100017510000000034413241323457030276 0ustar zuulzuul00000000000000--- fixes: - | Timeout in an active state led to an `undefined transition error `_. This is fixed and introspection now finishes with a ``Timeout`` error. ironic-inspector-7.2.0/releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml0000666000175100017510000000020713241323457030635 0ustar zuulzuul00000000000000--- features: - Introduced API "POST /v1/introspection/UUID/data/unprocessed" for reapplying the introspection over stored data. ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000ironic-inspector-7.2.0/releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970c.yamlironic-inspector-7.2.0/releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970000666000175100017510000000055313241323457033325 0ustar zuulzuul00000000000000--- fixes: - | Change database columns ``started_at`` and ``finished_at`` to type DateTime from type Float so that timestamps fit into these columns correctly. upgrade: - | A database migration is required to change some columns from Float to DateTime type. This may take some time based on the number of introspection statuses in the DB. ironic-inspector-7.2.0/releasenotes/notes/keystone-noauth-9ba5ad9884c6273c.yaml0000666000175100017510000000031313241323457027130 0ustar zuulzuul00000000000000--- fixes: - | Ironic introspection no longer tries to access the Identity service if the ``auth_strategy`` option is set to ``noauth`` and the ``auth_type`` option is not set to ``none``. ironic-inspector-7.2.0/releasenotes/notes/extend-rules-9a9d38701e970611.yaml0000666000175100017510000000035513241323457026174 0ustar zuulzuul00000000000000--- features: - Conditions now support comparing fields from node info; - Actions support formatting to fetch values from introspection data. See http://docs.openstack.org/developer/ironic-inspector/usage.html#introspection-rulesironic-inspector-7.2.0/releasenotes/notes/patch-head-backslash-24bcdd03ba254bf2.yaml0000666000175100017510000000021513241323457030026 0ustar zuulzuul00000000000000--- fixes: - Introspection rules (e.g. set-attribute action) now accept the 'path' field without a leading forward slash, as the Ironic CLI does. ironic-inspector-7.2.0/releasenotes/notes/capabilities-15cc2268d661f0a0.yaml0000666000175100017510000000022013241323457026314 0ustar zuulzuul00000000000000--- features: - Added a new "capabilities" processing hook detecting the CPU and boot mode capabilities (the latter disabled by default). ironic-inspector-7.2.0/releasenotes/notes/preprocessing-error-01e55b4db20fb7fc.yaml0000666000175100017510000000022513241323457030122 0ustar zuulzuul00000000000000--- fixes: - Fixed the confusing error message shown to the user when something bad happens during preprocessing (https://launchpad.net/bugs/1523907). 
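The two rule extensions described in the ``extend-rules`` note above can be combined in a single rule. A sketch under stated assumptions: the ``node://`` scheme reads a field from the ironic node while ``{...}`` formatting reads from the introspection data; the driver name, field paths and formatted value below are illustrative, not taken from the project's documentation::

    # Hypothetical rule body for POST /v1/rules.
    rule = {
        'description': 'Copy the discovered BMC address into driver_info',
        'conditions': [
            # node:// fields compare against the ironic node itself
            {'op': 'eq', 'field': 'node://driver',
             'value': 'agent_ipmitool'},
        ],
        'actions': [
            # the value is formatted from the introspection data
            {'action': 'set-attribute',
             'path': '/driver_info/ipmi_address',
             'value': '{data[inventory][bmc_address]}'},
        ],
    }
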
ironic-inspector-7.2.0/releasenotes/notes/bmc-logging-deprecation-4ca046a64fac6f11.yaml0000666000175100017510000000012613241323457030507 0ustar zuulzuul00000000000000--- deprecations: - | The configuration option "log_bmc_address" is deprecated. ironic-inspector-7.2.0/releasenotes/notes/.placeholder0000666000175100017510000000000013241323457023133 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/notes/drop-maintenance-a9a87a9a2af051ad.yaml0000666000175100017510000000026213241323457027340 0ustar zuulzuul00000000000000--- upgrade: - Removed support for introspecting nodes in maintenance mode, deprecated in the liberty cycle. Use "inspecting", "manageable" or "enroll" states instead. ironic-inspector-7.2.0/releasenotes/notes/futurist-557fcd18d4eaf1c1.yaml0000666000175100017510000000022413241323457026006 0ustar zuulzuul00000000000000--- upgrade: - The minimum possible value for the "max_concurrency" setting is now 2. other: - Switched to the Futurist library for asynchronous tasks. ironic-inspector-7.2.0/releasenotes/notes/status-removal-fa1d9a98ffad9f60.yaml0000666000175100017510000000066713241323457027213 0ustar zuulzuul00000000000000--- upgrade: - | Old status records are no longer removed by default. They are still removed if a node is removed from Ironic. deprecations: - | The ``node_status_keep_time`` configuration option is deprecated. Now that we can remove status information about nodes removed from **ironic**, this option does not make much sense, and `may be confusing `_ ironic-inspector-7.2.0/releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml0000666000175100017510000000042513241323457027124 0ustar zuulzuul00000000000000--- fixes: - The lookup procedure now uses all valid MACs, not only the MAC(s) that will be used for creating port(s). - The "enroll" node_not_found_hook now uses all valid MACs to check node existence, not only the MAC(s) that will be used for creating port(s). ironic-inspector-7.2.0/releasenotes/notes/infiniband-support-960d6846e326dec4.yaml0000666000175100017510000000062313241323457027532 0ustar zuulzuul00000000000000--- features: - | InfiniBand interface discovery is now supported through introspection. The ironic-inspector will add the client-id to the corresponding ironic port that represents the InfiniBand interface. The ironic-inspector should be configured with a list of interfaces ``firewall.ethoib_interfaces`` to indicate which Ethernet over InfiniBand interfaces are used for DHCP. ironic-inspector-7.2.0/releasenotes/notes/deprecate-root-device-hint-909d389b7efed5da.yaml0000666000175100017510000000014113241323457031252 0ustar zuulzuul00000000000000--- deprecations: - Using the root_device_hint alias for the raid_device plugin is deprecated. ironic-inspector-7.2.0/releasenotes/notes/port-list-retry-745d1cf41780e961.yaml0000666000175100017510000000061213241323457026731 0ustar zuulzuul00000000000000--- fixes: - | The periodic PXE filter update task now retries fetching the port list from the Bare Metal service 5 times (with a 1 second delay) before giving up. This ensures that a temporary networking glitch will not result in the ironic-inspector service stopping. upgrade: - | Adds a dependency on the `retrying `_ python library. ironic-inspector-7.2.0/releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml0000666000175100017510000000015713241323457025757 0ustar zuulzuul00000000000000--- fixes: - The "size" root device hint is now always converted to an integer for consistency with IPA. 
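The retry behaviour described in the ``port-list-retry`` note above maps onto the ``retrying`` library roughly as follows. A minimal sketch; the function name, body and client variable are illustrative, not the service's actual code::

    from retrying import retry

    @retry(stop_max_attempt_number=5, wait_fixed=1000)
    def fetch_ports(ironic_client):
        # A transient networking error raised here is retried up to five
        # times, one second apart, before it is allowed to propagate.
        return ironic_client.port.list(limit=0, fields=['address', 'extra'])
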
ironic-inspector-7.2.0/releasenotes/notes/ipmi-credentials-removal-0021f89424fbf7a3.yaml0000666000175100017510000000052713241323457030600 0ustar zuulzuul00000000000000--- upgrade: - | The experimental support for setting IPMI credentials was removed from all versions of the API. The current **ironic-inspector** API version was bumped to `1.12` to mark this change. - | The default API version was synchronized with the current API version again after removal of the IPMI credentials setting. ironic-inspector-7.2.0/releasenotes/notes/cors-5f345c65da7f5c99.yaml0000666000175100017510000000104613241323457024753 0ustar zuulzuul00000000000000--- features: - | Added CORS support middleware to Ironic Inspector, allowing a deployer to optionally configure rules under which a JavaScript client may break the single-origin policy and access the API directly. OpenStack CrossProject Spec: http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html Oslo_Middleware Docs: http://docs.openstack.org/developer/oslo.middleware/cors.html OpenStack Cloud Admin Guide: http://docs.openstack.org/admin-guide-cloud/cross_project_cors.html ironic-inspector-7.2.0/releasenotes/notes/compact-debug-logging-b15dd9bbdd3ce27a.yaml0000666000175100017510000000023313241323457030411 0ustar zuulzuul00000000000000--- other: - Make debug-level logging more compact by removing newlines from firewall logging and disabling some 3rdparty debug messages by default. ironic-inspector-7.2.0/releasenotes/notes/empty-condition-abc707b771be6be3.yaml0000666000175100017510000000017513241323457027244 0ustar zuulzuul00000000000000--- features: - Added new condition plugin "is-empty", which allows matching an empty string, list, dictionary or None. ironic-inspector-7.2.0/releasenotes/notes/rollback-formatting-7d61c9af2600d42f.yaml0000666000175100017510000000040313241323457027716 0ustar zuulzuul00000000000000--- fixes: - | Do not fail the whole introspection due to a value formatting error during introspection rules rollback. See `bug 1686942 `_ for an example and detailed investigation. ironic-inspector-7.2.0/releasenotes/notes/fix-wrong-provision-state-name-150c91c48d471bf9.yaml0000666000175100017510000000105313241323457031707 0ustar zuulzuul00000000000000--- fixes: - | The wrong provision state name 'inspectfail' was used in the *ironic-inspector* valid states for node inspection. This issue led to a state inconsistency between *ironic* and *ironic-inspector*. For example, if the *ironic* inspection timeout is lower than *ironic-inspector*'s and an inspection timeout occurs, *ironic* will transition the node into the 'inspect failed' provision state. In such a case, when node inspection finishes without errors, the node will be in the 'inspect failed' provision state with inspection in the 'finished' state. ironic-inspector-7.2.0/releasenotes/notes/disable-dhcp-c86a3a0ee2696ee0.yaml0000666000175100017510000000056113241323457026365 0ustar zuulzuul00000000000000--- fixes: - DHCP is now disabled completely when no nodes are on introspection and the "node_not_found_hook" is not set. This reduces the probability of serving DHCP to the wrong nodes, if their NIC is not registered in Ironic. See https://bugs.launchpad.net/ironic-inspector/+bug/1557979 and https://bugzilla.redhat.com/show_bug.cgi?id=1317695 for details. 
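Clients select an API version through the version header mentioned implicitly by the ``ipmi-credentials-removal`` note above (API bumped to 1.12). A minimal sketch of pinning a request to that version; the URL and token are illustrative, and the exact header names should be treated as assumptions based on the service's version negotiation::

    import requests

    # Assumed header names for ironic-inspector version negotiation.
    res = requests.get(
        'http://127.0.0.1:5050/v1/introspection',
        headers={'X-OpenStack-Ironic-Inspector-API-Version': '1.12',
                 'X-Auth-Token': 'admin-token'})
    # The service advertises its supported range in the response headers.
    print(res.headers.get('X-OpenStack-Ironic-Inspector-API-Maximum-Version'))
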
ironic-inspector-7.2.0/releasenotes/notes/empty-ipmi-address-2-4d57c34aec7d14e2.yaml0000666000175100017510000000056213241323457027720 0ustar zuulzuul00000000000000--- fixes: - | The older ``ipmi_address`` field in the introspection data no longer has priority over the newer ``bmc_address`` inventory field during lookup. This fixes lookup based on MAC addresses, when the BMC address is reported as ``0.0.0.0`` for any reason (see `bug 1714944 `_). ironic-inspector-7.2.0/releasenotes/notes/migrations-autogenerate-4303fd496c3c2757.yaml0000666000175100017510000000007413241323457030466 0ustar zuulzuul00000000000000--- other: - Allow autogeneration of database migrations. ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 00000000000000ironic-inspector-7.2.0/releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4379c3854894.yamlironic-inspector-7.2.0/releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4370000666000175100017510000000052213241323457033603 0ustar zuulzuul00000000000000--- features: - | Add an API endpoint for listing introspection statuses. Operators can use this to get the status for all running or previously run introspection processing. - | Introduce a new configuration option ``api_max_limit`` that defines the maximum number of items per page when API results are paginated. ironic-inspector-7.2.0/releasenotes/notes/firewall-refactoring-17e8ad764f2cde8d.yaml0000666000175100017510000000156413241323457030252 0ustar zuulzuul00000000000000--- features: - | The PXE filter drivers mechanism is now enabled. The firewall-based filtering was re-implemented as the ``iptables`` PXE filter driver. deprecations: - | The firewall-specific configuration options were moved from the ``firewall`` to the ``iptables`` group. All options in the ``iptables`` group are now deprecated. - | The generic firewall options ``firewall_update_period`` and ``manage_firewall`` were moved under the ``pxe_filter`` group as ``sync_period`` and ``driver=iptables/noop`` respectively. fixes: - | Should the ``iptables`` PXE filter encounter an unexpected exception in the periodic ``sync`` call, the exception will be logged and the filter driver will be reset in order to make subsequent ``sync`` calls fail (and propagate the failure, exiting the **ironic-inspector** process eventually). ironic-inspector-7.2.0/releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml0000666000175100017510000000035313241323457026700 0ustar zuulzuul00000000000000--- features: - Added GenericLocalLinkConnectionHook processing plugin to process LLDP data returned during inspection and set port ID and switch ID in an Ironic node's port local link connection information using that data. ironic-inspector-7.2.0/releasenotes/notes/deprecated-options-removal-ocata-a44dadf3bcf8d6fc.yaml0000666000175100017510000000035013241323457032663 0ustar zuulzuul00000000000000--- upgrade: - | Removed previously deprecated authentication options from "ironic", "swift", and "keystone_authtoken" sections. - | Removed long deprecated support for "discoverd" section in configuration file. ironic-inspector-7.2.0/releasenotes/notes/introspection-state-03538fac198882b6.yaml0000666000175100017510000000137213241323457027655 0ustar zuulzuul00000000000000--- features: - Node introspection state is now kept in a dedicated database column. The introspection is now using a finite state machine. The state isn't exposed to the user yet. 
issues: - Due to the nature of the NodeInfo.state attribute (being updated independently from the rest of the node_info attributes) if a (DB) connection was lost before the Node.state column was updated, Node.finished_at and Node.error columns may not be in sync with the Node.state column. upgrade: - Node.state and Node.version_id database columns are introduced. - The introspection state column defaults to the state ``finished`` unless the introspection error column value on a node row isn't null, then node state is set to ``error``. ironic-inspector-7.2.0/releasenotes/source/0000775000175100017510000000000013241324014021016 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/conf.py0000666000175100017510000002221313241323457022331 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Ironic Inspector Release Notes documentation build configuration file, # created by sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'reno.sphinxext', ] try: import openstackdocstheme extensions.append('openstackdocstheme') except ImportError: openstackdocstheme = None repository_name = 'openstack/ironic-inspector' bug_project = 'ironic-inspector' bug_tag = '' html_last_updated_fmt = '%Y-%m-%d %H:%M' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Ironic Inspector Release Notes' copyright = u'2015, Ironic Inspector Developers' # Release notes are version independent. # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if openstackdocstheme is not None: html_theme = 'openstackdocs' else: html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'IronicInspectorReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'IronicInspectorReleaseNotes.tex', u'Ironic Inspector Release Notes Documentation', u'Ironic Inspector Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'ironicinspectorreleasenotes', u'Ironic Inspector Release Notes Documentation', [u'Ironic Inspector Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'IronicInspectorReleaseNotes', u'Ironic Inspector Release Notes Documentation', u'Ironic Inspector Developers', 'IronicInspectorReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ironic-inspector-7.2.0/releasenotes/source/_static/0000775000175100017510000000000013241324014022444 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/_static/.placeholder0000666000175100017510000000000013241323457024731 0ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/liberty.rst0000666000175100017510000000027513241323457023242 0ustar zuulzuul00000000000000============================================ Liberty Series (2.0.0 - 2.2.7) Release Notes ============================================ .. 
release-notes:: :branch: origin/stable/liberty ironic-inspector-7.2.0/releasenotes/source/unreleased.rst0000666000175100017510000000015313241323457023712 0ustar zuulzuul00000000000000============================ Current Series Release Notes ============================ .. release-notes:: ironic-inspector-7.2.0/releasenotes/source/mitaka.rst0000666000175100017510000000027113241323457023032 0ustar zuulzuul00000000000000=========================================== Mitaka Series (2.3.0 - 3.2.x) Release Notes =========================================== .. release-notes:: :branch: origin/stable/mitaka ironic-inspector-7.2.0/releasenotes/source/pike.rst0000666000175100017510000000025213241323457022513 0ustar zuulzuul00000000000000========================================= Pike Series (6.0.0 - 6.0.x) Release Notes ========================================= .. release-notes:: :branch: stable/pike ironic-inspector-7.2.0/releasenotes/source/index.rst0000666000175100017510000000030413241323457022670 0ustar zuulzuul00000000000000============================== Ironic Inspector Release Notes ============================== .. toctree:: :maxdepth: 1 unreleased queens pike ocata newton mitaka liberty ironic-inspector-7.2.0/releasenotes/source/queens.rst0000666000175100017510000000026513241323457023067 0ustar zuulzuul00000000000000============================================ Queens Series (6.1.0 - 7.1.x) Release Notes ============================================ .. release-notes:: :branch: stable/queens ironic-inspector-7.2.0/releasenotes/source/locale/0000775000175100017510000000000013241324014022255 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/locale/fr/0000775000175100017510000000000013241324014022664 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175100017510000000000013241324014024451 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000666000175100017510000000463013241323457027521 0ustar zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Ironic Inspector Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-03 16:43+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:07+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "2.2.3" msgstr "2.2.3" msgid "2.2.4" msgstr "2.2.4" msgid "2.2.5" msgstr "2.2.5" msgid "2.2.6" msgstr "2.2.6" msgid "2.3.0" msgstr "2.3.0" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "3.2.0" msgstr "3.2.0" msgid "3.2.1" msgstr "3.2.1" msgid "3.2.2" msgstr "3.2.2" msgid "3.2.2-7" msgstr "3.2.2-7" msgid "3.3.0" msgstr "3.3.0" msgid "4.0.0" msgstr "4.0.0" msgid "4.1.0" msgstr "4.1.0" msgid "4.2.0" msgstr "4.2.0" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Critical Issues" msgstr "Erreurs critiques" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Ironic Inspector Release Notes" msgstr "Note de release d'Ironic Inspector" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Other Notes" msgstr "Autres notes" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Start using reno to manage release notes." 
msgstr "Commence à utiliser reno pour la gestion des notes de release" msgid "Upgrade Notes" msgstr "Notes de mises à jours" msgid "`Openstack Theme `_ support" msgstr "`Openstack Theme `_ support" msgid "http://docs.openstack.org/admin-guide-cloud/cross_project_cors.html" msgstr "http://docs.openstack.org/admin-guide-cloud/cross_project_cors.html" msgid "http://docs.openstack.org/developer/oslo.middleware/cors.html" msgstr "http://docs.openstack.org/developer/oslo.middleware/cors.html" msgid "" "http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html" msgstr "" "http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html" msgid "https://bugs.launchpad.net/bugs/1501746" msgstr "https://bugs.launchpad.net/bugs/1501746" msgid "https://bugs.launchpad.net/bugs/1506160" msgstr "https://bugs.launchpad.net/bugs/1506160" ironic-inspector-7.2.0/releasenotes/source/locale/en_GB/0000775000175100017510000000000013241324014023227 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000013241324014025014 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000666000175100017510000015244713241323457030076 0ustar zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: Ironic Inspector Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-02-05 19:57+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-06 01:21+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en-GB\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "2.2.3" msgstr "2.2.3" msgid "2.2.4" msgstr "2.2.4" msgid "2.2.5" msgstr "2.2.5" msgid "2.2.6" msgstr "2.2.6" msgid "2.3.0" msgstr "2.3.0" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "3.2.0" msgstr "3.2.0" msgid "3.2.1" msgstr "3.2.1" msgid "3.2.2" msgstr "3.2.2" msgid "3.2.2-7" msgstr "3.2.2-7" msgid "3.3.0" msgstr "3.3.0" msgid "4.0.0" msgstr "4.0.0" msgid "4.1.0" msgstr "4.1.0" msgid "4.2.0" msgstr "4.2.0" msgid "4.2.1" msgstr "4.2.1" msgid "4.2.2" msgstr "4.2.2" msgid "5.0.0" msgstr "5.0.0" msgid "5.0.1" msgstr "5.0.1" msgid "5.1.0" msgstr "5.1.0" msgid "6.0.0" msgstr "6.0.0" msgid "6.0.0-10" msgstr "6.0.0-10" msgid "6.1.0" msgstr "6.1.0" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.0-16" msgstr "7.0.0-16" msgid "" "A ``version_id`` is now explicitly generated during the ``node_cache." "start_introspection/.add_node`` call to avoid race conditions such as in " "case of the `two concurrent introspection calls bug`_." msgstr "" "A ``version_id`` is now explicitly generated during the ``node_cache." "start_introspection/.add_node`` call to avoid race conditions such as in " "case of the `two concurrent introspection calls bug`_." msgid "" "A database migration is required to change some columns from Float to " "DateTime type. This may take some time based on the number of introspection " "statuses in DB." msgstr "" "A database migration is required to change some columns from Float to " "DateTime type. This may take some time based on the number of introspection " "statuses in the DB." 
msgid "" "A new state ``aborting`` was introduced to distinguish between the node " "introspection abort precondition (being able to perform the state transition " "from the ``waiting`` state) from the activities necessary to abort an " "ongoing node introspection (power-off, set finished timestamp etc.)" msgstr "" "A new state ``aborting`` was introduced to distinguish between the node " "introspection abort precondition (being able to perform the state transition " "from the ``waiting`` state) from the activities necessary to abort an " "ongoing node introspection (power-off, set finished timestamp etc.)" msgid "" "API \"POST /v1/rules\" returns 201 response code instead of 200 on creating " "success. API version was bumped to 1.6. API less than 1.6 continues to " "return 200." msgstr "" "API \"POST /v1/rules\" returns 201 response code instead of 200 on creating " "success. API version was bumped to 1.6. API less than 1.6 continues to " "return 200." msgid "Acquire a lock on a node UUID when handling it." msgstr "Acquire a lock on a node UUID when handling it." msgid "" "Actions support formatting to fetch values from introspection data. See " "http://docs.openstack.org/developer/ironic-inspector/usage." "html#introspection-rules" msgstr "" "Actions support formatting to fetch values from introspection data. See " "http://docs.openstack.org/developer/ironic-inspector/usage." "html#introspection-rules" msgid "" "Add ``disabled`` option to ``add_ports``, so discovered nodes can be created " "without creating ports." msgstr "" "Add ``disabled`` option to ``add_ports``, so discovered nodes can be created " "without creating ports." msgid "" "Add a check from the ``link_local_connection`` plugin to use data stored by " "the ``lldp_basic``; this avoids parsing the LLDP packets twice." msgstr "" "Add a check from the ``link_local_connection`` plugin to use data stored by " "the ``lldp_basic``; this avoids parsing the LLDP packets twice." msgid "Add a new dependency, ``pytz``." msgstr "Add a new dependency, ``pytz``." msgid "" "Add a new node_not_found hook - enroll, which allows automatically discover " "Ironic's node." msgstr "" "Add a new node_not_found hook - enroll, which allows automatic discovery of " "Ironic's node." msgid "" "Add a plugin to parse raw LLDP Basic Management, 802.1, and 802.3 TLVs and " "store the data in Swift." msgstr "" "Add a plugin to parse raw LLDP Basic Management, 802.1, and 802.3 TLVs and " "store the data in Swift." msgid "" "Add an API endpoint for listing introspection statuses. Operators can use " "this to get the status for all running or previously run introspection " "processing." msgstr "" "Add an API endpoint for listing introspection statuses. Operators can use " "this to get the status for all running or previously run introspection " "processing." msgid "" "Add configuration option `processing.power_off` defaulting to True, which " "allows to leave nodes powered on after introspection." msgstr "" "Add configuration option `processing.power_off` defaulting to True, which " "allows to leave nodes powered on after introspection." msgid "" "Add support for using Ironic node names in API instead of UUIDs. Note that " "using node names in the introspection status API will require a call to " "Ironic to be made by the service." msgstr "" "Add support for using Ironic node names in API instead of UUIDs. Note that " "using node names in the introspection status API will require a call to " "Ironic to be made by the service." 
msgid "" "Added CORS support middleware to Ironic Inspector, allowing a deployer to " "optionally configure rules under which a javascript client may break the " "single-origin policy and access the API directly." msgstr "" "Added CORS support middleware to Ironic Inspector, allowing a deployer to " "optionally configure rules under which a JavaScript client may break the " "single-origin policy and access the API directly." msgid "" "Added GenericLocalLinkConnectionHook processing plugin to process LLDP data " "returned during inspection and set port ID and switch ID in an Ironic node's " "port local link connection information using that data." msgstr "" "Added GenericLocalLinkConnectionHook processing plugin to process LLDP data " "returned during inspection and set port ID and switch ID in an Ironic node's " "port local link connection information using that data." msgid "" "Added a new \"capabilities\" processing hook detecting the CPU and boot mode " "capabilities (the latter disabled by default)." msgstr "" "Added a new \"capabilities\" processing hook detecting the CPU and boot mode " "capabilities (the latter disabled by default)." msgid "" "Added new condition plugin \"is-empty\", which allows to match empty string, " "list, dictionary or None." msgstr "" "Added new condition plugin \"is-empty\", which allows to match empty string, " "list, dictionary or None." msgid "" "Adds an API access policy enforcment based on **oslo.policy** rules. Similar " "to other OpenStack services, operators now can configure fine-grained access " "policies using ``policy.yaml`` file. See `policy.yaml.sample`_ in the code " "tree for the list of available policies and their default rules. This file " "can also be generated from the code tree with the following command::" msgstr "" "Adds an API access policy enforcment based on **oslo.policy** rules. Similar " "to other OpenStack services, operators now can configure fine-grained access " "policies using ``policy.yaml`` file. See `policy.yaml.sample`_ in the code " "tree for the list of available policies and their default rules. This file " "can also be generated from the code tree with the following command::" msgid "" "Adds new processing hook pci_devices for setting node capabilities based on " "PCI devices present on a node and rules in the [pci_devices] aliases " "configuration option. Requires \"pci-devices\" collector to be enabled in " "IPA." msgstr "" "Adds new processing hook pci_devices for setting node capabilities based on " "PCI devices present on a node and rules in the [pci_devices] aliases " "configuration option. Requires \"pci-devices\" collector to be enabled in " "IPA." msgid "" "Adds node state to the ``GET /v1/introspection/`` and " "``GET /v1/introspection`` API response data." msgstr "" "Adds node state to the ``GET /v1/introspection/`` and " "``GET /v1/introspection`` API response data." msgid "" "Adds support for using operators with the root device hints mechanism. The " "supported operators are ``=``, ``==``, ``!=``, ``>=``, ``<=``, ``>``, ``<``, " "``s==``, ``s!=``, ``s>=``, ``s>``, ``s<=``, ``s<``, ````, ```` " "and ````." msgstr "" "Adds support for using operators with the root device hints mechanism. The " "supported operators are ``=``, ``==``, ``!=``, ``>=``, ``<=``, ``>``, ``<``, " "``s==``, ``s!=``, ``s>=``, ``s>``, ``s<=``, ``s<``, ````, ```` " "and ````." msgid "Allow autogeneration of database migrations." msgstr "Allow auto-generation of database migrations." 
msgid "" "Allows a periodic task to shut down an **ironic-inspector** process upon a " "failure." msgstr "" "Allows a periodic task to shut down an **ironic-inspector** process upon a " "failure." msgid "" "Avoid failing introspection on diskless nodes. The node property ``local_gb " "== 0`` is set in that case." msgstr "" "Avoid failing introspection on diskless nodes. The node property ``local_gb " "== 0`` is set in that case." msgid "Bare metal API version `1.19` is now required." msgstr "Bare metal API version `1.19` is now required." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Change database columns ``started_at`` and ``finished_at`` to type DateTime " "from type Float so that timestamps fit into these columns correctly." msgstr "" "Change database columns ``started_at`` and ``finished_at`` to type DateTime " "from type Float so that timestamps fit into these columns correctly." msgid "Conditions now support comparing fields from node info;" msgstr "Conditions now support comparing fields from node info;" msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "DHCP is now disabled completely when no nodes are on introspection and the " "\"node_not_found_hook\" is not set. This reduces probability of serving DHCP " "to wrong nodes, if their NIC is not registered in Ironic. See https://bugs." "launchpad.net/ironic-inspector/+bug/1557979 and https://bugzilla.redhat.com/" "show_bug.cgi?id=1317695 for details." msgstr "" "DHCP is now disabled completely when no nodes are on introspection and the " "\"node_not_found_hook\" is not set. This reduces probability of serving DHCP " "to wrong nodes, if their NIC is not registered in Ironic. See https://bugs." "launchpad.net/ironic-inspector/+bug/1557979 and https://bugzilla.redhat.com/" "show_bug.cgi?id=1317695 for details." msgid "" "Database migrations downgrade was removed. More info about database " "migration/rollback could be found here http://docs.openstack.org/openstack-" "ops/content/ops_upgrades-roll-back.html" msgstr "" "Database migrations downgrade was removed. More info about database " "migration/rollback could be found here http://docs.openstack.org/openstack-" "ops/content/ops_upgrades-roll-back.html" msgid "" "Default API version is temporary pinned to 1.8 (before deprecating setting " "IPMI credentials). It will be reset to the latest version again when support " "for setting IPMI credentials is removed." msgstr "" "Default API version is temporary pinned to 1.8 (before deprecating setting " "IPMI credentials). It will be reset to the latest version again when support " "for setting IPMI credentials is removed." msgid "" "Default API version was changed from minimum to maximum which Inspector can " "support." msgstr "" "Default API version was changed from minimum to maximum which Inspector can " "support." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Do not fail the whole introspection due to a value formatting error during " "introspection rules rollback. See `bug 1686942 `_ for an example and detailed investigation." msgstr "" "Do not fail the whole introspection due to a value formatting error during " "introspection rules rollback. See `bug 1686942 `_ for an example and detailed investigation." msgid "" "Don't fail on finish power off if node in 'enroll' state. Nodes in 'enroll' " "state are not expected to have power credentials." msgstr "" "Don't fail on finish power off if node in 'enrol' state. 
Nodes in 'enrol' " "state are not expected to have power credentials." msgid "" "Dropped rollback actions from 'set-attribute', 'set-capability' and 'extend-" "attribute' introspection rules actions, as they were confusing, completely " "undocumented and broke some real world use cases (e.g. setting driver field)." msgstr "" "Dropped rollback actions from 'set-attribute', 'set-capability' and 'extend-" "attribute' introspection rules actions, as they were confusing, completely " "undocumented and broke some real world use cases (e.g. setting driver field)." msgid "" "Due to the choice of default values for API access policies rules, some API " "parts of the **ironic-inspector** service will become available to wider " "range of users after upgrade:" msgstr "" "Due to the choice of default values for API access policies rules, some API " "parts of the **ironic-inspector** service will become available to wider " "range of users after upgrade:" msgid "" "Due to the nature of the NodeInfo.state attribute (being updated " "independently from the rest of the node_info attributes) if a (DB) " "connection was lost before the Node.state column was updated, Node." "finished_at and Node.error columns may not be in sync with the Node.state " "column." msgstr "" "Due to the nature of the NodeInfo.state attribute (being updated " "independently from the rest of the node_info attributes) if a (DB) " "connection was lost before the Node.state column was updated, Node." "finished_at and Node.error columns may not be in sync with the Node.state " "column." msgid "" "Ensure the configuration options ``firewall.firewall_update_period`` and " "``clean_up_period`` are applied to the ``periodic_clean_up`` and " "``periodic_update`` tasks after the config file is read." msgstr "" "Ensure the configuration options ``firewall.firewall_update_period`` and " "``clean_up_period`` are applied to the ``periodic_clean_up`` and " "``periodic_update`` tasks after the config file is read." msgid "" "Exception CalledProcessError is raised when running `iptables` cmd on start " "up. The issue is caused by eventlet bug, see: https://github.com/eventlet/" "eventlet/issues/357 The issue affects *ironic-inspector* only if it manages " "firewall - configured with ``manage_firewall = True`` configuration option." msgstr "" "Exception CalledProcessError is raised when running `iptables` cmd on start " "up. The issue is caused by eventlet bug, see: https://github.com/eventlet/" "eventlet/issues/357 The issue affects *ironic-inspector* only if it manages " "firewall - configured with ``manage_firewall = True`` configuration option." msgid "" "Experimental setting IPMI credentials support was removed from all versions " "of the API. The current **ironic-inspector** API version was bumped to " "`1.12` to mark this change." msgstr "" "Experimental setting IPMI credentials support was removed from all versions " "of the API. The current **ironic-inspector** API version was bumped to " "`1.12` to mark this change." msgid "" "Extend the introspection status returned from ``GET@/v1/introspection/`` to contain the ``uuid``, ``started_at`` and ``finished_at`` fields." msgstr "" "Extend the introspection status returned from ``GET@/v1/introspection/`` to contain the ``uuid``, ``started_at`` and ``finished_at`` fields." msgid "" "File name for stored ramdisk logs can now be customized via " "\"ramdisk_logs_filename_format\" option." 
msgstr "" "File name for stored ramdisk logs can now be customised via " "\"ramdisk_logs_filename_format\" option." msgid "" "Fix bug where periodic clean up failed with DBDeadlock if introspection " "timed out." msgstr "" "Fix bug where periodic clean up failed with DBDeadlock if introspection " "timed out." msgid "" "Fix response return code for rule creating endpoint, it returns 201 now " "instead of 200 on success." msgstr "" "Fix response return code for rule creating endpoint, it returns 201 now " "instead of 200 on success." msgid "" "Fix setting non string 'value' field for rule's actions. As non string value " "is obviously not a formatted value, add the check to avoid AttributeError " "exception." msgstr "" "Fix setting non string 'value' field for rule's actions. As non string value " "is obviously not a formatted value, add the check to avoid AttributeError " "exception." msgid "" "Fixed \"/v1/continue\" to return HTTP 500 on unexpected exceptions, not HTTP " "400." msgstr "" "Fixed \"/v1/continue\" to return HTTP 500 on unexpected exceptions, not HTTP " "400." msgid "" "Fixed a regression in the firewall code, which causes re-running " "introspection for an already inspected node to fail." msgstr "" "Fixed a regression in the firewall code, which causes re-running " "introspection for an already inspected node to fail." msgid "" "Fixed an issue with deleting nodes from cache on MySQL, see https://bugs." "launchpad.net/ironic-inspector/+bug/1511187 for details." msgstr "" "Fixed an issue with deleting nodes from cache on MySQL, see https://bugs." "launchpad.net/ironic-inspector/+bug/1511187 for details." msgid "" "Fixed confusing error message shown to user when something bad happens " "during preprocessing (https://launchpad.net/bugs/1523907)." msgstr "" "Fixed confusing error message shown to user when something bad happens " "during preprocessing (https://launchpad.net/bugs/1523907)." msgid "Fixed extra_hardware plugin connection to Swift." msgstr "Fixed extra_hardware plugin connection to Swift." msgid "Fixed several issues with MySQL database support:" msgstr "Fixed several issues with MySQL database support:" msgid "Fixed the \"is-empty\" condition to return True on missing values." msgstr "Fixed the \"is-empty\" condition to return True on missing values." msgid "" "Fixes a problem which caused an unhandled TypeError exception to bubble up " "when inspector was attempting to convert some eDeploy data to integer." msgstr "" "Fixes a problem which caused an unhandled TypeError exception to bubble up " "when inspector was attempting to convert some eDeploy data to integer." msgid "" "For postgreSQL, the database migration command ``ironic-inspector-dbsync " "upgrade`` always failed (with `enum NODE_STATE does not exist `_). This is fixed and the " "migration now works." msgstr "" "For postgreSQL, the database migration command ``ironic-inspector-dbsync " "upgrade`` always failed (with `enum NODE_STATE does not exist `_). This is fixed and the " "migration now works." msgid "" "Handling of ``local_gb`` property was moved from the ``scheduler`` hook to " "``root_disk_selection``." msgstr "" "Handling of ``local_gb`` property was moved from the ``scheduler`` hook to " "``root_disk_selection``." msgid "" "Handling ramdisk logs was moved out of the \"ramdisk_error\" plugin, so " "disabling it will no longer disable handling ramdisk logs. As before, you " "can set \"ramdisk_logs_dir\" option to an empty value (the default) to " "disable storing ramdisk logs." 
msgstr "" "Handling ramdisk logs was moved out of the \"ramdisk_error\" plugin, so " "disabling it will no longer disable handling ramdisk logs. As before, you " "can set \"ramdisk_logs_dir\" option to an empty value (the default) to " "disable storing ramdisk logs." msgid "" "IPA (ironic-python-agent) is now fully supported in the devstack plugin and " "will become the default ramdisk in the next release." msgstr "" "IPA (ironic-python-agent) is now fully supported in the Devstack plugin and " "will become the default ramdisk in the next release." msgid "" "If these access policies are not appropriate for your deployment, override " "them in a ``policy.json`` file in the **ironic-inspector** configuration " "directory (usually ``/etc/ironic-inspector``)." msgstr "" "If these access policies are not appropriate for your deployment, override " "them in a ``policy.json`` file in the **ironic-inspector** configuration " "directory (usually ``/etc/ironic-inspector``)." msgid "Improve logging for ramdisk logs collection." msgstr "Improve logging for ramdisk logs collection." msgid "" "InfiniBand interface discovery is now supported through introspection. The " "ironic-inspector will add the client-id to the corresponding ironic port " "that represents the InfiniBand interface. The ironic-inspector should be " "configured with a list of interfaces ``firewall.ethoib_interfaces`` to " "indicate which Ethernet Over InfiniBand Interfaces are used for DHCP." msgstr "" "InfiniBand interface discovery is now supported through introspection. The " "ironic-inspector will add the client-id to the corresponding ironic port " "that represents the InfiniBand interface. The ironic-inspector should be " "configured with a list of interfaces ``firewall.ethoib_interfaces`` to " "indicate which Ethernet Over InfiniBand Interfaces are used for DHCP." msgid "" "Inspector no longer requires old-style \"local_gb\", \"memory_mb\", \"cpus\" " "and \"cpu_arch\" fields from the introspection ramdisk. They are still " "supported, though, for compatibility with the old ramdisk." msgstr "" "Inspector no longer requires old-style \"local_gb\", \"memory_mb\", \"cpus\" " "and \"cpu_arch\" fields from the introspection ramdisk. They are still " "supported, though, for compatibility with the old ramdisk." msgid "" "Integration with `tox `_ as " "`docs` target" msgstr "" "Integration with `tox `_ as " "`docs` target" msgid "" "Introduce a new configuration option ``api_max_limit`` that defines the " "maximum number of items per page when API results are paginated." msgstr "" "Introduce a new configuration option ``api_max_limit`` that defines the " "maximum number of items per page when API results are paginated." msgid "" "Introduced API \"POST /v1/introspection//abort\" for aborting the " "introspection process." msgstr "" "Introduced API \"POST /v1/introspection//abort\" for aborting the " "introspection process." msgid "" "Introduced API \"POST /v1/introspection/UUID/data/unprocessed\" for " "reapplying the introspection over stored data." msgstr "" "Introduced API \"POST /v1/introspection/UUID/data/unprocessed\" for " "reapplying the introspection over stored data." msgid "" "Introduced new docs generation via `Sphinx `_ and `ReST `_." msgstr "" "Introduced new docs generation via `Sphinx `_ and `ReST `_." msgid "" "Introduces the **dnsmasq** PXE filter driver. 
This driver takes advantage of " "the ``inotify`` facility to reconfigure the **dnsmasq** service in real time " "to implement a caching black-/white-list of port MAC addresses." msgstr "" "Introduces the **dnsmasq** PXE filter driver. This driver takes advantage of " "the ``inotify`` facility to reconfigure the **dnsmasq** service in real time " "to implement a caching black-/white-list of port MAC addresses." msgid "" "Introspection fails on nodes with the same IPMI address but different IPMI " "ports." msgstr "" "Introspection fails on nodes with the same IPMI address but different IPMI " "ports." msgid "" "Introspection rules (e.g. set-attribute action) now accept 'path' field " "without leading forward slash as Ironic cli does." msgstr "" "Introspection rules (e.g. set-attribute action) now accept 'path' field " "without leading forward slash as Ironic CLI does." msgid "" "Introspection rules actions 'set-attribute', 'set-capability' and 'extend-" "attribute' no longer have the opposite effect on nodes that do not match a " "rule." msgstr "" "Introspection rules actions 'set-attribute', 'set-capability' and 'extend-" "attribute' no longer have the opposite effect on nodes that do not match a " "rule." msgid "" "Introspection rules conditions got a new generic \"invert\" parameter that " "inverts the result of the condition." msgstr "" "Introspection rules conditions got a new generic \"invert\" parameter that " "inverts the result of the condition." msgid "Ironic Inspector Release Notes" msgstr "Ironic Inspector Release Notes" msgid "" "Ironic-Inspector is now using keystoneauth and proper auth_plugins instead " "of keystoneclient for communicating with Ironic and Swift. It allows to " "finely tune authentification for each service independently. For each " "service, the keystone session is created and reused, minimizing the number " "of authentification requests to Keystone." msgstr "" "Ironic-Inspector is now using keystoneauth and proper auth_plugins instead " "of keystoneclient for communicating with Ironic and Swift. It allows to " "finely tune authentication for each service independently. For each service, " "the Keystone session is created and reused, minimising the number of " "authentication requests to Keystone." msgid "Known Issues" msgstr "Known Issues" msgid "" "LLC hook ensures that correct port information is passed to patch_port " "function" msgstr "" "LLC hook ensures that correct port information is passed to patch_port " "function" msgid "" "LLC hook ensures that correct port information is passed to the patch_port " "function" msgstr "" "LLC hook ensures that correct port information is passed to the patch_port " "function" msgid "LLC hook no longer assumes all inspected ports are added to ironic" msgstr "LLC hook no longer assumes all inspected ports are added to ironic" msgid "" "LLC hook now formats the chassis ID and port ID MAC addresses into Unix " "format as expected by ironic." msgstr "" "LLC hook now formats the chassis ID and port ID MAC addresses into Unix " "format as expected by ironic." msgid "" "LLC hook now formats the chassis id and port id MAC addresses into Unix " "format as expected by ironic." msgstr "" "LLC hook now formats the chassis id and port id MAC addresses into Unix " "format as expected by ironic." msgid "Liberty Series (2.0.0 - 2.2.7) Release Notes" msgstr "Liberty Series (2.0.0 - 2.2.7) Release Notes" msgid "" "Log a warning when add_ports is set to pxe, but no PXE MAC is returned from " "the ramdisk." 
msgstr "" "Log a warning when add_ports is set to pxe, but no PXE MAC is returned from " "the ramdisk." msgid "" "Log level for error when node was not found in Inspector cache was changed " "from error to info level. It was done because not_found_hook may handle this " "case, so this wouldn't be error anymore." msgstr "" "Log level for error when node was not found in Inspector cache was changed " "from error to info level. It was done because not_found_hook may handle this " "case, so this wouldn't be error any more." msgid "" "Logging during processing is now more consistent in terms of how it " "identifies the node. Now we try to prefix the log message with node UUID, " "BMC address and PXE MAC address (if available). Logging BMC addresses can be " "disabled via new \"log_bmc_address\" option in the \"processing\" section." msgstr "" "Logging during processing is now more consistent in terms of how it " "identifies the node. Now we try to prefix the log message with node UUID, " "BMC address and PXE MAC address (if available). Logging BMC addresses can be " "disabled via new \"log_bmc_address\" option in the \"processing\" section." msgid "" "Looking up nodes during introspection or discovery now supports multiple " "attributes matching. For example, two nodes can use the same ``bmc_address`` " "and still can be distinguished by MAC addresses." msgstr "" "Looking up nodes during introspection or discovery now supports multiple " "attributes matching. For example, two nodes can use the same ``bmc_address`` " "and still can be distinguished by MAC addresses." msgid "" "Loopback BMC addresses (useful e.g. with virtualbmc) are no longer used for " "lookup." msgstr "" "Loopback BMC addresses (useful e.g. with virtualbmc) are no longer used for " "lookup." msgid "" "Make debug-level logging more compact by removing newlines from firewall " "logging and disabling some 3rdparty debug messages by default." msgstr "" "Make debug-level logging more compact by removing newlines from firewall " "logging and disabling some 3rdparty debug messages by default." msgid "Minimum possible value for the \"max_concurrency\" setting is now 2." msgstr "Minimum possible value for the \"max_concurrency\" setting is now 2." msgid "Mitaka Series (2.3.0 - 3.2.x) Release Notes" msgstr "Mitaka Series (2.3.0 - 3.2.x) Release Notes" msgid "" "Most of current authentification options for either Ironic or Swift are " "deprecated and will be removed in a future release. Please configure the " "keystoneauth auth plugin authentification instead." msgstr "" "Most of current authentication options for either Ironic or Swift are " "deprecated and will be removed in a future release. Please configure the " "keystoneauth auth plugin authentication instead." msgid "" "Never enable Flask debug mode as it may allow remote code execution. See " "https://bugs.launchpad.net/bugs/1506419 for details." msgstr "" "Never enable Flask debug mode as it may allow remote code execution. See " "https://bugs.launchpad.net/bugs/1506419 for details." msgid "New Features" msgstr "New Features" msgid "" "New condition plugins \"contains\" and \"matches\" allow to match value " "against regular expressions." msgstr "" "New condition plugins \"contains\" and \"matches\" allow to match value " "against regular expressions." msgid "Newton Series (3.3.0 - 4.2.x) Release Notes" msgstr "Newton Series (3.3.0 - 4.2.x) Release Notes" msgid "" "Node introspection state is now kept in a dedicated database column. 
The " "introspection is now using a finite state machine. The state isn't exposed " "to the user yet." msgstr "" "Node introspection state is now kept in a dedicated database column. The " "introspection is now using a finite state machine. The state isn't exposed " "to the user yet." msgid "Node.state and Node.version_id database columns are introduced." msgstr "Node.state and Node.version_id database columns are introduced." msgid "Ocata Series (5.0.0 - 5.0.x) Release Notes" msgstr "Ocata Series (5.0.0 - 5.0.x) Release Notes" msgid "" "Old status records are no longer removed by default. They are still removed " "if a node is removed from Ironic." msgstr "" "Old status records are no longer removed by default. They are still removed " "if a node is removed from Ironic." msgid "Only issue iptables calls when list of active MAC's changes." msgstr "Only issue iptables calls when list of active MAC addresses changes." msgid "OpenStack Cloud Admin Guide:" msgstr "OpenStack Cloud Admin Guide:" msgid "OpenStack CrossProject Spec:" msgstr "OpenStack CrossProject Spec:" msgid "" "Operators are advised to specify a proper keystoneauth plugin and its " "appropriate settings in [ironic] and [swift] config sections. Backward " "compatibility with previous authentification options is included. Using " "authentification informaiton for Ironic and Swift from [keystone_authtoken] " "config section is no longer supported." msgstr "" "Operators are advised to specify a proper keystoneauth plugin and its " "appropriate settings in [ironic] and [swift] config sections. Backward " "compatibility with previous authentication options is included. Using " "authentication information for Ironic and Swift from [keystone_authtoken] " "config section is no longer supported." msgid "Oslo_Middleware Docs:" msgstr "Oslo_Middleware Docs:" msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series (6.0.0 - 6.0.x) Release Notes" msgstr "Pike Series (6.0.0 - 6.0.x) Release Notes" msgid "" "Ports creating logic was moved from core processing code to the " "``validate_interfaces`` processing hook. This may affect deployments that " "disable this hook or replace it with something else. Also make sure to place " "this hook before any hooks expecting ports to be created." msgstr "" "Ports creating logic was moved from core processing code to the " "``validate_interfaces`` processing hook. This may affect deployments that " "disable this hook or replace it with something else. Also make sure to place " "this hook before any hooks expecting ports to be created." msgid "Prelude" msgstr "Prelude" msgid "" "Processing hooks can now define dependencies on other processing hooks. " "**ironic-inspector** start up fails when required hooks are not enabled " "before the hook that requires them." msgstr "" "Processing hooks can now define dependencies on other processing hooks. " "**ironic-inspector** start up fails when required hooks are not enabled " "before the hook that requires them." msgid "" "Querying **ironic-inspector** rules API now also returns the ``invert`` and " "``multiple`` attributes of the associated conditions." msgstr "" "Querying **ironic-inspector** rules API now also returns the ``invert`` and " "``multiple`` attributes of the associated conditions." msgid "" "Ramdisk logs are no longer part of data stored to Swift and returned by the " "API." msgstr "" "Ramdisk logs are no longer part of data stored to Swift and returned by the " "API." 
msgid "" "Removed deprecated support for passing \"node_patches\" and \"ports_patches" "\" arguments to processing hooks." msgstr "" "Removed deprecated support for passing \"node_patches\" and \"ports_patches" "\" arguments to processing hooks." msgid "" "Removed long deprecated support for \"discoverd\" section in configuration " "file." msgstr "" "Removed long deprecated support for \"discoverd\" section in configuration " "file." msgid "" "Removed previously deprecated authentication options from \"ironic\", \"swift" "\", and \"keystone_authtoken\" sections." msgstr "" "Removed previously deprecated authentication options from \"ironic\", \"swift" "\", and \"keystone_authtoken\" sections." msgid "" "Removed support for introspecting nodes in maintenance mode, deprecated in " "the liberty cycle. Use \"inspecting\", \"manageable\" or \"enroll\" states " "instead." msgstr "" "Removed support for introspecting nodes in maintenance mode, deprecated in " "the liberty cycle. Use \"inspecting\", \"manageable\" or \"enroll\" states " "instead." msgid "" "Removed the deprecated \"root_device_hint\" alias for the \"raid_device\" " "hook." msgstr "" "Removed the deprecated \"root_device_hint\" alias for the \"raid_device\" " "hook." msgid "" "Removes deprecated configuration options: ``introspection_delay_drivers`` " "from the default section and ``log_bmc_address`` from the ``processing`` " "section." msgstr "" "Removes deprecated configuration options: ``introspection_delay_drivers`` " "from the default section and ``log_bmc_address`` from the ``processing`` " "section." msgid "Security Issues" msgstr "Security Issues" msgid "" "See the `oslo.policy package documentation`_ for more information on using " "and configuring API access policies." msgstr "" "See the `oslo.policy package documentation`_ for more information on using " "and configuring API access policies." msgid "Separate `doc` folder includes `source` and `build`" msgstr "Separate `doc` folder includes `source` and `build`" msgid "Set the node to the error state when it failed get data from swift." msgstr "Set the node to the error state when it failed get data from Swift." msgid "" "Several configuration options related to ironic API access are deprecated " "and will be removed in the Rocky release. These include:" msgstr "" "Several configuration options related to ironic API access are deprecated " "and will be removed in the Rocky release. These include:" msgid "" "Several configuration options related to swift API access are deprecated and " "will be removed in Rocky release. These include:" msgstr "" "Several configuration options related to Swift API access are deprecated and " "will be removed in Rocky release. These include:" msgid "" "Should the ``iptables`` PXE filter encounter an unexpected exception in the " "periodic ``sync`` call, the exception will be logged and the filter driver " "will be reset in order to make subsequent ``sync`` calls fail (and propagate " "the failure, exiting the **ironic-inspector** process eventually)." msgstr "" "Should the ``iptables`` PXE filter encounter an unexpected exception in the " "periodic ``sync`` call, the exception will be logged and the filter driver " "will be reset in order to make subsequent ``sync`` calls fail (and propagate " "the failure, exiting the **ironic-inspector** process eventually)." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." 
msgid "" "Starting with this release only ironic-python-agent (IPA) is supported as an " "introspection ramdisk." msgstr "" "Starting with this release only ironic-python-agent (IPA) is supported as an " "introspection ramdisk." msgid "" "Starting with this release, ironic-python-agent becomes the default " "introspection ramdisk, with the old bash-based ramdisk being deprecated." msgstr "" "Starting with this release, ironic-python-agent becomes the default " "introspection ramdisk, with the old bash-based ramdisk being deprecated." msgid "Support for rollback actions in introspection rules was removed." msgstr "Support for rollback actions in introspection rules was removed." msgid "" "Support for setting IPMI credentials via ironic-inspector is deprecated and " "will be removed completely in Pike. A new API version 1.9 was introduced " "with this feature de-activated. For reasoning see https://bugs.launchpad.net/" "ironic-python-agent/+bug/1654318." msgstr "" "Support for setting IPMI credentials via ironic-inspector is deprecated and " "will be removed completely in Pike. A new API version 1.9 was introduced " "with this feature de-activated. For reasoning see https://bugs.launchpad.net/" "ironic-python-agent/+bug/1654318." msgid "" "Support for the old bash-based ramdisk was removed. Please switch to IPA " "before upgrading." msgstr "" "Support for the old bash-based ramdisk was removed. Please switch to IPA " "before upgrading." msgid "" "Switch required Ironic API version to '1.11', which supports 'enroll' state." msgstr "" "Switch required Ironic API version to '1.11', which supports 'enroll' state." msgid "Switched to Futurist library for asynchronous tasks." msgstr "Switched to Futurist library for asynchronous tasks." msgid "" "The \"enroll\" node_not_found_hook now uses all valid MAC's to check node " "existence, not only the MAC(s) that will be used for creating port(s)." msgstr "" "The \"enroll\" node_not_found_hook now uses all valid MACs to check node " "existence, not only the MAC(s) that will be used for creating port(s)." msgid "" "The \"size\" root device hint is now always converted to an integer for " "consistency with IPA." msgstr "" "The \"size\" root device hint is now always converted to an integer for " "consistency with IPA." msgid "" "The POST /v1/introspection//data/unprocessed API updates the " "started_at time when ironic inspector begins processing the node." msgstr "" "The POST /v1/introspection//data/unprocessed API updates the " "started_at time when ironic inspector begins processing the node." msgid "" "The PXE filter drivers mechanism is now enabled. The firewall-based " "filtering was re-implemented as the ``iptables`` PXE filter driver." msgstr "" "The PXE filter drivers mechanism is now enabled. The firewall-based " "filtering was re-implemented as the ``iptables`` PXE filter driver." msgid "" "The ``node_info.finished(, error=)`` now updates node " "state together with other status attributes in a single DB transaction." msgstr "" "The ``node_info.finished(, error=)`` now updates node " "state together with other status attributes in a single DB transaction." msgid "" "The ``node_status_keep_time`` configuration option is deprecated. Now that " "we can remove status information about nodes removed from **ironic**, this " "option does not make much sense, and `may be confusing `_" msgstr "" "The ``node_status_keep_time`` configuration option is deprecated. 
Now that " "we can remove status information about nodes removed from **ironic**, this " "option does not make much sense, and `may be confusing `_" msgid "The configuration option \"introspection_delay_drivers\" is deprecated." msgstr "" "The configuration option \"introspection_delay_drivers\" is deprecated." msgid "The configuration option \"log_bmc_address\" is deprecated." msgstr "The configuration option \"log_bmc_address\" is deprecated." msgid "" "The data processing API endpoint now validates that data received from the " "ramdisk is actually a JSON object instead of failing the internal error " "later (issue https://bugs.launchpad.net/bugs/1525876)." msgstr "" "The data processing API endpoint now validates that data received from the " "ramdisk is actually a JSON object instead of failing the internal error " "later (issue https://bugs.launchpad.net/bugs/1525876)." msgid "" "The default API version was synchronized with the current API version again " "after removal of the IPMI credentials setting." msgstr "" "The default API version was synchronised with the current API version again " "after removal of the IPMI credentials setting." msgid "" "The default file name for stored ramdisk logs was change to contain only " "node UUID (if known) and the current date time. A proper \".tar.gz\" " "extension is now appended." msgstr "" "The default file name for stored ramdisk logs was change to contain only " "node UUID (if known) and the current date time. A proper \".tar.gz\" " "extension is now appended." msgid "" "The default value for the configuration option \"introspection_delay_drivers" "\" was changed to ``.*``, which means that by default \"introspection_delay" "\" is now applied to all drivers. Set \"introspection_delay\" to 0 to " "disable the delay." msgstr "" "The default value for the configuration option \"introspection_delay_drivers" "\" was changed to ``.*``, which means that by default \"introspection_delay" "\" is now applied to all drivers. Set \"introspection_delay\" to 0 to " "disable the delay." msgid "" "The firewall-specific configuration options were moved from the ``firewall`` " "to the ``iptables`` group. All options in the ``iptables`` group are now " "deprecated." msgstr "" "The firewall-specific configuration options were moved from the ``firewall`` " "to the ``iptables`` group. All options in the ``iptables`` group are now " "deprecated." msgid "" "The generic firewall options ``firewall_update_period`` and " "``manage_firewall`` were moved under the ``pxe_filter`` group as " "``sync_period`` and ``driver=iptables/noop`` respectively." msgstr "" "The generic firewall options ``firewall_update_period`` and " "``manage_firewall`` were moved under the ``pxe_filter`` group as " "``sync_period`` and ``driver=iptables/noop`` respectively." msgid "" "The introspection state column defaults to the state ``finished`` unless the " "introspection error column value on a node row isn't null, then node state " "is set to ``error``." msgstr "" "The introspection state column defaults to the state ``finished`` unless the " "introspection error column value on a node row isn't null, then node state " "is set to ``error``." msgid "" "The lookup procedure now uses all valid MAC's, not only the MAC(s) that will " "be used for creating port(s)." msgstr "" "The lookup procedure now uses all valid MACs, not only the MAC(s) that will " "be used for creating port(s)." 
msgid "" "The older ``ipmi_address`` field in the introspection data no longer has " "priority over the newer ``bmc_address`` inventory field during lookup. This " "fixes lookup based on MAC addresses, when the BMC address is reported as " "``0.0.0.0`` for any reason (see `bug 1714944 `_)." msgstr "" "The older ``ipmi_address`` field in the introspection data no longer has " "priority over the newer ``bmc_address`` inventory field during lookup. This " "fixes lookup based on MAC addresses, when the BMC address is reported as " "``0.0.0.0`` for any reason (see `bug 1714944 `_)." msgid "" "The primary key of the ``attributes`` table is relaxed from the ``attributes." "name, attributes.value`` column pair to a new column ``attributes.uuid``." msgstr "" "The primary key of the ``attributes`` table is relaxed from the ``attributes." "name, attributes.value`` column pair to a new column ``attributes.uuid``." msgid "" "The ramdisk logs are now stored on all preprocessing errors, not only ones " "reported by the ramdisk itself. This required moving the ramdisk logs " "handling from the \"ramdisk_error\" plugin to the generic processing code." msgstr "" "The ramdisk logs are now stored on all preprocessing errors, not only ones " "reported by the ramdisk itself. This required moving the ramdisk logs " "handling from the \"ramdisk_error\" plugin to the generic processing code." msgid "" "The rollback actions for introspection rules are deprecated. No in-tree " "actions are using them, 3rdpart should stop using them as soon as possible." msgstr "" "The rollback actions for introspection rules are deprecated. No in-tree " "actions are using them, 3rdpart should stop using them as soon as possible." msgid "" "The root_disk_selection processing hook will now error out if root device " "hints are specified on ironic node, but ironic-python-agent is not used as " "an introspection ramdisk." msgstr "" "The root_disk_selection processing hook will now error out if root device " "hints are specified on ironic node, but ironic-python-agent is not used as " "an introspection ramdisk." msgid "" "The tempest plugin code that was in ``ironic_inspector/test/" "inspector_tempest_plugin/`` has been removed. Tempest plugin code has been " "migrated to the project `openstack/ironic-tempest-plugin `_. This was an OpenStack " "wide `goal for the Queens cycle `_." msgstr "" "The tempest plugin code that was in ``ironic_inspector/test/" "inspector_tempest_plugin/`` has been removed. Tempest plugin code has been " "migrated to the project `openstack/ironic-tempest-plugin `_. This was an OpenStack " "wide `goal for the Queens cycle `_." msgid "This release includes automatic `docs` generation via Sphinx." msgstr "This release includes automatic `docs` generation via Sphinx." msgid "" "Timeout in an active state led to an `undefined transition error `_. This is fixed and an " "introspection finishes now with ``Timeout`` error." msgstr "" "Timeout in an active state led to an `undefined transition error `_. This is fixed and an " "introspection finishes now with ``Timeout`` error." msgid "Uniqueness of a node ``bmc_address`` isn't enforced any more." msgstr "Uniqueness of a node ``bmc_address`` isn't enforced any more." msgid "" "Update ``pxe_enabled`` field on ports. It is set to ``True`` for the PXE-" "booting port and ``False`` for the remaining ports. Both newly discovered " "and existing ports are affected." msgstr "" "Update ``pxe_enabled`` field on ports. 
It is set to ``True`` for the PXE-" "booting port and ``False`` for the remaining ports. Both newly discovered " "and existing ports are affected." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Use only single quotes for strings inside SQL statements. Fixes a crash when " "PostgreSQL is used as a database backend." msgstr "" "Use only single quotes for strings inside SQL statements. Fixes a crash when " "PostgreSQL is used as a database backend." msgid "" "Using old bash-based ramdisk is deprecated, please switch to ironic-python-" "agent as soon as possible." msgstr "" "Using old bash-based ramdisk is deprecated, please switch to ironic-python-" "agent as soon as possible." msgid "" "Using the root_device_hint alias for the raid_device plugin is deprecated." msgstr "" "Using the root_device_hint alias for the raid_device plugin is deprecated." msgid "" "Wrong provision state name 'inspectfail' in *ironic-inspector* valid states " "for node inspection. This issue leads to state inconsistency between " "*ironic* and *ironic-inspector*. For example, if *ironic* inspection timeout " "is lower than *ironic-inspector*'s, and inspection timeout occurs, *ironic* " "will transition node into 'inspect failed' provision state. In such case " "when node inspection finishes without errors the node will be in 'inspect " "failed' provision state with inspection in 'finished' state." msgstr "" "Wrong provision state name 'inspectfail' in *ironic-inspector* valid states " "for node inspection. This issue leads to state inconsistency between " "*ironic* and *ironic-inspector*. For example, if *ironic* inspection timeout " "is lower than *ironic-inspector*'s, and inspection timeout occurs, *ironic* " "will transition node into 'inspect failed' provision state. In such case " "when node inspection finishes without errors the node will be in 'inspect " "failed' provision state with inspection in 'finished' state." msgid "`Openstack Theme `_ support" msgstr "`OpenStack Theme `_ support" msgid "" "``0.0.0.0`` and an empty string in the ``bmc_address`` inventory field are " "now correctly treated as missing BMC address." msgstr "" "``0.0.0.0`` and an empty string in the ``bmc_address`` inventory field are " "now correctly treated as missing BMC address." 
msgid "" "``[ironic]/auth_strategy`` - set ``[ironic]/auth_type`` option to ``none`` " "to access ironic API in noauth mode" msgstr "" "``[ironic]/auth_strategy`` - set ``[ironic]/auth_type`` option to ``none`` " "to access ironic API in noauth mode" msgid "" "``[ironic]/ironic_url`` - use ``[ironic]/endpoint_override`` option to set " "specific ironic API endpoint address if discovery of ironic API endpoint is " "not desired or impossible (for example in standalone mode)" msgstr "" "``[ironic]/ironic_url`` - use ``[ironic]/endpoint_override`` option to set " "specific ironic API endpoint address if discovery of Ironic API endpoint is " "not desired or impossible (for example in standalone mode)" msgid "" "``[ironic]/os_endpoint_type`` - use ``[ironic]/valid_interfaces`` option to " "set ironic endpoint types that will be attempted to be used" msgstr "" "``[ironic]/os_endpoint_type`` - use ``[ironic]/valid_interfaces`` option to " "set Ironic endpoint types that will be attempted to be used" msgid "``[ironic]/os_region`` - use ``[ironic]/region_name`` option instead" msgstr "``[ironic]/os_region`` - use ``[ironic]/region_name`` option instead" msgid "``[ironic]/os_service_type`` - use ``[ironic]/service_type`` option" msgstr "``[ironic]/os_service_type`` - use ``[ironic]/service_type`` option" msgid "``[swift]/os_endpoint_type`` - use ``[swift]/valid_interfaces`` option" msgstr "``[swift]/os_endpoint_type`` - use ``[swift]/valid_interfaces`` option" msgid "``[swift]/os_region`` - use ``[swift]region_name`` option" msgstr "``[swift]/os_region`` - use ``[swift]region_name`` option" msgid "``[swift]/os_service_type`` - use ``[swift]/service_type`` option" msgstr "``[swift]/os_service_type`` - use ``[swift]/service_type`` option" msgid "`makefile` for manual building" msgstr "`makefile` for manual building" msgid "" "general access to the whole API is by default granted to a user with either " "``admin``, ``administrator`` or ``baremetal_admin`` role (previously it " "allowed access only to a user with ``admin`` role)" msgstr "" "general access to the whole API is by default granted to a user with either " "``admin``, ``administrator`` or ``baremetal_admin`` role (previously it " "allowed access only to a user with ``admin`` role)" msgid "http://docs.openstack.org/admin-guide-cloud/cross_project_cors.html" msgstr "http://docs.openstack.org/admin-guide-cloud/cross_project_cors.html" msgid "http://docs.openstack.org/developer/oslo.middleware/cors.html" msgstr "http://docs.openstack.org/developer/oslo.middleware/cors.html" msgid "" "http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html" msgstr "" "http://specs.openstack.org/openstack/openstack-specs/specs/cors-support.html" msgid "https://bugs.launchpad.net/bugs/1501746" msgstr "https://bugs.launchpad.net/bugs/1501746" msgid "https://bugs.launchpad.net/bugs/1506160" msgstr "https://bugs.launchpad.net/bugs/1506160" msgid "" "listing of current introspection statuses and showing a given introspection " "is by default also allowed to a user with the ``baremetal_observer`` role" msgstr "" "listing of current introspection statuses and showing a given introspection " "is by default also allowed to a user with the ``baremetal_observer`` role" ironic-inspector-7.2.0/releasenotes/source/_templates/0000775000175100017510000000000013241324014023153 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013241323457025440 0ustar 
zuulzuul00000000000000ironic-inspector-7.2.0/releasenotes/source/newton.rst0000666000175100017510000000027113241323457023076 0ustar zuulzuul00000000000000=========================================== Newton Series (3.3.0 - 4.2.x) Release Notes =========================================== .. release-notes:: :branch: origin/stable/newton ironic-inspector-7.2.0/releasenotes/source/ocata.rst0000666000175100017510000000026513241323457022656 0ustar zuulzuul00000000000000========================================== Ocata Series (5.0.0 - 5.0.x) Release Notes ========================================== .. release-notes:: :branch: origin/stable/ocata ironic-inspector-7.2.0/requirements.txt0000666000175100017510000000236413241323457020322 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. automaton>=1.9.0 # Apache-2.0 alembic>=0.8.10 # MIT Babel!=2.4.0,>=2.3.4 # BSD construct>=2.8.10 # MIT eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT Flask!=0.11,<1.0,>=0.10 # BSD futurist>=1.2.0 # Apache-2.0 ironic-lib>=2.5.0 # Apache-2.0 jsonpath-rw<2.0,>=1.2.0 # Apache-2.0 jsonschema<3.0.0,>=2.6.0 # MIT keystoneauth1>=3.3.0 # Apache-2.0 keystonemiddleware>=4.17.0 # Apache-2.0 netaddr>=0.7.18 # BSD pbr!=2.1.0,>=2.0.0 # Apache-2.0 python-ironicclient>=2.2.0 # Apache-2.0 python-swiftclient>=3.2.0 # Apache-2.0 pytz>=2013.6 # MIT oslo.concurrency>=3.25.0 # Apache-2.0 oslo.config>=5.1.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT ironic-inspector-7.2.0/tools/0000775000175100017510000000000013241324014016165 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/tools/test-setup.sh0000777000175100017510000000370613241323457020663 0ustar zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run; # it sets up the test system as needed. # Developers should set up their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests; if you change them, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # an anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # The root password for the PostgreSQL database; pass it in via # POSTGRES_ROOT_PW.
DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ironic-inspector-7.2.0/tools/states_to_dot.py0000777000175100017510000000621013241323457021430 0ustar zuulzuul00000000000000#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import optparse from automaton.converters import pydot from ironic_inspector import introspection_state as states def print_header(text): print("*" * len(text)) print(text) print("*" * len(text)) def main(): parser = optparse.OptionParser() parser.add_option("-f", "--file", dest="filename", help="write output to FILE", metavar="FILE") parser.add_option("-T", "--format", dest="format", help="output in given format (default: png)", default='png') parser.add_option("--no-labels", dest="labels", help="do not include labels", action='store_false', default=True) (options, args) = parser.parse_args() if options.filename is None: options.filename = 'states.%s' % options.format def node_attrs(state): """Attributes used for drawing the nodes (states). The user can perform actions on introspection states; we distinguish the error states from the other states by highlighting the node. Error stable states are labelled in red. This is a callback method used by pydot.convert(). :param state: name of state :returns: A dictionary with graphic attributes used for displaying the state. """ attrs = {} attrs['fontcolor'] = 'red' if 'error' in state else 'gray' return attrs def edge_attrs(start_state, event, end_state): """Attributes used for drawing the edges (transitions). This is a callback method used by pydot.convert(). :param start_state: name of the start state (unused) :param event: the event, a string :param end_state: name of the end state :returns: A dictionary with graphic attributes used for displaying the transition.
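For example, with labels enabled each transition is labelled with its event name at font size 10, and a transition whose end state is 'error' additionally gets a red label, matching the error-node highlighting above.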
""" if not options.labels: return {} attrs = {} attrs['fontsize'] = 10 attrs['label'] = event if end_state is 'error': attrs['fontcolor'] = 'red' return attrs source = states.FSM graph_name = '"Ironic Inspector states"' graph_attrs = {'size': 0} dot_graph = pydot.convert( source, graph_name, graph_attrs=graph_attrs, node_attrs_cb=node_attrs, edge_attrs_cb=edge_attrs, add_start_state=False) dot_graph.write(options.filename, format=options.format) print(dot_graph.to_string()) print_header("Created %s at '%s'" % (options.format, options.filename)) if __name__ == '__main__': main() ironic-inspector-7.2.0/rootwrap.d/0000775000175100017510000000000013241324014017124 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/rootwrap.d/ironic-inspector-firewall.filters0000666000175100017510000000033513241323457025625 0ustar zuulzuul00000000000000# ironic-inspector-rootwrap command filters for firewall manipulation # This file should be owned by (and only-writeable by) the root user [Filters] # ironic_inspector/firewall.py iptables: CommandFilter, iptables, root ironic-inspector-7.2.0/setup.py0000666000175100017510000000200613241323457016551 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ironic-inspector-7.2.0/AUTHORS0000664000175100017510000000657213241324013016106 0ustar zuulzuul00000000000000Alfredo Moralejo Andreas Jaeger Andreas Jaeger Anh Tran Annie Lezil Anton Arefiev Attila Fazekas AvnishPal Bob Fournier Cady_Chen Cao Xuan Hoang Chris Krelle Dmitry Tantsur Dmitry Tantsur Doug Hellmann Duan Jiong Edan David Flavio Percoco Frederic Lepied Galyna Zholtkevych Haomeng, Wang Hoang Trung Hieu Ilya Etingof Imre Farkas James E. Blair James Slagle Janonymous Javier Pena Jay Faulkner Jeremy Stanley Jim Rollenhagen JinLi Jiri Tomasek John L. Villalovos John L. 
Villalovos John Trowbridge Julia Kreger KaiFeng Wang Kan Ken'ichi Ohmichi Kurt Taylor Lucas Alvares Gomes Luong Anh Tuan Marcellin Fom Tchassem Mario Villaplana Markos Chandras Mitsuhiro SHIGEMATSU Moshe Levi Nam Nguyen Hoai Ngo Quoc Cuong Nguyen Hung Phuong Nishant Kumar Noam Angel OctopusZhang OpenStack Release Bot Pavlo Shchelokovskyy Pavlo Shchelokovskyy Ramamani Yeleswarapu Ruby Loo Sam Betts Serge Kovaleff Sergii Nozhka Szymon Borkowski Tao Li Teng Fei Vasyl Saienko Vu Cong Tuan William Stevenson Yosef Hoffman Yuiko Takada Yuiko Takada YuikoTakada Zhenguo Niu Zhenguo Niu ZhiQiang Fan Zuul ankit avnish chenxing dparalen dparalen gecong1973 ghanshyam inspurericzhang ji-xuepeng leesea lihao melissaml rajat29 shangxiaobj vmud213 yaojun ironic-inspector-7.2.0/ironic-inspector.80000666000175100017510000000135713241323457020427 0ustar zuulzuul00000000000000.\" Manpage for ironic-inspector. .TH IRONIC-INSPECTOR 8 "08 Oct 2014" "1.0" "ironic-inspector man page" .SH NAME ironic-inspector \- hardware introspection daemon for OpenStack Ironic. .SH SYNOPSIS ironic-inspector CONFFILE .SH DESCRIPTION This command starts the ironic-inspector service, which starts and finishes hardware discovery and maintains firewall rules for nodes accessing the PXE boot service (usually dnsmasq). .SH OPTIONS ironic-inspector does not take any options; however, you should supply the path to the configuration file. .SH SEE ALSO The README page located at https://pypi.python.org/pypi/ironic-inspector provides some information about how to configure and use the service. .SH BUGS No known bugs. .SH AUTHOR Dmitry Tantsur (divius.inside@gmail.com) ironic-inspector-7.2.0/README.rst0000666000175100017510000000272213241323457016531 0ustar zuulzuul00000000000000=============================================== Hardware introspection for OpenStack Bare Metal =============================================== Introduction ============ .. image:: https://governance.openstack.org/tc/badges/ironic-inspector.svg :target: https://governance.openstack.org/tc/reference/tags/index.html This is an auxiliary service for discovering hardware properties for a node managed by `Ironic`_. Hardware introspection or hardware properties discovery is a process of getting hardware parameters required for scheduling from a bare metal node, given its power management credentials (e.g. IPMI address, user name and password). * Free software: Apache license * Source: https://git.openstack.org/cgit/openstack/ironic-inspector * Bugs: https://bugs.launchpad.net/ironic-inspector * Downloads: https://pypi.python.org/pypi/ironic-inspector * Documentation: https://docs.openstack.org/ironic-inspector/latest/ * Python client library and CLI tool: `python-ironic-inspector-client `_ (`documentation `_). .. _Ironic: https://wiki.openstack.org/wiki/Ironic .. note:: **ironic-inspector** was called *ironic-discoverd* before version 2.0.0. Release Notes ============= For information on any current or prior version, see `the release notes`_. .. 
_the release notes: https://docs.openstack.org/releasenotes/ironic-inspector/ ironic-inspector-7.2.0/zuul.d/0000775000175100017510000000000013241324014016246 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/zuul.d/project.yaml0000666000175100017510000000112413241323457020612 0ustar zuulzuul00000000000000- project: check: jobs: - ironic-inspector-grenade-dsvm - ironic-inspector-tempest-dsvm-discovery - ironic-inspector-tempest-dsvm-python3 - openstack-tox-functional - openstack-tox-functional-py35 - ironic-tempest-dsvm-ironic-inspector gate: queue: ironic jobs: - ironic-inspector-grenade-dsvm - ironic-inspector-tempest-dsvm-discovery - ironic-inspector-tempest-dsvm-python3 - openstack-tox-functional - openstack-tox-functional-py35 - ironic-tempest-dsvm-ironic-inspector ironic-inspector-7.2.0/zuul.d/legacy-ironic-inspector-jobs.yaml0000666000175100017510000000271713241323457024641 0ustar zuulzuul00000000000000# DSVM jobs - job: name: ironic-inspector-dsvm-base parent: legacy-dsvm-base required-projects: - openstack/ironic - openstack/ironic-inspector - openstack/ironic-lib - openstack/ironic-python-agent - openstack/ironic-tempest-plugin - openstack/pyghmi - openstack/python-ironic-inspector-client - openstack/python-ironicclient - openstack/virtualbmc irrelevant-files: - ^test-requirements.txt$ - ^.*\.rst$ - ^doc/.*$ - ^ironic_inspector/test/(?!.*tempest).*$ - ^ironic_inspector/locale/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tox.ini$ - job: name: ironic-inspector-grenade-dsvm parent: ironic-inspector-dsvm-base run: playbooks/legacy/ironic-inspector-grenade-dsvm/run.yaml post-run: playbooks/legacy/ironic-inspector-grenade-dsvm/post.yaml timeout: 10800 required-projects: - openstack-dev/grenade - job: name: ironic-inspector-tempest-dsvm-discovery parent: ironic-inspector-dsvm-base run: playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/run.yaml post-run: playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/post.yaml timeout: 10800 - job: name: ironic-inspector-tempest-dsvm-python3 parent: ironic-inspector-dsvm-base run: playbooks/legacy/ironic-inspector-tempest-dsvm-python3/run.yaml post-run: playbooks/legacy/ironic-inspector-tempest-dsvm-python3/post.yaml timeout: 10800 ironic-inspector-7.2.0/LICENSE0000666000175100017510000002613613241323457016056 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
ironic-inspector-7.2.0/ChangeLog0000664000175100017510000011520213241324013016577 0ustar zuulzuul00000000000000CHANGES ======= 7.2.0 ----- * Retry port lists on failure in PXE filter periodic sync * Only set switch\_id in local\_link\_connection if it is a mac address * ironic\_inspector: ironic: Fix 'auth\_type' when 'auth\_strategy' is used * ironic\_inspector: ironic: Fix 'auth\_type' when 'auth\_strategy' is used * Remove redundant "the" from a docstring * Update reno for stable/queens * Update UPPER\_CONSTRAINTS\_FILE for stable/queens * Update .gitreview for stable/queens 7.1.0 ----- * Remove sample policy and config files * Imported Translations from Zanata * Imported Translations from Zanata * Zuul: Remove project name * fixed some "ironic" misspelling * Replace use of functools.wraps() with six.wraps() * Declare support for Python 3.5 in setup.cfg * Remove empty files * Zuul: Remove project name * Updated from global requirements * Switch the CI to hardware types * Remove use of construct lib FieldError exception * Use the 'ironic' queue for the gate * Updated from global requirements * Imported Translations from Zanata * Add keystoneauth adapters * Updated from global requirements * Fix Py2/Py3 differences in write locking code 7.0.0 ----- * Doc update: remove discoverd reference * Remove ironic\_inspector/test/inspector\_tempest\_plugin/ directory * Make the Python 3 job voting * Centralize config options * Follow up conf.py help text * Replace http with https for doc links * Use general py3 tox env for default tox run * Skip devstack jobs on locale-only changes * Imported Translations from Zanata * Replace http with https for doc links in ironic-inspector * zuul: Add ability to specify a 'branch\_override' value * Terminal state transitions in transactions * Updated from global requirements * Use native v3 tox jobs * Imported Translations from Zanata * zuul: Remove duplicated TEMPEST\_PLUGINS entry * Update version of flake8-import-order package * Use the tempest plugin from openstack/ironic-tempest-plugin * devstack: set [service\_available]ironic-inspector = True * Make discovery use dnsmasq dhcp filter * fix dvsm config deprecations * Updated from global requirements * Imported Translations from Zanata * Updated from global requirements * Updated from global requirements * Zuul: add file extension to playbook path * Dnsmasq filter docs follow-up * Allow concurrect updating of dnsmasq configuration * Add py35 gate for ironic-inspector * Introducing a dnsmasq PXE filter driver * Remove setting of version/release from releasenotes * zuul: Clean up zuul files * Updated from global requirements * Updated from global requirements * Imported Translations from Zanata * Updated from global requirements * Move processing of local\_gb to root\_disk\_selection hook * Zuul: add file extension to playbook path * Imported Translations from Zanata 6.1.0 ----- * Unittest node\_info is added with a version\_id * Clean up release notes before a release * refer to 'openstack baremetal' CLI in docs * Updated from global requirements * Support manage\_firewall during deprecation period * Refactoring the firewall * Add zuul3 jobs in-tree * Update tests to do not use deprecated test.services() * flake8: Enable some off-by-default checks * pep8: Add 'application-import-names = ironic\_inspector' * Fix for broken zuul v3 job and releasenotes * Just "import mock" as it works for Python 2 and 3 * Add request context and policy enforcement * Properly init config in unit tests * Remove SCREEN\_LOGDIR 
from devstack * Updated from global requirements * Make starting state non-reentrant * Use ostestr unit test runner * Generate version\_id upon add\_node * Replace the usage of 'admin\_manager' with 'os\_admin' * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updated from global requirements * Do not rely on the older ipmi\_address field on lookup * Updated from global requirements * Change example dnsmasq.conf in install doc for uefi * Allow periodics to terminate inspector * i[Trivialfix]Fix typos in ironic-inspector * Updated from global requirements * Release notes: specify pike versions * Imported Translations from Zanata * Updated from global requirements * Remove ensure\_logs\_exist check during upgrade * Update reno for stable/pike 6.0.0 ----- * PXE filter options have no effect yet * Syncing example.conf * Clean up current release notes * Treat 0.0.0.0 and '' as missing BMC address * Configuration documentation migrated * Clean up deprecated config options * Update the documentation link for doc migration * Updated from global requirements * Add selinux check in Troubleshooting PXE boot document * Docs migration conclusion * Introducing the user content * PGSQL: create Enum before using * Introducing the installation content * Updated from global requirements * Introducing the contributor content * Updated from global requirements * Update URL home-page in documents according to document migration * switch from oslosphinx to openstackdocstheme * [Devstack] cleanup upgrade settings * Updated from global requirements * PXE boot filtering drivers * add disabled option to VALID\_ADD\_PORTS\_VALUES * Updated from global requirements * Updated from global requirements * Use new oslo db EngineFacade * grenade: Only 'enable\_plugin ironic-inspector' if not already in conf * Updated from global requirements * Deprecate removing old status and disable it by default * Regenerate example.conf * Remove pbr warnerrors in favor of sphinx check * [refactoring] simplify signature of \_finish function * Updated from global requirements * Updated from global requirements * Completely remove support for setting IPMI credentials * Preparing for service splitting * Updated from global requirements * Updated from global requirements * Replace the deprecated tempest.test.attr with decorators.attr * Follow up: conditions optional fields * Remove rollback support from introspection rules * Bump pydot2 to pydot3 * Updated from global requirements * Do not fail rules rollback on bad formatting key * Updated from global requirements * Allow timeout in active states * Inspector rules API does not return all attributes * Logging ironic port creation * Updated from global requirements * Fix \`clean up\` error logging formatting * Connect brbm-inspector and brbm when needed * Remove redundant copy operation in pxe\_enabled update code * [devstack] fix working with USE\_SYSTEMD=True * Add new transaction starting -> error on timeout * Adds node state to the API response data * Allow hooks to have dependencies on other hooks * Remove unneeded validation code in ValidateInterfacesHook * Fix slave\_connection string for unit tests * Set pxe\_enabled on new and existing ports on introspection * Mock ironic client per test in functional tests * Updated from global requirements * Move port creation to validate\_interfaces hook * Use processed lldp data, if available, for local\_link\_connection plugin * Updated from global requirements * Update \`create 
migration\` manual * Add missing test for NodeInfo.create\_ports * Updated from global requirements * Don't set MTU for inspector veth iface on hardware * [Devstack]: pick correct MTU for br-inspector veth * Unpin python3 version for functional tests * Update config sample * Update the url of ironic inspection documentation * Updated from global requirements 5.1.0 ----- * Don't pass sqlite\_db in db\_options.set\_defaults() * Trivial: don't create unused temporary directory * Remove translation of log messages Part-2 * Remove translation of log messages Part-1 * Fix some reST field lists in docstrings * Use eventlet version of subprocess * Mention state machine diagram generation in contrib guide * Updated from global requirements * Fix inconsistent service naming in install guide * Update broken alembic links in contributing guide * Updated from global requirements * Reapply doesn't update started\_at time * Fix tox test failed because timezone is CST * Python 3.4 (py34) is removed from tox envlist * Fix valid provision states for introspection * Updated from global requirements * Add fsm dot diagram generator * Updated from global requirements * Update HTTP API docs with missing 1.9 API microversion * Functional tests: set introspection\_delay to 0 * Update the Version Support Matrix in install guide * Use IRONIC\_VM\_NETWORK\_BRIDGE * Use flake8-import-order * Use specific end version since liberty is EOL * Trivial: mock sleep in introspection tests * Minor update to CONTRIBUTING.rst * Update hardware inventory in docs * Update release notes for Ocata release * Update documentation to deploy Ironic Inspector with DevStack * Updated from global requirements 5.0.0 ----- * Fix some grammar and spelling issues in release notes * Find a node by multiple attributes * Pass session directly to swiftclient * Ensure devstack does not try to delete a physical NIC * Switch to use test\_utils.call\_until\_true * Add node\_info to some messages and clean up docstrings for lldp plugin * Fix updating rows in d00d6e3f38c4 migration * Typo fix: infromation => information * Prepare for using standard python tests * Trivial: improve logging in FSM transition * Adding InfiniBand Support * Add plugin to process basic management LLDP TLVs * Clean up logging related to new state machine * Update external links in the documentation * Clean up deprecated configuration options * Switch to decorators.idempotent\_id * Updated from global requirements * Allow diskless nodes introspection * Remove heading "Team and repository tags" * Deprecate introspection\_delay\_drivers option and make it no-op * [devstack] Stop installing jq, we're not using it in tests any more * Deprecated log\_bmc\_address option * [devstack] Remove bridge only if it's not OVB\_PHYSICAL\_BRIDGE * Change (started|finished)\_at column type * Make grenade actually upgrade our source code * Devstack: don't rely on Ironic local vars * Fix DBDeadlock during clean up * Skip brbm-inspector veth if IRONIC\_IS\_HARDWARE == True * Updated from global requirements * Updated from global requirements * Grenade: don't enable tempest plugins in settings * Deprecate setting IPMI credentials * Do not use loopback BMC addresses for lookup * Updated from global requirements * Functional test node remove * Updated from global requirements * Updated from global requirements * Add troubleshooting step for virtual box * Updated from global requirements * Remove unused "service" argument from tempest client manager * Tempest test tag baremetal doesn't exist * Updated 
from global requirements * Use the device hints matching mechanism from ironic-lib * Fix test when running with SQLite 3.7.17 from CentOS 7 * Add drac\_address to ipmi\_address\_fields * Update config sample * Revert "Use liberty-eol tag for liberty release notes" * Use oslo\_serialization.base64 to follow OpenStack Python3 * Updated from global requirements * Introducing node introspection state management * Use liberty-eol tag for liberty release notes * Add author and author-email in setup.cfg * Remove upgrade from non-ironic setup * Remove default\_params\_with\_timeout\_values from tempest client * Updated from global requirements * Document upgrade procedure * Clean up retained code for grenade workaround * Show team and repo badges on README * Do not source old/localrc twise in grenade * Don't recreate db during grenade upgrade * Use function is\_valid\_mac from oslo.utils * Add API for listing all introspection statuses * Stop disabling the Neutron tempest plugin * Fix passing incorrect args to logging * Remove the generic eventlet monkey patch from test * Test discovered nodes are in ENROLL state and fix typo * Fixed incorrect string type checking * Add !requiretty to sudoers config in install document * Add py35 to tox job list * Update to hacking 0.12.0 and use new checks * Updated from global requirements * Add a test for introspection abort action verification * Updated from global requirements * Allow to configure cache clean up period * Switch func3 environment to Python 3.5 * Updated from global requirements * Move assertStatus to Base functional test and rename it for clarity * Updated from global requirements * UUID, started\_at, finished\_at in the status API * Only disable Neutron tests when our Grenade is running * Bump hacking to 0.11.0 in test-requirements * Updated from global requirements * Correct conf.py missing space * Fix a typo in base.py * Stop adding ServiceAvailable group option * Enable release notes translation * Updated from global requirements * devstack: remove old exercise scripts * Updated from global requirements * LLC Hook: Do not assume interfaces are added to Ironic * LLC Hook: Fix patching Ironic ports * TrivialFix: Fix typo in the configuration file * Updated from global requirements * TrivialFix: Remove default=None when set value in Config * Use assertEqual() instead of assertDictEqual() * Update reno for Newton * Updated from global requirements 4.2.0 ----- * modify the home-page info with the developer documentation * Tempest: add auto-discovery test * Updated from global requirements * Add translation marker to deprecated\_reason of config opts * Add translation marker to help of config opts * Updated from global requirements * Disable neutron tests in our grenade * Log hook names as we run them * Set node to the error if reapply fails * TrivialFix: Remove cfg import unused * Replace assertRaisesRegexp with assertRaisesRegex * Inherit from oslotest base * Add PCI devices plugin to inspector * Change asserts with more specific assert methods * Python 3.x compatibility ConfigParser * Changed an assert to more specific assert method * Fix formatting strings in LOG.error * Fix tempest.conf generation * Use upper constraints for all jobs in tox.ini * functional: allow passing kwargs to all calls * Increase verbosity for functional tests * Fix order of arguments in assertEqual * Modify the SQL's string from double quote to single quote * Updated from global requirements * Tempest: increase ironic sync timeout 4.1.0 ----- * Fix release notes 
formatting * Add callback function to manage missing hooks * Updated from global requirements * Check whether action value is string before calling format() * Updated from global requirements * Use OSC in exercise.sh * Provide meaningful error messages in functional tests * Add GenericLocalLinkConnectionHook processing hook * Combine multiple warning logs into one in create\_ports * Add a simple smoke test to be run in the grenade gate * Remove redundant white space * Tempest: wrap instance actions into inspector methods * Fix improperly placed firewall.update\_filters when aborting * Add config to skip power off after introspection * Update example.conf * [doc]Add 'ipa-debug=1' to installation document * Updated from global requirements * [devstack]Remove unneeded enable\_service in example.local.conf * [devstack]Switch to pip\_install\_gr for inspector client * Updated from global requirements * [devstack]Only cleanup tftp directory if ipxe disabled 4.0.0 ----- * remove unused LOG * Updated from global requirements * Skip test\_init\_failed\_processing\_hook test * Make Ironic variables visible inside exercise.sh * [devstack] Do not hardcode coreos ramdisk when building from source * Allow customizing ramdisk logs file names and simplify the default * Introduce upgrade testing with Grenade * Updated from global requirements * Use run\_process in the devstack plugin * Create devstack/example.local.conf and include it in the docs * Updated from global requirements * Updated from global requirements * devstack/plugin.sh: use screen\_stop * Updated from global requirements * Fix tempest tests * Fix response code for rule creating API * Tempest: don't rely on tempest ironic client * Update terribly outdated installation instructions * Add config fixture to functional tests * Return HTTP 500 from /v1/continue on unexpected exceptions * Remove deprecated alias "root\_device\_hint" for "raid\_device" hook * Add a plugin for capabilities detection * Remove support for the old bash ramdisk * Updated from global requirements * Remove iterated form of side effects to simplify code * Updated from global requirements * Updated from global requirements * Tempest: add basic test * Updated from global requirements * Updated from global requirements * Fix py3 issue in functional tests * Updated from global requirements * Always convert the size root device hints to an integer * Update Introspection API Docs from UUID to Node ID * Updated from global requirements 3.3.0 ----- * Ensure rules documentation examples are valid JSON * Updated from global requirements * Support Ironic node names in our API * Updated from global requirements * is-empty conditions should accept missing values * Store ramdisk logs on all processing failures, not only reported by the ramdisk * Use PortOpt type for port options * Refactor test\_process * use openstack cli instead of keystone cli * Updated from global requirements * Make sure to clean the blacklist cache when disabling the firewall * Updated from global requirements * Tempest plugin initial commit * Make tox respect upper-constraints.txt * Updated from global requirements * Allow rerunning introspection on stored data * Updated from global requirements * Move unit tests to "unit" directory * Drop the TestInit node\_cache unit test * Updated from global requirements * Update versions on the release notes page * Remove downgrades from migrations * Set config options for keystoneauth * Use keystoneauth for Ironic and Swift clients * Better error handling when converting 
eDeploy data * Update reno for stable/mitaka * Use all valid MAC's for lookup 3.2.0 ----- * Update links to existing documentation * Disable DHCP completely when no nodes are on introspection * Added CORS support to Ironic Inspector * Add discover nodes exercise * Don't fail on power off if in enroll state * [devstack] add support for using iPXE instead of plain PXE 3.1.0 ----- * Allow specify log level for Error exception * Fix arg for "Port already exists" error * Updated from global requirements * Deprecate root\_device\_hint name for raid\_device plugin * Remove redundant data copying in tests * Update the troubleshooting guide * Officially deprecate rollback for introspection rules * Generate API documentation from Python modules * Fixed warnings during the docs build * Refactor service init, shutdown and run into a separate class * Fix incorrect string formatting in the SSL code * Refactor base test classes * Move ironic options to common/ironic * Use dedicated config file for config generator * Recommend using dhcp-sequential-ip in the dnsmasq configuration * Correct method call to to\_dict * Use futurist library for asynchronous tasks * [devstack] Improve virtual machines logs * Split ironic-related functions from utils to separate common.ironic module * Add db migrations tests * Clarify that we don't maintain compatibility for stored data * Add enroll\_node\_not\_found hook * Logging configuration options at startup * Add invert option to rule conditions * Updated from global requirements * Updated from global requirements * Fix gate broken by sudden remove of SERVICE\_TENANT\_NAME variable * Add new condition: is-empty * Updated from global requirements * Extend conditions and actions * Small ValidateInterfacesHook cleanup * Drop rollback actions for set-XX and extend-XX rules actions * Do not set Swift parameters defaults in keyword arguments * Stop storing ramdisk logs with the introspection data * Expand instructions for DNS on Ubuntu * Always generate fresh uuid in test cases * Updated from global requirements * Add forward slash for node path patch * Remove "ramdisk" mentioning in func test contrib * Introduce API for aborting introspection * Do not update firewall rules if list of MAC's did not change * Update example.conf * Remove deprecated support for passing patches lists into hooks * [devstack] Use the coreos builder for the source build of the ramdisk * Enable Keystone v3 endpoints for Inspector * Register the keystone service and endpoint in the devstack plugin * Clean up documentation on introspection rules conditions * Updated from global requirements * Update translation setup * Use new introspection data save command in exercise.sh * Set timeout in gate to 10 minutes * Updated from global requirements * Add new conditions: matches and contains * Updated from global requirements 3.0.0 ----- * Updated from global requirements * Stringify node\_info.uuid as a precaution * Check whether agent tarball exists * Updated from global requirements * Revert "Block broken diskimage-builder versions to unblock the gate" * Block broken diskimage-builder versions to unblock the gate * Switch to IPA as a primary ramdisk * Track node identification during the whole processing * Updated from global requirements * Use assertTrue/False instead of assertEqual(T/F) * Updated from global requirements * Update ramdisk callback documentation * Put py34 first in the env order of tox * Updated from global requirements * Drop support for introspecting nodes in maintenance mode * Enable 
ramdisk log collection in devstack plugin and improve logging * Validate that data received from the ramdisk is a JSON object * Improve debug logging * Adjust releasenotes structure * Updated from global requirements * Properly report preprocessing errors to a user * Updated from global requirements * Mention docs in the README now that they're published * Auto-fill keystone\_authtoken config section 2.3.0 ----- * Numerous improvements in the documentation * Increase cleaning timeout for gate to 10 minutes * Add missing release notes * Lock nodes to avoid simultaneous introspection requests * Generate Sphinx docs * Use Reno for release notes management * Update IPA section of CONTRIBUTING.rst to match gate * Updated from global requirements * Updated from global requirements * Issue a warning when add\_ports=pxe and not PXE NIC address is provided * Do not explicitly mention requirements.txt in tox.ini * Make it explicit that finished is set to true on failures too * Updated from global requirements * Properly order node deletion from cache * Support IPA for devstack-plugin * Enable migration autogenerate * Updated from global requirements * Replace deprecated LOG.warn with LOG.warning * Fix random gate breakages due to cleaning * Updated from global requirements * Always default to InnoDB for MySQL * Updated from global requirements * Never run Flask application with debug mode * Fix gate broken by the devstack trueorfalse change * Add coverage target to tox.ini * Use auth\_strategy=noauth in functional tests * Fix database schema for mysql and switch the gate to testing it * Updated from global requirements * Add .eggs/ to gitignore 2.2.0 ----- * Add troubleshooting section for Ubuntu DNS issues * Add a warning about sudoers file * Add explicit dependencies on Alembic and SA * Update README with extra\_hardware plugin changes * Don't rely on dict ordering in test\_plugins\_standard * Fix rootwrap configuration documentation * devstack: allow nodes to be already MANAGEABLE in exercise * Updated from global requirements * Add alembic migrations for the inspector database * Use rootwrap to execute iptables instead of requiring root * Support IPA in raid\_device plugin * Convert eDeploy data so that rules can process it * Stop recommending using DIB from source * Allow empty lookup attributes if node\_not\_found\_hook is provided * Switch to using CLI for introspection rules * Add missing plugins for introspection rules * Ignore IPMI Address for IPMI Bridged nodes * Updated from global requirements * Replace glance and keystone commands with OSC equivalents * Smart root disk selection including support for root device hints * Add API Discovery to Ironic Inspector * Add mock for check\_call to all firewall tests * Updated from global requirements * Deprecate returning patches from plugins * Add introspection rules support * Add instructions how to try IPA as a ramdisk * Stop using configuration options in top level code * Split standard plugin tests from other processing tests * Move update functions to NodeInfo object * Enable colorized logging in devstack plugin * Revisit required provision states * Updated from global requirements * Add logging to introspection data storage * Make list of ipmi\_address-alike driver fields configurable * Fix logic for internal server error * Pass -w flag to iptables to make it wait for xtables lock * Store and expose introspection data * Updated from global requirements * Rename plugin root\_device\_hint -> raid\_device * Clean up README * Fail 
introspection early if no lookup attributes can be detected * Updated from global requirements * Mock socket in test\_bad\_hostname\_errors * Copy gitignore from ironic * Updated from global requirements * Add func3 tox environment for functional testing with Python 3 2.1.0 ----- * Updated from global requirements * Document error response format in HTTP-API.rst * Make Swift endpoint type configurable * Migrate to oslo\_log * Updated from global requirements * Periodically check nodes' existance * Log look up attributes at INFO level * Updated from global requirements * Use retries provided by ironicclient instead of ad-hoc ones * Split common database code into ironic\_inspector.db * Require ironic API version 1.6 * Load authenticate token from HTTP header * Implement optional API versioning * Fix functional test invocation * Convert functional test to a framework and decouple from client * Updated from global requirements * Make endpoint type configurable * Insert artificial delay between sending virtual nodes on introspection * Updated from global requirements * Fix and enhance contributing documentation on writing plugins * Improve support matrix readablity * Provide more meaningful message for error 500 * Create a handler for uncaught 404 errors * Clarify version support matrix * Drop openstackclient from requirements * Updated from global requirements * Updated from global requirements * Switch to pbr postversioning 2.0.1 ----- * Fix usage of mock due for the latest 1.1.0 mock * Bump version to 2.1.0 2.0.0 ----- * Prerelease translation source update * Clean up tox.ini * Log when introspection has started * Updated from global requirements * Migrate to oslo\_db * Specify server id clearly on the gate test * Allow addition of hooks without overriding defaults * Try real nova boot in exercise.sh * Use real property values from nova flavor in exercise.sh * Recommend disabling cinder in local.conf * Drop MANIFEST.in - it's not needed with PBR * Drop unused functest directory * Updated from global requirements * Fix and enhance devstack test script * Remove cliff from requirements.txt * Make functional test importable and stop depending on DIB code * Move Python ramdisk code out of tree * Deprecate authenticate opt in favor of auth\_strategy * Updated from global requirements * Setup Translations * Move client out of tree * Switch to pbr * Remove ironic running check from inspector startup * Change the way error are returned from API to JSON * Provide hook into process when a node isn't found * Add missing backslash and run genconfig * Final discoverd -> inspector rename/move pass * Move create\_ports to NodeInfo * Pass environment variables of proxy to tox * Update .gitreview file for project rename * Update devstack plugin for the recent authentication change * Rename edeploy plugin * Fix edeploy plugin puts too much data in Ironic extra column * Use keystonemiddleware auth credentials * Rework processing hook interface for 2.0.0 * Naming clean up * Add node() and ports() to NodeInfo * Do not require scheduling attributes in ramdisk * Support standalone ironic * Add node UUID to response from /v1/continue * Make the number of green threads configurable * Launchpad project was renamed * Update from global requirements * Add SSL/TLS Support * Repair devstack plugin after rename * Fake missing local\_gb for root\_device\_hint plugin * Rename discoverd -> inspector * Drop unused CLI options * Include journalctl output when sending logs * Support setting IPMI credentials in 
ironic\_discoverd\_ramdisk * Add babel.cfg to MANIFEST.in * Fix leftover of edeploy options removal * Remove profile matching from eDeploy plugin * Initial import of ironic\_discoverd\_ramdisk * Define API\_VERSIONS in shell.py * Bump version to 1.2.0 * README: More updates for Troubleshooting section * Check hooks availability on start-up * Doublecheck node provision state on receiving data from ramdisk * Delay failure from pre-processing hooks * Add unit tests for main.py * Pre-release documentation update * Add an option to always store ramdisk logs * Add ability to save logs received from the ramdisk * Document ipmi\_address in HTTP API * Implement basic checks in functest/devstack-test.sh * Use my\_ip as default URL instead of localhost * Enable ramdisk\_error plugin by default * First take on integration test for using on devstack * Add unit tests for firewall.py * Add DevStack plugin for ironic-discoverd * Proper errors handling for client * Clean up firewall rules on exit * Invalidate cached node information after hooks run * Add option to delete ports after introspection * Add INSPECTFAIL as a valid state to start introspection * Update from global requirements * Switch to oslo.config * More generic option for tuning adding ports * Fix typos in ironic-discoverd * Documentation update * eDeploy: Store all of the facts collected by edeploy in Ironic DB * Stop waiting for power off to happen after introspection * eDeploy plugin to save BIOS and RAID configuration to node.extra * Enable overwrite\_existing by default * CLI: give user a hint to power on the node when setting IPMI creds * Add python-openstackclient plugin for ironic-discoverd * None is a valid default value for base\_url and auth\_token in the client * Add scripts to manage translations * Consider dropping check on power state * Switch to Python 3.4 as default Python 3 env * Properly implement authentication via keystonemiddleware * Verify input uuid * eDeploy plugin to not overwrite node's capabilities * Change utils.get\_ipmi\_address(node) to always return IPv4 address * setup.py to work without requirements.txt * Change i18n domain to match what is generated in oslo-incubator sync * Always use non-localized strings in unit tests * Use less verbose imports for i18n * Support i18n part2 * Support i18n part3 * Fix i18n import * Support i18n * Getting ready for inclusion into projects.yaml * Update functest to new ramdisk code * Also set IPMI address if it's not set already * Don't wait for too long for IPMI credentials update * Functional test for setting IPMI credentials * Require maintenance mode for setting IPMI credentials * Download jq utility when running functest * Make root\_device\_hint plugin not to fail if no block device was received * Documentation and strings update * Adding root\_device\_hint plugin * Client and doc update for setting IPMI credentials * Revamp support for setting IPMI credentials * Make sure we always notify user about an introspect call error * Functional test for boot interface detection * Fix get\_kernel\_parameter fixture * Fix detection of PXELINUX-provided boot interface * Get rid of thread=False monkey-patching hack * Bump requirements to Kilo * Allow ramdisk to specify PXE boot interface * Refactor ValidateInterfaces plugin * Add eDeploy plugin * Start 1.1 development 1.0.0 ----- * Finalize name + small readme fixes * Last-minute release notes update for 1.0.0 * Bump randomly set retry count and delay * Support ilo\_address and drac\_address in addition to ipmi\_address * Update 
functest to use new ramdisk file name * Revert "Get rid of monkey patching work around for Python 2 in main.py" * Fix discovery image installation notes * Add missing test for client and mark some code as non-covered * Fix reporting errors happened in background thread of introspect() * Get rid of monkey patching work around for Python 2 in main.py * Use --config-file argument in functional testing * MANAGED -> MANAGEABLE * Update README for 1.0.0 release * Create directory for database if it doesn't exist * Allows passing of config file via --config-file option * Do not timeout nodes which already finished introspection * Dump test CLI output as JSON, not as Python repr * Small sdist-related cleanup * Introduce test-requirements and update to stable/juno * General naming and strings clean up * Do not proceed with introspection on a node twice * Changes utils.get\_keystone(token) to utils.check\_is\_admin(token) * Switch setting IPMI credentials to using options and HTTP params * Add options table and convenience methods to work with it * Add introspect client call and switch functest to it * Refactor stable API to be /v1/introspection/ * Support new Kilo state machine * Rework node cache clean up according to recent changes * Implement get status endpoint * Disable setting IPMI credentials by default * Add 'default' argument to conf.get\* functions * Remaining changes from making database a required option * Make database a required configuration option * Introduce \_\_version\_\_ and \_\_version\_info\_\_ * Store introspection result in the local database * Add option to overwrite existing properties * Try to set boot device to PXE before rebooting * Make firewall management optional * Reorder configuration options for clarity * Enable functional testing with local ramdisk source * Update README and stop posting full changelogs * Simple functional testing for discoverd and the reference ramdisk * Add forgotten modules to setup.py (shame on me) * Wait for power off before finishing discovery * Review and fix logging messages * Retry on Conflict exceptions from Ironic * Support updating IPMI credentials from within ramdisk * Require manual power on if ipmi\_setup\_credentials is set in Node.extra * Refactoring: split test.py and rewrite tests for process * Refactoring: cap complexity at 15 and coverage at 90 * Refactoring: split discoverd.py into 2 modules * Refactoring: drop features incompatible with Kilo changes * Refactoring: consolidate standard plugins in one module * Wait for power off state before calling discovery done * Separate validating NIC data into a new plugin * Store good MAC's and interfaces in node\_info after processing * Extend node\_cache.pop\_node() result to be a structure * Do not fail if ipmi\_address is not present in discovery data * Cherry-pick changelog from 0.2.5 * Workflow documentation is now in infra-manual * Return serialized node to the ramdisk * Add option \`power\_off\_after\_discovery\` * Move code updating Node.properties to a plugin * Be even more paranoid in cleaning the iptables * Make /v1/continue synchronous and return real errors * Implement timeout for discovery * Support hooks for processing data * Fix issues with using the database * Pass toxinidir to the definition of requirements * Use node\_cache in firewall for fetching MAC's on discovery * Keep cache of nodes under discovery in the local database * Add CONTRIBUTING.rst * Bump version to 1.0.0 0.2.4 ----- * Release 0.2.4 to fix MANIFEST 0.2.3 ----- * Enable hacking * Post-migration 
updates for stackforge and launchpad * Updates prior to moving to stackforge * Rename tox job flake8 -> pep8 as per OpenStack standart 0.2.2 ----- * ChangeLog and small README fixes * Seriously update README * Allow undefined power state in maintenance mode and reboot instead of power on * Raise tox coverage target to 80% * More unit tests, fixed one bug * Separate utils module, more clean ups * Clean up in firewall module * gitignore .coverage file * Store discovery\_timestamp in node extra (part of #4) * Small tweak * Test coverage reporting and gating * Warn if node seems already on discovery (finally closes #3) * Refuse to discover nodes that are not powered off or have instance uuid (part of #3) * Validate power interface on starting discovery (part of #3) * Move configuration to a separate module * On each start-up make several attempts to check that Ironic is available * Fixed comments * Make firewall more robust * Fixed README * Update changelog in README * Apply workarounds to restore basic function under Py33 * Make validation for /v1/discover synchronous (part of #3) * Bump version 0.2.1 ----- * Updated README * Do no log until logging is initialized * Warn if starting with authenticated=false * Ensure nodes are in maintenance mode (closes #5) * Always white-list MACs (closes #6) * LOG.warn -> LOG.warning * Make new behavior of ports creation configurable (closes #9) * Fix README * Accept interfaces from the ramdisk and only fill ports for NIC's with IP's (closes #8) * Fixed a typo * Version bump 0.2.0 ----- * Updated description in setup.py * Add requests and six as explicit dependencies * Avoid tracebacks on power failure * Make SSH driver regex configurable * Do not fail discovery completely, if one node failed to power on * Fail early if no nodes were valid * Drop shebang from test.py, it's supposed to be used from tox * Separate firewall module * Fixed wrong signature of update\_filters * Fixed README * Make firewall update period configurable * Lock around firewall update * Silence info's from urllib3 * Fix tests and drop Python 2.6 support * Sort requirements.txt * Small fixes * Simple client in ironic\_discoverd.client * Create Ironic client on each update\_filters() call * Cleaned up \_iptables function * Do not allow periodic task to stop on failure * Finally remove Makefile * Enable testing for other pythons + get back flake8 * Install tox on Travis [2] * Install tox on Travis * Switch to tox * Python 3 compatibility * Enhanced README, added installation guide * Makefile: .PHONY * Switch to setuptools entry points * Implement authentication via Keystone * Refactor \_\_main\_\_ to import only discoverd module, not specific items * Bump version to 0.2.0 0.1.1 ----- * Include man page into manifest * Simple man page * Updated README * Make interface configurable * Drop defaults from example.conf * Version bump 0.1.0 ----- * Requires eventlet * Small wording fix * Version 0.1.0 * Remove on\_discovery after discovered data arrived * Remaining of threaded code * Revert "Switch to os.system instead of subprocess which is not thread-safe" * (Try to) switch to eventlet * Switch to os.system instead of subprocess which is not thread-safe * Updated from global requirements * Enhance example.conf with documentation * Remaining fixes + load listen address and port from configuration * Fixed for README * Stabilized API, update README * Fixed README * Bump version to 0.1 * Apache license * MANIFEST.in for example.conf * Late version bump * Update README and drop make run target * 
* Load configuration from file, not env
* Add unit tests
* README.md -> README.rst
* Added main script and updated setup.py
* Travis button
* Enable Travis and make test
* Updated Makefile and added README
* New structure, getting ready for PyPI
* Tabs vs spaces
* Small issues - batch 2
* Fixed firewall
* Small fixes
* Firewall handling
* Inline some code and add docstrings
* Call update\_filters() periodically
* Stub for changing iptables rules
* Initial implementation of /start call
* Force power state off after discovery
* Basic implementation
* Initial commit
ironic-inspector-7.2.0/ironic_inspector.egg-info/0000775000175100017510000000000013241324014022070 5ustar zuulzuul00000000000000ironic-inspector-7.2.0/ironic_inspector.egg-info/entry_points.txt0000664000175100017510000000513313241324013025367 0ustar zuulzuul00000000000000
[console_scripts]
ironic-inspector = ironic_inspector.cmd.all:main
ironic-inspector-dbsync = ironic_inspector.dbsync:main
ironic-inspector-rootwrap = oslo_rootwrap.cmd:main

[ironic_inspector.hooks.node_not_found]
enroll = ironic_inspector.plugins.discovery:enroll_node_not_found_hook
example = ironic_inspector.plugins.example:example_not_found_hook

[ironic_inspector.hooks.processing]
capabilities = ironic_inspector.plugins.capabilities:CapabilitiesHook
example = ironic_inspector.plugins.example:ExampleProcessingHook
extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook
lldp_basic = ironic_inspector.plugins.lldp_basic:LLDPBasicProcessingHook
local_link_connection = ironic_inspector.plugins.local_link_connection:GenericLocalLinkConnectionHook
pci_devices = ironic_inspector.plugins.pci_devices:PciDevicesHook
raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection
ramdisk_error = ironic_inspector.plugins.standard:RamdiskErrorHook
root_disk_selection = ironic_inspector.plugins.standard:RootDiskSelectionHook
scheduler = ironic_inspector.plugins.standard:SchedulerHook
validate_interfaces = ironic_inspector.plugins.standard:ValidateInterfacesHook

[ironic_inspector.pxe_filter]
dnsmasq = ironic_inspector.pxe_filter.dnsmasq:DnsmasqFilter
iptables = ironic_inspector.pxe_filter.iptables:IptablesFilter
noop = ironic_inspector.pxe_filter.base:NoopFilter

[ironic_inspector.rules.actions]
example = ironic_inspector.plugins.example:ExampleRuleAction
extend-attribute = ironic_inspector.plugins.rules:ExtendAttributeAction
fail = ironic_inspector.plugins.rules:FailAction
set-attribute = ironic_inspector.plugins.rules:SetAttributeAction
set-capability = ironic_inspector.plugins.rules:SetCapabilityAction

[ironic_inspector.rules.conditions]
contains = ironic_inspector.plugins.rules:ContainsCondition
eq = ironic_inspector.plugins.rules:EqCondition
ge = ironic_inspector.plugins.rules:GeCondition
gt = ironic_inspector.plugins.rules:GtCondition
in-net = ironic_inspector.plugins.rules:NetCondition
is-empty = ironic_inspector.plugins.rules:EmptyCondition
le = ironic_inspector.plugins.rules:LeCondition
lt = ironic_inspector.plugins.rules:LtCondition
matches = ironic_inspector.plugins.rules:MatchesCondition
ne = ironic_inspector.plugins.rules:NeCondition

[oslo.config.opts]
ironic_inspector = ironic_inspector.conf.opts:list_opts

[oslo.config.opts.defaults]
ironic_inspector = ironic_inspector.conf.opts:set_config_defaults

[oslo.policy.enforcer]
ironic_inspector = ironic_inspector.policy:get_oslo_policy_enforcer

[oslo.policy.policies]
ironic_inspector.api = ironic_inspector.policy:list_policies
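The entry points above are also the service's plugin surface: processing hooks, node-not-found hooks, PXE filter drivers and rule conditions/actions are all loaded by name from these groups. Below is a minimal sketch of a custom processing hook, modelled on the "example" plugin wired up under ironic_inspector.hooks.processing; the class name and the /extra/detected_bmc patch are hypothetical, and the sketch assumes the ProcessingHook base class with its before_processing/before_update methods and NodeInfo.patch, as used by the in-tree plugins.

# A minimal sketch only -- the hook name and the patch path below are
# hypothetical, not part of this tree.
from ironic_inspector.plugins import base
from ironic_inspector import utils

LOG = utils.getProcessingLogger(__name__)


class DetectedBMCHook(base.ProcessingHook):  # hypothetical name
    """Store the BMC address reported by the ramdisk in Node.extra."""

    def before_processing(self, introspection_data, **kwargs):
        # Runs on the raw ramdisk data, before the node is looked up.
        LOG.debug('received inventory: %s',
                  introspection_data.get('inventory'))

    def before_update(self, introspection_data, node_info, **kwargs):
        # Runs once the node is found; node_info wraps the Ironic node.
        bmc = introspection_data.get('inventory', {}).get('bmc_address')
        if bmc:
            node_info.patch([{'op': 'add',
                              'path': '/extra/detected_bmc',
                              'value': bmc}])

To activate such a hook, it would be registered in its own package's setup.cfg under the ironic_inspector.hooks.processing group and then appended to the processing_hooks option in the [processing] section of the inspector configuration.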
ironic-inspector-7.2.0/ironic_inspector.egg-info/dependency_links.txt0000664000175100017510000000000113241324013026135 0ustar zuulzuul00000000000000 ironic-inspector-7.2.0/ironic_inspector.egg-info/SOURCES.txt0000664000175100017510000003132513241324014023760 0ustar zuulzuul00000000000000.stestr.conf AUTHORS CONTRIBUTING.rst ChangeLog LICENSE README.rst babel.cfg config-generator.conf ironic-inspector.8 policy-generator.conf requirements.txt rootwrap.conf setup.cfg setup.py test-requirements.txt tox.ini devstack/example.local.conf devstack/plugin.sh devstack/settings devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/Makefile doc/source/.gitignore doc/source/conf.py doc/source/index.rst doc/source/admin/dnsmasq-pxe-filter.rst doc/source/admin/index.rst doc/source/admin/upgrade.rst doc/source/configuration/index.rst doc/source/configuration/ironic-inspector.rst doc/source/configuration/policy.rst doc/source/configuration/sample-config.rst doc/source/configuration/sample-policy.rst doc/source/contributor/index.rst doc/source/images/states.svg doc/source/install/index.rst doc/source/user/http-api.rst doc/source/user/index.rst doc/source/user/troubleshooting.rst doc/source/user/usage.rst doc/source/user/workflow.rst ironic_inspector/__init__.py ironic_inspector/alembic.ini ironic_inspector/api_tools.py ironic_inspector/db.py ironic_inspector/dbsync.py ironic_inspector/introspect.py ironic_inspector/introspection_state.py ironic_inspector/main.py ironic_inspector/node_cache.py ironic_inspector/policy.py ironic_inspector/process.py ironic_inspector/rules.py ironic_inspector/utils.py ironic_inspector/version.py ironic_inspector/wsgi_service.py ironic_inspector.egg-info/PKG-INFO ironic_inspector.egg-info/SOURCES.txt ironic_inspector.egg-info/dependency_links.txt ironic_inspector.egg-info/entry_points.txt ironic_inspector.egg-info/not-zip-safe ironic_inspector.egg-info/pbr.json ironic_inspector.egg-info/requires.txt ironic_inspector.egg-info/top_level.txt ironic_inspector/cmd/__init__.py ironic_inspector/cmd/all.py ironic_inspector/common/__init__.py ironic_inspector/common/context.py ironic_inspector/common/i18n.py ironic_inspector/common/ironic.py ironic_inspector/common/keystone.py ironic_inspector/common/lldp_parsers.py ironic_inspector/common/lldp_tlvs.py ironic_inspector/common/service_utils.py ironic_inspector/common/swift.py ironic_inspector/conf/__init__.py ironic_inspector/conf/capabilities.py ironic_inspector/conf/default.py ironic_inspector/conf/discovery.py ironic_inspector/conf/dnsmasq_pxe_filter.py ironic_inspector/conf/iptables.py ironic_inspector/conf/ironic.py ironic_inspector/conf/opts.py ironic_inspector/conf/pci_devices.py ironic_inspector/conf/processing.py ironic_inspector/conf/pxe_filter.py ironic_inspector/conf/swift.py ironic_inspector/locale/en_GB/LC_MESSAGES/ironic_inspector.po ironic_inspector/migrations/env.py ironic_inspector/migrations/script.py.mako ironic_inspector/migrations/versions/18440d0834af_introducing_the_aborting_state.py ironic_inspector/migrations/versions/578f84f38d_inital_db_schema.py ironic_inspector/migrations/versions/882b2d84cb1b_attribute_constraints_relaxing.py ironic_inspector/migrations/versions/d00d6e3f38c4_change_created_finished_at_type.py ironic_inspector/migrations/versions/d2e48801c8ef_introducing_node_state_attribute.py ironic_inspector/migrations/versions/d588418040d_add_rules.py ironic_inspector/migrations/versions/e169a4a81d88_add_invert_field_to_rule_condition.py 
ironic_inspector/plugins/__init__.py ironic_inspector/plugins/base.py ironic_inspector/plugins/capabilities.py ironic_inspector/plugins/discovery.py ironic_inspector/plugins/example.py ironic_inspector/plugins/extra_hardware.py ironic_inspector/plugins/lldp_basic.py ironic_inspector/plugins/local_link_connection.py ironic_inspector/plugins/pci_devices.py ironic_inspector/plugins/raid_device.py ironic_inspector/plugins/rules.py ironic_inspector/plugins/standard.py ironic_inspector/pxe_filter/__init__.py ironic_inspector/pxe_filter/base.py ironic_inspector/pxe_filter/dnsmasq.py ironic_inspector/pxe_filter/interface.py ironic_inspector/pxe_filter/iptables.py ironic_inspector/test/__init__.py ironic_inspector/test/base.py ironic_inspector/test/functional.py ironic_inspector/test/unit/__init__.py ironic_inspector/test/unit/policy_fixture.py ironic_inspector/test/unit/test_api_tools.py ironic_inspector/test/unit/test_common_ironic.py ironic_inspector/test/unit/test_db.py ironic_inspector/test/unit/test_dnsmasq_pxe_filter.py ironic_inspector/test/unit/test_introspect.py ironic_inspector/test/unit/test_iptables.py ironic_inspector/test/unit/test_keystone.py ironic_inspector/test/unit/test_main.py ironic_inspector/test/unit/test_migrations.py ironic_inspector/test/unit/test_node_cache.py ironic_inspector/test/unit/test_plugins_base.py ironic_inspector/test/unit/test_plugins_capabilities.py ironic_inspector/test/unit/test_plugins_discovery.py ironic_inspector/test/unit/test_plugins_extra_hardware.py ironic_inspector/test/unit/test_plugins_lldp_basic.py ironic_inspector/test/unit/test_plugins_local_link_connection.py ironic_inspector/test/unit/test_plugins_pci_devices.py ironic_inspector/test/unit/test_plugins_raid_device.py ironic_inspector/test/unit/test_plugins_rules.py ironic_inspector/test/unit/test_plugins_standard.py ironic_inspector/test/unit/test_process.py ironic_inspector/test/unit/test_pxe_filter.py ironic_inspector/test/unit/test_rules.py ironic_inspector/test/unit/test_swift.py ironic_inspector/test/unit/test_utils.py ironic_inspector/test/unit/test_wsgi_service.py playbooks/legacy/ironic-inspector-grenade-dsvm/post.yaml playbooks/legacy/ironic-inspector-grenade-dsvm/run.yaml playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/post.yaml playbooks/legacy/ironic-inspector-tempest-dsvm-discovery/run.yaml playbooks/legacy/ironic-inspector-tempest-dsvm-python3/post.yaml playbooks/legacy/ironic-inspector-tempest-dsvm-python3/run.yaml releasenotes/notes/.placeholder releasenotes/notes/Inspector_rules_API_does_not_return_all_attributes-98a9765726c405d5.yaml releasenotes/notes/Reapply_update_started_at-8af8cf254cdf8cde.yaml releasenotes/notes/UUID-started_at-finished_at-in-the-status-API-7860312102923938.yaml releasenotes/notes/abort-introspection-ae5cb5a9fbacd2ac.yaml releasenotes/notes/active_states_timeout-3e3ab110870483ec.yaml releasenotes/notes/add-disabled-option-to-add-ports-f8c6c9b3e6797652.yaml releasenotes/notes/add-lldp-basic-plugin-98aebcf43e60931b.yaml releasenotes/notes/add-lldp-plugin-4645596cb8b39fd3.yaml releasenotes/notes/add-lldp-plugin-dependency-c323412654f71b3e.yaml releasenotes/notes/add-node-state-to-introspection-api-response-85fb7f4e72ae386a.yaml releasenotes/notes/add-support-for-listing-all-introspection-statuses-2a3d4379c3854894.yaml releasenotes/notes/add-support-for-long-running-ramdisk-ffee3c177c56cebb.yaml releasenotes/notes/add_node-with-version_id-24f51e5888480aa0.yaml releasenotes/notes/allow-periodics-shutdown-inspector-ac28ea5ba3224279.yaml 
releasenotes/notes/bmc-logging-deprecation-4ca046a64fac6f11.yaml releasenotes/notes/capabilities-15cc2268d661f0a0.yaml releasenotes/notes/change_started_finished_at_type_to_datetime-c5617e598350970c.yaml releasenotes/notes/check-formatted-value-from-nonstring-3d851cb42ce3a0ac.yaml releasenotes/notes/compact-debug-logging-b15dd9bbdd3ce27a.yaml releasenotes/notes/contains-matches-ee28958b08995494.yaml releasenotes/notes/continue-http-500-62f33d425aade9d7.yaml releasenotes/notes/cors-5f345c65da7f5c99.yaml releasenotes/notes/custom-ramdisk-log-name-dac06822c38657e7.yaml releasenotes/notes/db-status-consistency-enhancements-f97fbaccfc81a60b.yaml releasenotes/notes/deprecate-rollback-dea95ac515d3189b.yaml releasenotes/notes/deprecate-root-device-hint-909d389b7efed5da.yaml releasenotes/notes/deprecate-setting-ipmi-creds-1581ddc63b273811.yaml releasenotes/notes/deprecated-options-removal-ocata-a44dadf3bcf8d6fc.yaml releasenotes/notes/disable-dhcp-c86a3a0ee2696ee0.yaml releasenotes/notes/dnsmasq-pxe-filter-37928d3fdb1e8ec3.yaml releasenotes/notes/drop-maintenance-a9a87a9a2af051ad.yaml releasenotes/notes/edeploy-typeerror-6486e31923d91666.yaml releasenotes/notes/empty-condition-abc707b771be6be3.yaml releasenotes/notes/empty-ipmi-address-2-4d57c34aec7d14e2.yaml releasenotes/notes/empty-ipmi-address-5b5ca186a066ed32.yaml releasenotes/notes/enroll-hook-d8c32eba70848210.yaml releasenotes/notes/extend-rules-9a9d38701e970611.yaml releasenotes/notes/extra-hardware-swift-aeebf299b9605bb0.yaml releasenotes/notes/firewall-refactoring-17e8ad764f2cde8d.yaml releasenotes/notes/firewall-rerun-f2d0f64cca2698ff.yaml releasenotes/notes/fix-CalledProcessError-on-startup-28d9dbed85a81542.yaml releasenotes/notes/fix-crash-when-use-postgresql-ac6c708f48f55c83.yaml releasenotes/notes/fix-deadlock-during-cleanup-bcb6b517ef299791.yaml releasenotes/notes/fix-llc-switch-id-not-mac-e2de3adc0945ee70.yaml releasenotes/notes/fix-mysql-6b79049fe96edae4.yaml releasenotes/notes/fix-periodic-tasks-configuration-edd167f0146e60b5.yaml releasenotes/notes/fix-rules-endpoint-response-d60984c40d927c1f.yaml releasenotes/notes/fix-wrong-provision-state-name-150c91c48d471bf9.yaml releasenotes/notes/fix_llc_hook_bugs-efeea008c2f792eb.yaml releasenotes/notes/fix_llc_port_assume-4ea47d26501bddc3.yaml releasenotes/notes/flask-debug-6d2dcc2b482324dc.yaml releasenotes/notes/futurist-557fcd18d4eaf1c1.yaml releasenotes/notes/googbye-patches-args-071532024b9260bd.yaml releasenotes/notes/hook-deps-83a867c7af0300e4.yaml releasenotes/notes/infiniband-support-960d6846e326dec4.yaml releasenotes/notes/introspection-delay-drivers-deprecation-1d0c25b112fbd4da.yaml releasenotes/notes/introspection-state-03538fac198882b6.yaml releasenotes/notes/ipa-inventory-0a1e8d644da850ff.yaml releasenotes/notes/ipa-support-7eea800306829a49.yaml releasenotes/notes/ipmi-credentials-removal-0021f89424fbf7a3.yaml releasenotes/notes/ironic-lib-hints-20412a1c7fa796e0.yaml releasenotes/notes/is-empty-missing-a590d580cb62761d.yaml releasenotes/notes/keystone-noauth-9ba5ad9884c6273c.yaml releasenotes/notes/keystoneauth-plugins-aab6cbe1d0e884bf.yaml releasenotes/notes/ksadapters-abc9edc63cafa405.yaml releasenotes/notes/less-iptables-calls-759e89d103df504c.yaml releasenotes/notes/local_gb-250bd415684a7855.yaml releasenotes/notes/log-info-not-found-cache-error-afbc87e80305ca5c.yaml releasenotes/notes/logs-collector-logging-356e56cd70a04a2b.yaml releasenotes/notes/lookup-all-macs-eead528c0b764ad7.yaml releasenotes/notes/loopback-bmc-e60d64fe74bdf142.yaml 
releasenotes/notes/migrations-autogenerate-4303fd496c3c2757.yaml releasenotes/notes/missing-pxe-mac-d9329dab85513460.yaml releasenotes/notes/multiattribute_node_lookup-17e219ba8d3e5eb0.yaml releasenotes/notes/names-82d9f84153a228ec.yaml releasenotes/notes/no-downgrade-migrations-514bf872d9f944ed.yaml releasenotes/notes/no-fail-on-power-off-enroll-node-e40854f6def397b8.yaml releasenotes/notes/no-logs-stored-data-6db52934c7f9a91a.yaml releasenotes/notes/no-old-ramdisk-095b05e1245131d8.yaml releasenotes/notes/no-rollback-e15bc7fee0134545.yaml releasenotes/notes/no-root_device_hint-0e7676d481d503bb.yaml releasenotes/notes/node-locking-4d135ca5b93524b1.yaml releasenotes/notes/optional-root-disk-9b972f504b2e6262.yaml releasenotes/notes/patch-head-backslash-24bcdd03ba254bf2.yaml releasenotes/notes/pci_devices-plugin-5b93196e0e973155.yaml releasenotes/notes/pgsql-imperative-enum-dda76f150a205d0a.yaml releasenotes/notes/policy-engine-c44828e3131e6c62.yaml releasenotes/notes/port-creation-plugin-c0405ec646b1051d.yaml releasenotes/notes/port-list-retry-745d1cf41780e961.yaml releasenotes/notes/preprocessing-error-01e55b4db20fb7fc.yaml releasenotes/notes/processing-data-type-check-7c914339d3ab15ba.yaml releasenotes/notes/processing-logging-e2d27bbac95a7213.yaml releasenotes/notes/pxe-enabled-cbc3287ebe3fcd49.yaml releasenotes/notes/ramdisk-logs-on-all-failures-24da41edf3a98400.yaml releasenotes/notes/reapply-introspection-5edbbfaf498dbd12.yaml releasenotes/notes/remove-deprecated-conf-opts-361ab0bb342f0e7e.yaml releasenotes/notes/remove-policy-json-b4746d64c1511023.yaml releasenotes/notes/rollback-formatting-7d61c9af2600d42f.yaml releasenotes/notes/rollback-removal-a03a989e2e9f776b.yaml releasenotes/notes/rules-invert-2585173a11db3c31.yaml releasenotes/notes/set-node-to-error-when-swift-failure-3e919ecbf9db6401.yaml releasenotes/notes/size-hint-ea2a264468e1fcb7.yaml releasenotes/notes/sphinx-docs-4d0a5886261e57bf.yaml releasenotes/notes/status-removal-fa1d9a98ffad9f60.yaml releasenotes/notes/tempest_plugin_removal-91a01f5950f543e1.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po rootwrap.d/ironic-inspector-firewall.filters tools/states_to_dot.py tools/test-setup.sh zuul.d/legacy-ironic-inspector-jobs.yaml zuul.d/project.yamlironic-inspector-7.2.0/ironic_inspector.egg-info/not-zip-safe0000664000175100017510000000000113241323720024321 0ustar zuulzuul00000000000000 ironic-inspector-7.2.0/ironic_inspector.egg-info/pbr.json0000664000175100017510000000005613241324013023546 0ustar zuulzuul00000000000000{"git_version": "a8d621f", "is_release": true}ironic-inspector-7.2.0/ironic_inspector.egg-info/top_level.txt0000664000175100017510000000002113241324013024612 0ustar zuulzuul00000000000000ironic_inspector ironic-inspector-7.2.0/ironic_inspector.egg-info/PKG-INFO0000664000175100017510000000514113241324013023165 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: ironic-inspector Version: 7.2.0 Summary: Hardware introspection for OpenStack Bare Metal Home-page: https://docs.openstack.org/ironic-inspector/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org 
License: Apache-2
Description-Content-Type: UNKNOWN
Description:
        ===============================================
        Hardware introspection for OpenStack Bare Metal
        ===============================================

        Introduction
        ============

        .. image:: https://governance.openstack.org/tc/badges/ironic-inspector.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        This is an auxiliary service for discovering hardware properties for a
        node managed by `Ironic`_. Hardware introspection or hardware properties
        discovery is a process of getting hardware parameters required for
        scheduling from a bare metal node, given its power management
        credentials (e.g. IPMI address, user name and password).

        * Free software: Apache license
        * Source: https://git.openstack.org/cgit/openstack/ironic-inspector
        * Bugs: https://bugs.launchpad.net/ironic-inspector
        * Downloads: https://pypi.python.org/pypi/ironic-inspector
        * Documentation: https://docs.openstack.org/ironic-inspector/latest/
        * Python client library and CLI tool: `python-ironic-inspector-client
          <https://pypi.python.org/pypi/python-ironic-inspector-client>`_
          (`documentation
          <https://docs.openstack.org/python-ironic-inspector-client/latest/>`_).

        .. _Ironic: https://wiki.openstack.org/wiki/Ironic

        .. note::
            **ironic-inspector** was called *ironic-discoverd* before version
            2.0.0.

        Release Notes
        =============

        For information on any current or prior version, see `the release notes`_.

        .. _the release notes: https://docs.openstack.org/releasenotes/ironic-inspector/

Platform: UNKNOWN
Classifier: Environment :: Console
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: System Administrators
Classifier: Intended Audience :: Information Technology
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
ironic-inspector-7.2.0/ironic_inspector.egg-info/requires.txt0000664000175100017510000000131013241324013024462 0ustar zuulzuul00000000000000
automaton>=1.9.0
alembic>=0.8.10
Babel!=2.4.0,>=2.3.4
construct>=2.8.10
eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2
Flask!=0.11,<1.0,>=0.10
futurist>=1.2.0
ironic-lib>=2.5.0
jsonpath-rw<2.0,>=1.2.0
jsonschema<3.0.0,>=2.6.0
keystoneauth1>=3.3.0
keystonemiddleware>=4.17.0
netaddr>=0.7.18
pbr!=2.1.0,>=2.0.0
python-ironicclient>=2.2.0
python-swiftclient>=3.2.0
pytz>=2013.6
oslo.concurrency>=3.25.0
oslo.config>=5.1.0
oslo.context>=2.19.2
oslo.db>=4.27.0
oslo.i18n>=3.15.3
oslo.log>=3.36.0
oslo.middleware>=3.31.0
oslo.policy>=1.30.0
oslo.rootwrap>=5.8.0
oslo.serialization!=2.19.1,>=2.18.0
oslo.utils>=3.33.0
retrying!=1.3.0,>=1.2.3
six>=1.10.0
stevedore>=1.20.0
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10
ironic-inspector-7.2.0/policy-generator.conf0000666000175100017510000000011413241323457021167 0ustar zuulzuul00000000000000
[DEFAULT]
output_file = policy.yaml.sample
namespace = ironic_inspector.api
ironic-inspector-7.2.0/.stestr.conf0000666000175100017510000000011313241323457017305 0ustar zuulzuul00000000000000
[DEFAULT]
test_path=${TESTS_DIR:-./ironic_inspector/test/unit/}
top_dir=./
ironic-inspector-7.2.0/babel.cfg0000666000175100017510000000002013241323457016557 0ustar zuulzuul00000000000000
[python: **.py]
ironic-inspector-7.2.0/PKG-INFO0000664000175100017510000000514113241324014016123 0ustar zuulzuul00000000000000
Metadata-Version: 1.1
Name: ironic-inspector
Version: 7.2.0
Summary: Hardware introspection for OpenStack Bare Metal
Home-page: https://docs.openstack.org/ironic-inspector/latest/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: Apache-2
Description-Content-Type: UNKNOWN
Description:
        ===============================================
        Hardware introspection for OpenStack Bare Metal
        ===============================================

        Introduction
        ============

        .. image:: https://governance.openstack.org/tc/badges/ironic-inspector.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        This is an auxiliary service for discovering hardware properties for a
        node managed by `Ironic`_. Hardware introspection or hardware properties
        discovery is a process of getting hardware parameters required for
        scheduling from a bare metal node, given its power management
        credentials (e.g. IPMI address, user name and password).

        * Free software: Apache license
        * Source: https://git.openstack.org/cgit/openstack/ironic-inspector
        * Bugs: https://bugs.launchpad.net/ironic-inspector
        * Downloads: https://pypi.python.org/pypi/ironic-inspector
        * Documentation: https://docs.openstack.org/ironic-inspector/latest/
        * Python client library and CLI tool: `python-ironic-inspector-client
          <https://pypi.python.org/pypi/python-ironic-inspector-client>`_
          (`documentation
          <https://docs.openstack.org/python-ironic-inspector-client/latest/>`_).

        .. _Ironic: https://wiki.openstack.org/wiki/Ironic

        .. note::
            **ironic-inspector** was called *ironic-discoverd* before version
            2.0.0.

        Release Notes
        =============

        For information on any current or prior version, see `the release notes`_.

        .. _the release notes: https://docs.openstack.org/releasenotes/ironic-inspector/

Platform: UNKNOWN
Classifier: Environment :: Console
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: System Administrators
Classifier: Intended Audience :: Information Technology
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
ironic-inspector-7.2.0/config-generator.conf0000666000175100017510000000031613241323457021141 0ustar zuulzuul00000000000000
[DEFAULT]
output_file = example.conf
namespace = ironic_inspector
namespace = keystonemiddleware.auth_token
namespace = oslo.db
namespace = oslo.log
namespace = oslo.middleware.cors
namespace = oslo.policy
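The config-generator.conf above tells oslo-config-generator which option namespaces to merge into example.conf; the ironic_inspector namespace resolves, via the [oslo.config.opts] entry point shown earlier, to ironic_inspector.conf.opts:list_opts. The following standalone sketch (a hypothetical helper, not part of this tree) walks that namespace programmatically; it assumes only the standard oslo.config convention that list_opts() returns (group, options) pairs.

# A minimal sketch: print the options that oslo-config-generator would
# collect from the "ironic_inspector" namespace when rendering
# example.conf. dump_namespace() is a hypothetical helper.
from ironic_inspector.conf import opts


def dump_namespace():
    for group, options in opts.list_opts():
        # group may be None, a section name string, or an OptGroup object.
        name = getattr(group, 'name', group) or 'DEFAULT'
        print('[%s]' % name)
        for opt in options:
            # Each opt is an oslo_config.cfg.Opt with name/help metadata.
            summary = (opt.help or '').strip().split('\n')[0]
            print('    %s  # %s' % (opt.name, summary))


if __name__ == '__main__':
    dump_namespace()

Run against an installed tree, something like this would list every option grouped by section, which is a quick way to preview what example.conf will contain before regenerating it.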