Naming cleanup

This refactoring patch fixes a few naming problems:
* the namespace for processing hooks implied that they were the only
  type of hooks; appended a ".processing" suffix
* node_info -> introspection_data
* cached_node -> node_info

Change-Id: I8401f95236e269583257c3c5ba3762d0920d32e8
Implements: blueprint plugin-interface-v2
This commit is contained in:
Dmitry Tantsur 2015-06-08 16:43:08 +02:00
parent 3ac546b157
commit d09da96e02
13 changed files with 194 additions and 177 deletions

View File

@ -94,52 +94,52 @@ def introspect(uuid, new_ipmi_credentials=None):
raise utils.Error(msg % {'node': node.uuid,
'reason': validation.power['reason']})
cached_node = node_cache.add_node(node.uuid,
bmc_address=utils.get_ipmi_address(node))
cached_node.set_option('new_ipmi_credentials', new_ipmi_credentials)
node_info = node_cache.add_node(node.uuid,
bmc_address=utils.get_ipmi_address(node))
node_info.set_option('new_ipmi_credentials', new_ipmi_credentials)
def _handle_exceptions():
try:
_background_introspect(ironic, cached_node)
_background_introspect(ironic, node_info)
except utils.Error as exc:
cached_node.finished(error=str(exc))
node_info.finished(error=str(exc))
except Exception as exc:
msg = _('Unexpected exception in background introspection thread')
LOG.exception(msg)
cached_node.finished(error=msg)
node_info.finished(error=msg)
utils.spawn_n(_handle_exceptions)
def _background_introspect(ironic, cached_node):
def _background_introspect(ironic, node_info):
# TODO(dtantsur): pagination
macs = [p.address for p in cached_node.ports(ironic)]
macs = [p.address for p in node_info.ports(ironic)]
if macs:
cached_node.add_attribute(node_cache.MACS_ATTRIBUTE, macs)
node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs)
LOG.info(_LI('Whitelisting MAC\'s %(macs)s for node %(node)s on the'
' firewall') %
{'macs': macs, 'node': cached_node.uuid})
{'macs': macs, 'node': node_info.uuid})
firewall.update_filters(ironic)
if not cached_node.options.get('new_ipmi_credentials'):
if not node_info.options.get('new_ipmi_credentials'):
try:
utils.retry_on_conflict(ironic.node.set_boot_device,
cached_node.uuid, 'pxe', persistent=False)
node_info.uuid, 'pxe', persistent=False)
except Exception as exc:
LOG.warning(_LW('Failed to set boot device to PXE for'
' node %(node)s: %(exc)s') %
{'node': cached_node.uuid, 'exc': exc})
{'node': node_info.uuid, 'exc': exc})
try:
utils.retry_on_conflict(ironic.node.set_power_state,
cached_node.uuid, 'reboot')
node_info.uuid, 'reboot')
except Exception as exc:
raise utils.Error(_('Failed to power on node %(node)s,'
' check it\'s power '
'management configuration:\n%(exc)s')
% {'node': cached_node.uuid, 'exc': exc})
% {'node': node_info.uuid, 'exc': exc})
else:
LOG.info(_LI('Introspection environment is ready for node %(node)s, '
'manual power on is required within %(timeout)d seconds') %
{'node': cached_node.uuid,
{'node': node_info.uuid,
'timeout': CONF.timeout})

View File

@ -27,17 +27,17 @@ CONF = cfg.CONF
class ProcessingHook(object): # pragma: no cover
"""Abstract base class for introspection data processing hooks."""
def before_processing(self, node_info):
def before_processing(self, introspection_data):
"""Hook to run before any other data processing.
This hook is run even before sanity checks.
:param node_info: raw information sent by the ramdisk, may be modified
by the hook.
:param introspection_data: raw information sent by the ramdisk,
may be modified by the hook.
:returns: nothing.
"""
def before_update(self, node, ports, node_info):
def before_update(self, node, ports, introspection_data):
"""Hook to run before Ironic node update.
This hook is run after node is found and ports are created,
@ -47,7 +47,7 @@ class ProcessingHook(object): # pragma: no cover
be modified directly by the hook.
:param ports: Ironic ports created by inspector, also should not be
updated directly.
:param node_info: processed data from the ramdisk.
:param introspection_data: processed data from the ramdisk.
:returns: tuple (node patches, port patches) where
*node_patches* is a list of JSON patches [RFC 6902] to apply
to the node, *port_patches* is a dict where keys are
@ -76,9 +76,10 @@ def processing_hooks_manager(*args):
names = [x.strip()
for x in CONF.processing.processing_hooks.split(',')
if x.strip()]
_HOOKS_MGR = named.NamedExtensionManager('ironic_inspector.hooks',
names=names,
invoke_on_load=True,
invoke_args=args,
name_order=True)
_HOOKS_MGR = named.NamedExtensionManager(
'ironic_inspector.hooks.processing',
names=names,
invoke_on_load=True,
invoke_args=args,
name_order=True)
return _HOOKS_MGR

View File

@ -28,16 +28,16 @@ LOG = logging.getLogger('ironic_inspector.plugins.edeploy')
class eDeployHook(base.ProcessingHook):
"""Processing hook for saving additional data from eDeploy ramdisk."""
def before_update(self, node, ports, node_info):
def before_update(self, node, ports, introspection_data):
"""Store the hardware data from what has been discovered."""
if 'data' not in node_info:
if 'data' not in introspection_data:
LOG.warning(_LW('No eDeploy data was received from the ramdisk'))
return [], {}
# (trown) it is useful for the edeploy report tooling to have the node
# uuid stored with the other edeploy_facts
node_info['data'].append(['system', 'product',
'ironic_uuid', node.uuid])
introspection_data['data'].append(['system', 'product',
'ironic_uuid', node.uuid])
return [{'op': 'add',
'path': '/extra/edeploy_facts',
'value': node_info['data']}], {}
'value': introspection_data['data']}], {}

View File

@ -22,8 +22,8 @@ LOG = logging.getLogger('ironic_inspector.plugins.example')
class ExampleProcessingHook(base.ProcessingHook): # pragma: no cover
def before_processing(self, node_info):
LOG.debug('before_processing: %s', node_info)
def before_processing(self, introspection_data):
LOG.debug('before_processing: %s', introspection_data)
def before_update(self, node, ports, node_info):
LOG.debug('before_update: %s (node %s)', node_info, node.uuid)
def before_update(self, node, ports, introspection_data):
LOG.debug('before_update: %s (node %s)', introspection_data, node.uuid)

View File

@ -44,15 +44,15 @@ class RootDeviceHintHook(base.ProcessingHook):
the plugin needs to take precedence over the standard plugin.
"""
def before_processing(self, node_info):
"""Adds fake local_gb value if it's missing from node_info."""
if not node_info.get('local_gb'):
def before_processing(self, introspection_data):
"""Adds fake local_gb value if it's missing from introspection_data."""
if not introspection_data.get('local_gb'):
LOG.info(_LI('No volume is found on the node. Adding a fake '
'value for "local_gb"'))
node_info['local_gb'] = 1
introspection_data['local_gb'] = 1
def before_update(self, node, ports, node_info):
if 'block_devices' not in node_info:
def before_update(self, node, ports, introspection_data):
if 'block_devices' not in introspection_data:
LOG.warning(_LW('No block device was received from ramdisk'))
return [], {}
@ -63,7 +63,7 @@ class RootDeviceHintHook(base.ProcessingHook):
if 'block_devices' in node.extra:
# Compare previously discovered devices with the current ones
previous_devices = node.extra['block_devices']['serials']
current_devices = node_info['block_devices']['serials']
current_devices = introspection_data['block_devices']['serials']
new_devices = [device for device in current_devices
if device not in previous_devices]
@ -89,5 +89,5 @@ class RootDeviceHintHook(base.ProcessingHook):
return [
{'op': 'add',
'path': '/extra/block_devices',
'value': node_info['block_devices']}
'value': introspection_data['block_devices']}
], {}

View File

@ -37,9 +37,9 @@ class SchedulerHook(base.ProcessingHook):
KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
def before_processing(self, node_info):
def before_processing(self, introspection_data):
"""Validate that required properties are provided by the ramdisk."""
missing = [key for key in self.KEYS if not node_info.get(key)]
missing = [key for key in self.KEYS if not introspection_data.get(key)]
if missing:
raise utils.Error(
_('The following required parameters are missing: %s') %
@ -47,13 +47,13 @@ class SchedulerHook(base.ProcessingHook):
LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
{key: node_info.get(key) for key in self.KEYS})
{key: introspection_data.get(key) for key in self.KEYS})
def before_update(self, node, ports, node_info):
def before_update(self, node, ports, introspection_data):
"""Update node with scheduler properties."""
overwrite = CONF.processing.overwrite_existing
patch = [{'op': 'add', 'path': '/properties/%s' % key,
'value': str(node_info[key])}
'value': str(introspection_data[key])}
for key in self.KEYS
if overwrite or not node.properties.get(key)]
return patch, {}
@ -77,18 +77,18 @@ class ValidateInterfacesHook(base.ProcessingHook):
'actual': CONF.processing.keep_ports})
sys.exit(1)
def before_processing(self, node_info):
def before_processing(self, introspection_data):
"""Validate information about network interfaces."""
bmc_address = node_info.get('ipmi_address')
if not node_info.get('interfaces'):
bmc_address = introspection_data.get('ipmi_address')
if not introspection_data.get('interfaces'):
raise utils.Error(_('No interfaces supplied by the ramdisk'))
valid_interfaces = {
n: iface for n, iface in node_info['interfaces'].items()
n: iface for n, iface in introspection_data['interfaces'].items()
if utils.is_valid_mac(iface.get('mac'))
}
pxe_mac = node_info.get('boot_interface')
pxe_mac = introspection_data.get('boot_interface')
if CONF.processing.add_ports == 'pxe' and pxe_mac:
LOG.info(_LI('PXE boot interface was %s'), pxe_mac)
@ -111,30 +111,32 @@ class ValidateInterfacesHook(base.ProcessingHook):
raise utils.Error(_('No valid interfaces found for node with '
'BMC %(ipmi_address)s, got %(interfaces)s') %
{'ipmi_address': bmc_address,
'interfaces': node_info['interfaces']})
elif valid_interfaces != node_info['interfaces']:
'interfaces': introspection_data['interfaces']})
elif valid_interfaces != introspection_data['interfaces']:
invalid = {n: iface
for n, iface in introspection_data['interfaces'].items()
if n not in valid_interfaces}
LOG.warning(_LW(
'The following interfaces were invalid or not eligible in '
'introspection data for node with BMC %(ipmi_address)s and '
'were excluded: %(invalid)s'),
{'invalid': {n: iface
for n, iface in node_info['interfaces'].items()
if n not in valid_interfaces},
'ipmi_address': bmc_address})
{'invalid': invalid, 'ipmi_address': bmc_address})
LOG.info(_LI('Eligible interfaces are %s'), valid_interfaces)
node_info['all_interfaces'] = node_info['interfaces']
node_info['interfaces'] = valid_interfaces
introspection_data['all_interfaces'] = introspection_data['interfaces']
introspection_data['interfaces'] = valid_interfaces
valid_macs = [iface['mac'] for iface in valid_interfaces.values()]
node_info['macs'] = valid_macs
introspection_data['macs'] = valid_macs
def before_update(self, node, ports, node_info):
def before_update(self, node, ports, introspection_data):
"""Drop ports that are not present in the data."""
if CONF.processing.keep_ports == 'present':
expected_macs = {iface['mac']
for iface in node_info['all_interfaces'].values()}
expected_macs = {
iface['mac']
for iface in introspection_data['all_interfaces'].values()
}
elif CONF.processing.keep_ports == 'added':
expected_macs = set(node_info['macs'])
expected_macs = set(introspection_data['macs'])
else:
return
@ -156,17 +158,17 @@ class RamdiskErrorHook(base.ProcessingHook):
DATETIME_FORMAT = '%Y.%m.%d_%H.%M.%S_%f'
def before_processing(self, node_info):
error = node_info.get('error')
logs = node_info.get('logs')
def before_processing(self, introspection_data):
error = introspection_data.get('error')
logs = introspection_data.get('logs')
if logs and (error or CONF.processing.always_store_ramdisk_logs):
self._store_logs(logs, node_info)
self._store_logs(logs, introspection_data)
if error:
raise utils.Error(_('Ramdisk reported error: %s') % error)
def _store_logs(self, logs, node_info):
def _store_logs(self, logs, introspection_data):
if not CONF.processing.ramdisk_logs_dir:
LOG.warn(_LW('Failed to store logs received from the ramdisk '
'because ramdisk_logs_dir configuration option '
@ -177,7 +179,7 @@ class RamdiskErrorHook(base.ProcessingHook):
os.makedirs(CONF.processing.ramdisk_logs_dir)
time_fmt = datetime.datetime.utcnow().strftime(self.DATETIME_FORMAT)
bmc_address = node_info.get('ipmi_address', 'unknown')
bmc_address = introspection_data.get('ipmi_address', 'unknown')
file_name = 'bmc_%s_%s' % (bmc_address, time_fmt)
with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name),
'wb') as fp:

View File

@ -31,7 +31,7 @@ _CREDENTIALS_WAIT_RETRIES = 10
_CREDENTIALS_WAIT_PERIOD = 3
def process(node_info):
def process(introspection_data):
"""Process data from the ramdisk.
This function heavily relies on the hooks to do the actual data processing.
@ -42,7 +42,7 @@ def process(node_info):
# NOTE(dtantsur): catch exceptions, so that we have changes to update
# node introspection status after look up
try:
hook_ext.obj.before_processing(node_info)
hook_ext.obj.before_processing(introspection_data)
except utils.Error as exc:
LOG.error(_LE('Hook %(hook)s failed, delaying error report '
'until node look up: %(error)s'),
@ -57,23 +57,23 @@ def process(node_info):
'in hook %s') % hook_ext.name)
try:
cached_node = node_cache.find_node(
bmc_address=node_info.get('ipmi_address'),
mac=node_info.get('macs'))
node_info = node_cache.find_node(
bmc_address=introspection_data.get('ipmi_address'),
mac=introspection_data.get('macs'))
except utils.Error as exc:
if failures:
failures.append(_('Look up error: %s') % exc)
cached_node = None
node_info = None
else:
raise
if failures and cached_node:
if failures and node_info:
msg = _('The following failures happened during running '
'pre-processing hooks for node %(uuid)s:\n%(failures)s') % {
'uuid': cached_node.uuid,
'uuid': node_info.uuid,
'failures': '\n'.join(failures)
}
cached_node.finished(error=_('Data pre-processing failed'))
node_info.finished(error=_('Data pre-processing failed'))
raise utils.Error(msg)
elif failures:
msg = _('The following failures happened during running '
@ -84,26 +84,26 @@ def process(node_info):
ironic = utils.get_client()
try:
node = cached_node.node(ironic)
node = node_info.node(ironic)
except exceptions.NotFound:
msg = (_('Node UUID %s was found in cache, but is not found in Ironic')
% cached_node.uuid)
cached_node.finished(error=msg)
% node_info.uuid)
node_info.finished(error=msg)
raise utils.Error(msg, code=404)
try:
return _process_node(ironic, node, node_info, cached_node)
return _process_node(ironic, node, introspection_data, node_info)
except utils.Error as exc:
cached_node.finished(error=str(exc))
node_info.finished(error=str(exc))
raise
except Exception as exc:
msg = _('Unexpected exception during processing')
LOG.exception(msg)
cached_node.finished(error=msg)
node_info.finished(error=msg)
raise utils.Error(msg)
def _run_post_hooks(node, ports, node_info):
def _run_post_hooks(node, ports, introspection_data):
hooks = plugins_base.processing_hooks_manager()
port_instances = list(ports.values())
@ -111,7 +111,7 @@ def _run_post_hooks(node, ports, node_info):
port_patches = {}
for hook_ext in hooks:
hook_patch = hook_ext.obj.before_update(node, port_instances,
node_info)
introspection_data)
if not hook_patch:
continue
@ -124,12 +124,13 @@ def _run_post_hooks(node, ports, node_info):
return node_patches, port_patches
def _process_node(ironic, node, node_info, cached_node):
def _process_node(ironic, node, introspection_data, node_info):
# NOTE(dtantsur): repeat the check in case something changed
utils.check_provision_state(node)
ports = {}
for mac in (node_info.get('macs') or ()):
ironic = utils.get_client()
for mac in (introspection_data.get('macs') or ()):
try:
port = ironic.port.create(node_uuid=node.uuid, address=mac)
ports[mac] = port
@ -139,9 +140,10 @@ def _process_node(ironic, node, node_info, cached_node):
'database - skipping') %
{'mac': mac, 'node': node.uuid})
node_patches, port_patches = _run_post_hooks(node, ports, node_info)
node_patches, port_patches = _run_post_hooks(node, ports,
introspection_data)
# Invalidate cache in case of hooks modifying options
cached_node.invalidate_cache()
node_info.invalidate_cache()
node = utils.retry_on_conflict(ironic.node.update, node.uuid, node_patches)
for mac, patches in port_patches.items():
@ -155,65 +157,66 @@ def _process_node(ironic, node, node_info, cached_node):
resp = {'uuid': node.uuid}
if cached_node.options.get('new_ipmi_credentials'):
if node_info.options.get('new_ipmi_credentials'):
new_username, new_password = (
cached_node.options.get('new_ipmi_credentials'))
node_info.options.get('new_ipmi_credentials'))
utils.spawn_n(_finish_set_ipmi_credentials,
ironic, node, cached_node, node_info,
ironic, node, node_info, introspection_data,
new_username, new_password)
resp['ipmi_setup_credentials'] = True
resp['ipmi_username'] = new_username
resp['ipmi_password'] = new_password
else:
utils.spawn_n(_finish, ironic, cached_node)
utils.spawn_n(_finish, ironic, node_info)
return resp
def _finish_set_ipmi_credentials(ironic, node, cached_node, node_info,
def _finish_set_ipmi_credentials(ironic, node, node_info, introspection_data,
new_username, new_password):
patch = [{'op': 'add', 'path': '/driver_info/ipmi_username',
'value': new_username},
{'op': 'add', 'path': '/driver_info/ipmi_password',
'value': new_password}]
if not utils.get_ipmi_address(node) and node_info.get('ipmi_address'):
if (not utils.get_ipmi_address(node) and
introspection_data.get('ipmi_address')):
patch.append({'op': 'add', 'path': '/driver_info/ipmi_address',
'value': node_info['ipmi_address']})
utils.retry_on_conflict(ironic.node.update, cached_node.uuid, patch)
'value': introspection_data['ipmi_address']})
utils.retry_on_conflict(ironic.node.update, node_info.uuid, patch)
for attempt in range(_CREDENTIALS_WAIT_RETRIES):
try:
# We use this call because it requires valid credentials.
# We don't care about boot device, obviously.
ironic.node.get_boot_device(cached_node.uuid)
ironic.node.get_boot_device(node_info.uuid)
except Exception as exc:
LOG.info(_LI('Waiting for credentials update on node %(node)s,'
' attempt %(attempt)d current error is %(exc)s') %
{'node': cached_node.uuid,
{'node': node_info.uuid,
'attempt': attempt, 'exc': exc})
eventlet.greenthread.sleep(_CREDENTIALS_WAIT_PERIOD)
else:
_finish(ironic, cached_node)
_finish(ironic, node_info)
return
msg = (_('Failed to validate updated IPMI credentials for node '
'%s, node might require maintenance') % cached_node.uuid)
cached_node.finished(error=msg)
'%s, node might require maintenance') % node_info.uuid)
node_info.finished(error=msg)
raise utils.Error(msg)
def _finish(ironic, cached_node):
LOG.debug('Forcing power off of node %s', cached_node.uuid)
def _finish(ironic, node_info):
LOG.debug('Forcing power off of node %s', node_info.uuid)
try:
utils.retry_on_conflict(ironic.node.set_power_state,
cached_node.uuid, 'off')
node_info.uuid, 'off')
except Exception as exc:
msg = (_('Failed to power off node %(node)s, check it\'s power '
'management configuration: %(exc)s') %
{'node': cached_node.uuid, 'exc': exc})
cached_node.finished(error=msg)
{'node': node_info.uuid, 'exc': exc})
node_info.finished(error=msg)
raise utils.Error(msg)
cached_node.finished()
node_info.finished()
LOG.info(_LI('Introspection finished successfully for node %s'),
cached_node.uuid)
node_info.uuid)

View File

@ -37,8 +37,8 @@ class BaseTest(test_base.NodeTest):
power_state='power on',
provision_state='foobar')
self.ports = [mock.Mock(address=m) for m in self.macs]
self.cached_node = mock.Mock(uuid=self.uuid, options={})
self.cached_node.ports.return_value = self.ports
self.node_info = mock.Mock(uuid=self.uuid, options={})
self.node_info.ports.return_value = self.ports
def _prepare(self, client_mock):
cli = client_mock.return_value
@ -56,7 +56,7 @@ class BaseTest(test_base.NodeTest):
class TestIntrospect(BaseTest):
def test_ok(self, client_mock, add_mock, filters_mock):
cli = self._prepare(client_mock)
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
introspect.introspect(self.node.uuid)
@ -65,9 +65,9 @@ class TestIntrospect(BaseTest):
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address)
self.cached_node.ports.assert_called_once_with(cli)
self.cached_node.add_attribute.assert_called_once_with('mac',
self.macs)
self.node_info.ports.assert_called_once_with(cli)
self.node_info.add_attribute.assert_called_once_with('mac',
self.macs)
filters_mock.assert_called_with(cli)
cli.node.set_boot_device.assert_called_once_with(self.uuid,
'pxe',
@ -79,7 +79,7 @@ class TestIntrospect(BaseTest):
def test_ok_ilo_and_drac(self, client_mock, add_mock, filters_mock):
self._prepare(client_mock)
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
for name in ('ilo_address', 'drac_host'):
self.node.driver_info = {name: self.bmc_address}
@ -96,13 +96,13 @@ class TestIntrospect(BaseTest):
None]
cli.node.set_power_state.side_effect = [exceptions.Conflict,
None]
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
introspect.introspect(self.node.uuid)
cli.node.get.assert_called_once_with(self.uuid)
cli.node.validate.assert_called_with(self.uuid)
self.cached_node.ports.assert_called_once_with(cli)
self.node_info.ports.assert_called_once_with(cli)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address)
@ -117,7 +117,7 @@ class TestIntrospect(BaseTest):
cli = self._prepare(client_mock)
cli.node.set_boot_device.side_effect = exceptions.BadRequest()
cli.node.set_power_state.side_effect = exceptions.BadRequest()
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
introspect.introspect(self.node.uuid)
@ -135,7 +135,7 @@ class TestIntrospect(BaseTest):
def test_unexpected_error(self, client_mock, add_mock, filters_mock):
cli = self._prepare(client_mock)
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
filters_mock.side_effect = RuntimeError()
introspect.introspect(self.node.uuid)
@ -176,15 +176,15 @@ class TestIntrospect(BaseTest):
def test_no_macs(self, client_mock, add_mock, filters_mock):
cli = self._prepare(client_mock)
self.ports[:] = []
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
introspect.introspect(self.node.uuid)
self.cached_node.ports.assert_called_once_with(cli)
self.node_info.ports.assert_called_once_with(cli)
add_mock.assert_called_once_with(self.uuid,
bmc_address=self.bmc_address)
self.assertFalse(self.cached_node.add_attribute.called)
self.assertFalse(self.node_info.add_attribute.called)
self.assertFalse(filters_mock.called)
cli.node.set_boot_device.assert_called_once_with(self.uuid,
'pxe',
@ -204,7 +204,7 @@ class TestIntrospect(BaseTest):
'Cannot get node',
introspect.introspect, self.uuid)
self.assertEqual(0, self.cached_node.ports.call_count)
self.assertEqual(0, self.node_info.ports.call_count)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
self.assertFalse(add_mock.called)
@ -222,7 +222,7 @@ class TestIntrospect(BaseTest):
introspect.introspect, self.uuid)
cli.node.validate.assert_called_once_with(self.uuid)
self.assertEqual(0, self.cached_node.ports.call_count)
self.assertEqual(0, self.node_info.ports.call_count)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
self.assertFalse(add_mock.called)
@ -237,7 +237,7 @@ class TestIntrospect(BaseTest):
'node %s with provision state "active"' % self.uuid,
introspect.introspect, self.uuid)
self.assertEqual(0, self.cached_node.ports.call_count)
self.assertEqual(0, self.node_info.ports.call_count)
self.assertEqual(0, filters_mock.call_count)
self.assertEqual(0, cli.node.set_power_state.call_count)
self.assertFalse(add_mock.called)
@ -254,12 +254,12 @@ class TestSetIpmiCredentials(BaseTest):
CONF.set_override('enable_setting_ipmi_credentials', True,
'processing')
self.new_creds = ('user', 'password')
self.cached_node.options['new_ipmi_credentials'] = self.new_creds
self.node_info.options['new_ipmi_credentials'] = self.new_creds
self.node.maintenance = True
def test_ok(self, client_mock, add_mock, filters_mock):
cli = self._prepare(client_mock)
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
introspect.introspect(self.uuid, new_ipmi_credentials=self.new_creds)
@ -289,7 +289,7 @@ class TestSetIpmiCredentials(BaseTest):
def test_default_username(self, client_mock, add_mock, filters_mock):
cli = self._prepare(client_mock)
add_mock.return_value = self.cached_node
add_mock.return_value = self.node_info
self.node.driver_info['ipmi_username'] = self.new_creds[0]
introspect.introspect(self.uuid,

View File

@ -170,11 +170,11 @@ class TestPlugins(unittest.TestCase):
plugins_base._HOOKS_MGR = None
CONF.set_override('processing_hooks', 'example', 'processing')
mgr = plugins_base.processing_hooks_manager()
mgr.map_method('before_processing', 'node_info')
mock_pre.assert_called_once_with(mock.ANY, 'node_info')
mgr.map_method('before_update', 'node', ['port'], 'node_info')
mgr.map_method('before_processing', 'introspection_data')
mock_pre.assert_called_once_with(mock.ANY, 'introspection_data')
mgr.map_method('before_update', 'node', ['port'], 'introspection_data')
mock_post.assert_called_once_with(mock.ANY, 'node', ['port'],
'node_info')
'introspection_data')
def test_manager_is_cached(self):
self.assertIs(plugins_base.processing_hooks_manager(),

View File

@ -22,11 +22,13 @@ class TestEdeploy(test_base.NodeTest):
self.hook = edeploy.eDeployHook()
def test_data_recieved(self):
node_info = {'data': [['memory', 'total', 'size', '4294967296'],
['cpu', 'physical', 'number', '1'],
['cpu', 'logical', 'number', '1']]}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {
'data': [['memory', 'total', 'size', '4294967296'],
['cpu', 'physical', 'number', '1'],
['cpu', 'logical', 'number', '1']]}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
expected_value = [['memory', 'total', 'size', '4294967296'],
['cpu', 'physical', 'number', '1'],
@ -40,7 +42,8 @@ class TestEdeploy(test_base.NodeTest):
node_patches[0]['value'])
def test_no_data_recieved(self):
node_info = {'cats': 'meow'}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {'cats': 'meow'}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual(0, len(node_patches))

View File

@ -22,33 +22,35 @@ class TestRootDeviceHint(test_base.NodeTest):
self.hook = root_device_hint.RootDeviceHintHook()
def test_missing_local_gb(self):
node_info = {}
self.hook.before_processing(node_info)
introspection_data = {}
self.hook.before_processing(introspection_data)
self.assertEqual(1, node_info['local_gb'])
self.assertEqual(1, introspection_data['local_gb'])
def test_local_gb_not_changes(self):
node_info = {'local_gb': 42}
self.hook.before_processing(node_info)
introspection_data = {'local_gb': 42}
self.hook.before_processing(introspection_data)
self.assertEqual(42, node_info['local_gb'])
self.assertEqual(42, introspection_data['local_gb'])
def test_no_previous_block_devices(self):
node_info = {'block_devices': {'serials': ['foo', 'bar']}}
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {'block_devices': {'serials': ['foo', 'bar']}}
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual('add',
node_patches[0]['op'])
self.assertEqual('/extra/block_devices',
node_patches[0]['path'])
self.assertEqual(node_info['block_devices'],
self.assertEqual(introspection_data['block_devices'],
node_patches[0]['value'])
def test_root_device_found(self):
self.node.extra['block_devices'] = {'serials': ['foo', 'bar']}
node_info = {'block_devices': {'serials': ['foo', 'baz']}}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {'block_devices': {'serials': ['foo', 'baz']}}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual('remove',
node_patches[0]['op'])
@ -63,31 +65,37 @@ class TestRootDeviceHint(test_base.NodeTest):
def test_root_device_already_exposed(self):
self.node.properties['root_device'] = {'serial': 'foo'}
node_info = {'block_devices': {'serials': ['foo', 'baz']}}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {'block_devices': {'serials': ['foo', 'baz']}}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual(0, len(node_patches))
def test_multiple_new_devices(self):
self.node.extra['block_devices'] = {'serials': ['foo', 'bar']}
node_info = {'block_devices': {'serials': ['foo', 'baz', 'qux']}}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {
'block_devices': {'serials': ['foo', 'baz', 'qux']}
}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual(0, len(node_patches))
def test_no_new_devices(self):
self.node.extra['block_devices'] = {'serials': ['foo', 'bar']}
node_info = {'block_devices': {'serials': ['foo', 'bar']}}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {'block_devices': {'serials': ['foo', 'bar']}}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual(0, len(node_patches))
def test_no_block_devices_from_ramdisk(self):
node_info = {}
self.hook.before_processing(node_info)
node_patches, _ = self.hook.before_update(self.node, None, node_info)
introspection_data = {}
self.hook.before_processing(introspection_data)
node_patches, _ = self.hook.before_update(self.node, None,
introspection_data)
self.assertEqual(0, len(node_patches))

View File

@ -321,8 +321,8 @@ class TestProcessNode(BaseTest):
self.data['macs'] = self.macs # validate_interfaces hook
self.data['all_interfaces'] = self.data['interfaces']
self.ports = self.all_ports
self.cached_node = node_cache.NodeInfo(uuid=self.uuid,
started_at=self.started_at)
self.node_info = node_cache.NodeInfo(uuid=self.uuid,
started_at=self.started_at)
self.patch_props = [
{'path': '/properties/cpus', 'value': '2', 'op': 'add'},
{'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
@ -347,7 +347,7 @@ class TestProcessNode(BaseTest):
def call(self, mock_cli):
mock_cli.return_value = self.cli
return process._process_node(self.cli, self.node, self.data,
self.cached_node)
self.node_info)
def test_return_includes_uuid(self, filters_mock, post_hook_mock):
ret_val = self.call()
@ -355,7 +355,7 @@ class TestProcessNode(BaseTest):
def test_return_includes_uuid_with_ipmi_creds(self, filters_mock,
post_hook_mock):
self.cached_node.set_option('new_ipmi_credentials', self.new_creds)
self.node_info.set_option('new_ipmi_credentials', self.new_creds)
ret_val = self.call()
self.assertEqual(self.uuid, ret_val.get('uuid'))
self.assertTrue(ret_val.get('ipmi_setup_credentials'))
@ -454,7 +454,7 @@ class TestProcessNode(BaseTest):
port_patch)
def test_set_ipmi_credentials(self, filters_mock, post_hook_mock):
self.cached_node.set_option('new_ipmi_credentials', self.new_creds)
self.node_info.set_option('new_ipmi_credentials', self.new_creds)
self.call()
@ -466,7 +466,7 @@ class TestProcessNode(BaseTest):
def test_set_ipmi_credentials_no_address(self, filters_mock,
post_hook_mock):
self.cached_node.set_option('new_ipmi_credentials', self.new_creds)
self.node_info.set_option('new_ipmi_credentials', self.new_creds)
del self.node.driver_info['ipmi_address']
self.patch_credentials.append({'op': 'add',
'path': '/driver_info/ipmi_address',
@ -483,7 +483,7 @@ class TestProcessNode(BaseTest):
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_set_ipmi_credentials_timeout(self, finished_mock,
filters_mock, post_hook_mock):
self.cached_node.set_option('new_ipmi_credentials', self.new_creds)
self.node_info.set_option('new_ipmi_credentials', self.new_creds)
self.cli.node.get_boot_device.side_effect = RuntimeError('boom')
self.assertRaisesRegexp(utils.Error, 'Failed to validate',

View File

@ -34,7 +34,7 @@ setup(
'console_scripts': [
"ironic-inspector = ironic_inspector.main:main",
],
'ironic_inspector.hooks': [
'ironic_inspector.hooks.processing': [
"scheduler = ironic_inspector.plugins.standard:SchedulerHook",
"validate_interfaces = ironic_inspector.plugins.standard:ValidateInterfacesHook",
"ramdisk_error = ironic_inspector.plugins.standard:RamdiskErrorHook",