Track node identification throughout processing

Currently our logging in processing is very inconsistent:
some log strings mention the node UUID, some the node BMC IP, and
some nothing at all. This change introduces a common prefix for all
processing logs, built from as much information as is available.
Only code that actually has some context about the node (either
NodeInfo or introspection data) is updated.

Logging BMC addresses can now be disabled.

Updates example.conf (many updated comments pulled in from oslo).

Change-Id: Ib20f2acdc60bfaceed7a33467557b92857c32798
Dmitry Tantsur 2016-01-08 14:00:09 +01:00
parent 1350b06b16
commit 0423d93736
16 changed files with 413 additions and 243 deletions
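
The idea in a nutshell, before the diffs: processing logs are routed through a
logging adapter that prepends a node-identification prefix built from whatever
is known about the node. Here is a minimal, self-contained sketch of that idea
using only the Python standard library; the names PrefixAdapter, prefix and the
keyword arguments are illustrative only, while the real implementation is
ProcessingLoggerAdapter and processing_logger_prefix in
ironic_inspector/utils.py further down:

    import logging

    def prefix(node_uuid=None, mac=None, bmc=None):
        # Build '[node: ...]' from whatever identifiers are known,
        # mirroring processing_logger_prefix() introduced below.
        parts = [p for p in (node_uuid,
                             'MAC %s' % mac if mac else None,
                             'BMC %s' % bmc if bmc else None) if p]
        return '[node: %s]' % ' '.join(parts) if parts else '[unidentified node]'

    class PrefixAdapter(logging.LoggerAdapter):
        def process(self, msg, kwargs):
            # Pop the custom keywords so the stdlib logger never sees them.
            ids = {k: kwargs.pop(k, None) for k in ('node_uuid', 'mac', 'bmc')}
            return '%s %s' % (prefix(**ids), msg), kwargs

    logging.basicConfig(level=logging.INFO)
    LOG = PrefixAdapter(logging.getLogger(__name__), {})
    LOG.info('Introspection started successfully',
             node_uuid='NNN', mac='aa:bb:cc:dd:ee:ff', bmc='1.2.3.4')
    # -> INFO:...:[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4] Introspection started successfully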


@@ -194,6 +194,7 @@ function configure_inspector {
     mkdir_chown_stack "$IRONIC_INSPECTOR_RAMDISK_LOGDIR"
     inspector_iniset processing ramdisk_logs_dir "$IRONIC_INSPECTOR_RAMDISK_LOGDIR"
     inspector_iniset processing always_store_ramdisk_logs "$IRONIC_INSPECTOR_ALWAYS_STORE_RAMDISK_LOGS"
+    inspector_iniset processing log_bmc_address False
 }

 function configure_inspector_swift {


@@ -84,47 +84,62 @@
 # The name of a logging configuration file. This file is appended to
 # any existing logging configuration files. For details about logging
 # configuration files, see the Python logging module documentation.
-# (string value)
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example, log_format). (string
+# value)
 # Deprecated group/name - [DEFAULT]/log_config
 #log_config_append = <None>

 # DEPRECATED. A logging.Formatter log message format string which may
 # use any of the available logging.LogRecord attributes. This option
 # is deprecated. Please use logging_context_format_string and
-# logging_default_format_string instead. (string value)
+# logging_default_format_string instead. This option is ignored if
+# log_config_append is set. (string value)
 #log_format = <None>

 # Format string for %%(asctime)s in log records. Default: %(default)s
-# . (string value)
+# . This option is ignored if log_config_append is set. (string value)
 #log_date_format = %Y-%m-%d %H:%M:%S

 # (Optional) Name of log file to output to. If no default is set,
-# logging will go to stdout. (string value)
+# logging will go to stdout. This option is ignored if
+# log_config_append is set. (string value)
 # Deprecated group/name - [DEFAULT]/logfile
 #log_file = <None>

 # (Optional) The base directory used for relative --log-file paths.
-# (string value)
+# This option is ignored if log_config_append is set. (string value)
 # Deprecated group/name - [DEFAULT]/logdir
 #log_dir = <None>

+# (Optional) Uses logging handler designed to watch file system. When
+# log file is moved or removed this handler will open a new log file
+# with specified path instantaneously. It makes sense only if log-file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
 # Use syslog for logging. Existing syslog format is DEPRECATED and
-# will be changed later to honor RFC5424. (boolean value)
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
 #use_syslog = false

 # (Optional) Enables or disables syslog rfc5424 format for logging. If
 # enabled, prefixes the MSG part of the syslog message with APP-NAME
 # (RFC5424). The format without the APP-NAME is deprecated in Kilo,
-# and will be removed in Mitaka, along with this option. (boolean
-# value)
+# and will be removed in Mitaka, along with this option. This option
+# is ignored if log_config_append is set. (boolean value)
 # This option is deprecated for removal.
 # Its value may be silently ignored in the future.
 #use_syslog_rfc_format = true

-# Syslog facility to receive log lines. (string value)
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
 #syslog_log_facility = LOG_USER

-# Log output to standard error. (boolean value)
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
 #use_stderr = true

 # Format string to use for log messages with context. (string value)

@@ -141,7 +156,8 @@
 # value)
 #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s

-# List of logger=LEVEL pairs. (list value)
+# List of logger=LEVEL pairs. This option is ignored if
+# log_config_append is set. (list value)
 #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN

 # Enables or disables publication of error events. (boolean value)

@@ -155,6 +171,10 @@
 # (string value)
 #instance_uuid_format = "[instance: %(uuid)s] "

+# Format string for user_identity field of the
+# logging_context_format_string (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
 # Enables or disables fatal status of deprecations. (boolean value)
 #fatal_deprecations = false

@@ -535,6 +555,14 @@
 # Service tenant name. (string value)
 #admin_tenant_name = admin

+# Authentication type to load (unknown value)
+# Deprecated group/name - [DEFAULT]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (unknown
+# value)
+#auth_section = <None>
+
 [processing]

@@ -615,6 +643,10 @@
 # local_gb is calculated on the ramdisk side. (boolean value)
 #disk_partitioning_spacing = true

+# Whether to log node BMC address with every message during
+# processing. (boolean value)
+#log_bmc_address = true
+
 [swift]
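
With the new option in place, an operator who does not want BMC addresses to
appear in log prefixes can switch it off. A minimal inspector.conf fragment;
the option name and section come straight from this change, the rest of the
file is assumed to be whatever the deployment already uses:

    [processing]
    # Drop the "BMC <address>" part from the per-node log prefix.
    log_bmc_address = false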


@@ -178,6 +178,10 @@ PROCESSING_OPTS = [
                 'partitioning. Only has effect when used with the IPA '
                 'as a ramdisk, for older ramdisk local_gb is '
                 'calculated on the ramdisk side.'),
+    cfg.BoolOpt('log_bmc_address',
+                default=True,
+                help='Whether to log node BMC address with every message '
+                     'during processing.'),
 ]
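
For reference, a standalone sketch of how such a boolean is registered and
consumed with oslo.config; in the real code the registration happens via
PROCESSING_OPTS above, and bmc_for_prefix below is a hypothetical helper,
shown only to illustrate the gating:

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([cfg.BoolOpt('log_bmc_address', default=True,
                                    help='Whether to log node BMC address with '
                                         'every message during processing.')],
                       group='processing')

    def bmc_for_prefix(bmc_address):
        # The prefix builder only includes the BMC part when the option is on.
        return bmc_address if CONF.processing.log_bmc_address else None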


@@ -20,7 +20,6 @@ import time
 from eventlet import semaphore
 from ironicclient import exceptions
 from oslo_config import cfg
-from oslo_log import log

 from ironic_inspector.common.i18n import _, _LI, _LW
 from ironic_inspector import firewall

@@ -30,7 +29,7 @@ from ironic_inspector import utils
 CONF = cfg.CONF

-LOG = log.getLogger("ironic_inspector.introspect")
+LOG = utils.getProcessingLogger(__name__)

 PASSWORD_ACCEPTED_CHARS = set(string.ascii_letters + string.digits)
 PASSWORD_MAX_LENGTH = 20  # IPMI v2.0

@@ -47,20 +46,19 @@ def _validate_ipmi_credentials(node, new_ipmi_credentials):
     if not new_username:
         new_username = node.driver_info.get('ipmi_username')
         if not new_username:
-            raise utils.Error(_('Setting IPMI credentials requested for node %s,'
-                                ' but neither new user name nor'
-                                ' driver_info[ipmi_username] are provided')
-                              % node.uuid)
+            raise utils.Error(_('Setting IPMI credentials requested, but neither '
+                                'new user name nor driver_info[ipmi_username] '
+                                'are provided'),
+                              node_info=node)

     wrong_chars = {c for c in new_password
                    if c not in PASSWORD_ACCEPTED_CHARS}
     if wrong_chars:
         raise utils.Error(_('Forbidden characters encountered in new IPMI '
-                            'password for node %(node)s: "%(chars)s"; '
-                            'use only letters and numbers') %
-                          {'node': node.uuid, 'chars': ''.join(wrong_chars)})
+                            'password: "%s"; use only letters and numbers')
+                          % ''.join(wrong_chars), node_info=node)

     if not 0 < len(new_password) <= PASSWORD_MAX_LENGTH:
         raise utils.Error(_('IPMI password length should be > 0 and <= %d')
-                          % PASSWORD_MAX_LENGTH)
+                          % PASSWORD_MAX_LENGTH, node_info=node)

     return new_username, new_password

@@ -91,10 +89,9 @@ def introspect(uuid, new_ipmi_credentials=None, token=None):
     else:
         validation = ironic.node.validate(node.uuid)
         if not validation.power['result']:
-            msg = _('Failed validation of power interface for node %(node)s, '
-                    'reason: %(reason)s')
-            raise utils.Error(msg % {'node': node.uuid,
-                                     'reason': validation.power['reason']})
+            msg = _('Failed validation of power interface, reason: %s')
+            raise utils.Error(msg % validation.power['reason'],
+                              node_info=node)

     node_info = node_cache.add_node(node.uuid,
                                     bmc_address=utils.get_ipmi_address(node),

@@ -105,10 +102,11 @@
         try:
             _background_introspect(ironic, node_info)
         except utils.Error as exc:
+            # Logging has already happened in Error.__init__
             node_info.finished(error=str(exc))
         except Exception as exc:
             msg = _('Unexpected exception in background introspection thread')
-            LOG.exception(msg)
+            LOG.exception(msg, node_info=node_info)
             node_info.finished(error=msg)

     utils.spawn_n(_handle_exceptions)

@@ -141,42 +139,38 @@ def _background_introspect_locked(ironic, node_info):
     macs = list(node_info.ports())
     if macs:
         node_info.add_attribute(node_cache.MACS_ATTRIBUTE, macs)
-        LOG.info(_LI('Whitelisting MAC\'s %(macs)s for node %(node)s on the'
-                     ' firewall') %
-                 {'macs': macs, 'node': node_info.uuid})
+        LOG.info(_LI('Whitelisting MAC\'s %s on the firewall'), macs,
+                 node_info=node_info)
         firewall.update_filters(ironic)

     attrs = node_info.attributes
     if CONF.processing.node_not_found_hook is None and not attrs:
         raise utils.Error(
-            _('No lookup attributes were found for node %s, inspector won\'t '
-              'be able to find it after introspection. Consider creating '
-              'ironic ports or providing an IPMI address.') % node_info.uuid)
+            _('No lookup attributes were found, inspector won\'t '
+              'be able to find it after introspection, consider creating '
+              'ironic ports or providing an IPMI address'),
+            node_info=node_info)

-    LOG.info(_LI('The following attributes will be used for looking up '
-                 'node %(uuid)s: %(attrs)s'),
-             {'attrs': attrs, 'uuid': node_info.uuid})
+    LOG.info(_LI('The following attributes will be used for look up: %s'),
+             attrs, node_info=node_info)

     if not node_info.options.get('new_ipmi_credentials'):
         try:
             ironic.node.set_boot_device(node_info.uuid, 'pxe',
                                         persistent=False)
         except Exception as exc:
-            LOG.warning(_LW('Failed to set boot device to PXE for'
-                            ' node %(node)s: %(exc)s') %
-                        {'node': node_info.uuid, 'exc': exc})
+            LOG.warning(_LW('Failed to set boot device to PXE: %s'),
+                        exc, node_info=node_info)

         try:
             ironic.node.set_power_state(node_info.uuid, 'reboot')
         except Exception as exc:
-            raise utils.Error(_('Failed to power on node %(node)s,'
-                                ' check it\'s power '
-                                'management configuration:\n%(exc)s')
-                              % {'node': node_info.uuid, 'exc': exc})
-        LOG.info(_LI('Introspection started successfully for node %s'),
-                 node_info.uuid)
+            raise utils.Error(_('Failed to power on the node, check it\'s '
+                                'power management configuration: %s'),
+                              exc, node_info=node_info)
+        LOG.info(_LI('Introspection started successfully'),
+                 node_info=node_info)
     else:
-        LOG.info(_LI('Introspection environment is ready for node %(node)s, '
-                     'manual power on is required within %(timeout)d seconds') %
-                 {'node': node_info.uuid,
-                  'timeout': CONF.timeout})
+        LOG.info(_LI('Introspection environment is ready, manual power on is '
+                     'required within %d seconds'), CONF.timeout,
+                 node_info=node_info)
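
The recurring transformation in this file, and in most of the files below, is
the same two-line pattern. Shown in isolation, with both lines excerpted from
this diff (so this is a fragment, not standalone code):

    # Before: node identity interpolated by hand into every message
    LOG.info(_LI('Introspection started successfully for node %s'),
             node_info.uuid)

    # After: the adapter builds the '[node: ...]' prefix from node_info/data
    LOG.info(_LI('Introspection started successfully'), node_info=node_info)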


@@ -42,7 +42,7 @@ CONF = cfg.CONF
 app = flask.Flask(__name__)
-LOG = log.getLogger('ironic_inspector.main')
+LOG = utils.getProcessingLogger(__name__)

 MINIMUM_API_VERSION = (1, 0)
 CURRENT_API_VERSION = (1, 2)

@@ -172,7 +172,8 @@ def api_continue():
     logged_data = {k: (v if k not in _LOGGING_EXCLUDED_KEYS else '<hidden>')
                    for k, v in data.items()}
-    LOG.debug("Received data from the ramdisk: %s", logged_data)
+    LOG.debug("Received data from the ramdisk: %s", logged_data,
+              data=data)

     return flask.jsonify(process.process(data))


@@ -21,7 +21,6 @@ from ironicclient import exceptions
 from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslo_db import exception as db_exc
-from oslo_log import log
 from oslo_utils import excutils
 from sqlalchemy import text

@@ -32,7 +31,7 @@ from ironic_inspector import utils
 CONF = cfg.CONF

-LOG = log.getLogger("ironic_inspector.node_cache")
+LOG = utils.getProcessingLogger(__name__)

 MACS_ATTRIBUTE = 'mac'

@@ -96,13 +95,13 @@ class NodeInfo(object):
         if self._locked:
             return True

-        LOG.debug('Attempting to acquire lock on node %s', self.uuid)
+        LOG.debug('Attempting to acquire lock', node_info=self)
         if self._lock.acquire(blocking):
             self._locked = True
-            LOG.debug('Successfully acquired lock on node %s', self.uuid)
+            LOG.debug('Successfully acquired lock', node_info=self)
             return True
         else:
-            LOG.debug('Unable to acquire lock on node %s', self.uuid)
+            LOG.debug('Unable to acquire lock', node_info=self)
             return False

     def release_lock(self):

@@ -111,7 +110,7 @@
         Does nothing if lock was not acquired using this NodeInfo object.
         """
         if self._locked:
-            LOG.debug('Successfully released lock on node %s', self.uuid)
+            LOG.debug('Successfully released lock', node_info=self)
             self._lock.release()
         self._locked = False

@@ -192,10 +191,11 @@
                     session)
             except db_exc.DBDuplicateEntry as exc:
                 LOG.error(_LE('Database integrity error %s during '
-                              'adding attributes'), exc)
+                              'adding attributes'), exc, node_info=self)
                 raise utils.Error(_(
                     'Some or all of %(name)s\'s %(value)s are already '
-                    'on introspection') % {'name': name, 'value': value})
+                    'on introspection') % {'name': name, 'value': value},
+                    node_info=self)
         # Invalidate attributes so they're loaded on next usage
         self._attributes = None

@@ -229,9 +229,8 @@
             if mac not in self.ports():
                 self._create_port(mac)
             else:
-                LOG.warning(
-                    _LW('Port %(mac)s already exists for node %(uuid)s, '
-                        'skipping'), {'mac': mac, 'uuid': self.uuid})
+                LOG.warning(_LW('Port %s already exists, skipping'),
+                            mac, node_info=self)

     def ports(self):
         """Get Ironic port objects associated with the cached node record.

@@ -249,9 +248,8 @@
         try:
             port = self.ironic.port.create(node_uuid=self.uuid, address=mac)
         except exceptions.Conflict:
-            LOG.warning(
-                _LW('Port %(mac)s already exists for node %(uuid)s, '
-                    'skipping'), {'mac': mac, 'uuid': self.uuid})
+            LOG.warning(_LW('Port %s already exists, skipping'),
+                        mac, node_info=self)
             # NOTE(dtantsur): we didn't get port object back, so we have to
             # reload ports on next access
             self._ports = None

@@ -266,8 +264,7 @@
         :param patches: JSON patches to apply
         :raises: ironicclient exceptions
         """
-        LOG.debug('Updating node %(uuid)s with patches %(patches)s',
-                  {'uuid': self.uuid, 'patches': patches})
+        LOG.debug('Updating node with patches %s', patches, node_info=self)
         self._node = self.ironic.node.update(self.uuid, patches)

     def patch_port(self, port, patches):

@@ -280,9 +277,9 @@
         if isinstance(port, str):
             port = ports[port]

-        LOG.debug('Updating port %(mac)s of node %(uuid)s with patches '
-                  '%(patches)s',
-                  {'mac': port.address, 'uuid': self.uuid, 'patches': patches})
+        LOG.debug('Updating port %(mac)s with patches %(patches)s',
+                  {'mac': port.address, 'patches': patches},
+                  node_info=self)
         new_port = self.ironic.port.update(port.uuid, patches)
         ports[port.address] = new_port


@@ -21,15 +21,15 @@ is stored in the 'inspector' container.
 import json

 from oslo_config import cfg
-from oslo_log import log

 from ironic_inspector.common.i18n import _LW
 from ironic_inspector.common import swift
 from ironic_inspector.plugins import base
+from ironic_inspector import utils

 CONF = cfg.CONF

-LOG = log.getLogger('ironic_inspector.plugins.extra_hardware')
+LOG = utils.getProcessingLogger(__name__)

 EDEPLOY_ITEM_SIZE = 4

@@ -52,7 +52,8 @@ class ExtraHardwareHook(base.ProcessingHook):
         """
         if 'data' not in introspection_data:
             LOG.warning(_LW('No extra hardware information was received from '
-                            'the ramdisk'))
+                            'the ramdisk'),
+                        node_info=node_info, data=introspection_data)
             return
         data = introspection_data['data']

@@ -65,16 +66,19 @@
         # by rules.
         if self._is_edeploy_data(data):
             LOG.debug('Extra hardware data is in eDeploy format, '
-                      'converting to usable format.')
+                      'converting to usable format',
+                      node_info=node_info, data=introspection_data)
             introspection_data['extra'] = self._convert_edeploy_data(data)
         else:
             LOG.warning(_LW('Extra hardware data was not in a recognised '
                             'format (eDeploy), and will not be forwarded to '
-                            'introspection rules.'))
+                            'introspection rules'),
+                        node_info=node_info, data=introspection_data)

         LOG.debug('Deleting \"data\" key from introspection data as it is '
                   'assumed unusable by introspection rules. Raw data is '
-                  'stored in swift.')
+                  'stored in swift',
+                  node_info=node_info, data=introspection_data)
         del introspection_data['data']

         node_info.patch([{'op': 'add', 'path': '/extra/hardware_swift_object',


@@ -13,13 +13,12 @@

 """Gather root device hint from recognized block devices."""

-from oslo_log import log
-
 from ironic_inspector.common.i18n import _LI, _LW
 from ironic_inspector.plugins import base
+from ironic_inspector import utils

-LOG = log.getLogger(__name__)
+LOG = utils.getProcessingLogger(__name__)


 class RaidDeviceDetection(base.ProcessingHook):

@@ -55,19 +54,22 @@ class RaidDeviceDetection(base.ProcessingHook):
         """Adds fake local_gb value if it's missing from introspection_data."""
         if not introspection_data.get('local_gb'):
             LOG.info(_LI('No volume is found on the node. Adding a fake '
-                         'value for "local_gb"'))
+                         'value for "local_gb"'),
+                     data=introspection_data)
             introspection_data['local_gb'] = 1

     def before_update(self, introspection_data, node_info, **kwargs):
         current_devices = self._get_serials(introspection_data)
         if not current_devices:
-            LOG.warning(_LW('No block device was received from ramdisk'))
+            LOG.warning(_LW('No block device was received from ramdisk'),
+                        node_info=node_info, data=introspection_data)
             return

         node = node_info.node()

         if 'root_device' in node.properties:
-            LOG.info(_LI('Root device is already known for the node'))
+            LOG.info(_LI('Root device is already known for the node'),
+                     node_info=node_info, data=introspection_data)
             return

         if 'block_devices' in node.extra:

@@ -78,10 +80,12 @@ class RaidDeviceDetection(base.ProcessingHook):
             if len(new_devices) > 1:
                 LOG.warning(_LW('Root device cannot be identified because '
-                                'multiple new devices were found'))
+                                'multiple new devices were found'),
+                            node_info=node_info, data=introspection_data)
                 return
             elif len(new_devices) == 0:
-                LOG.warning(_LW('No new devices were found'))
+                LOG.warning(_LW('No new devices were found'),
+                            node_info=node_info, data=introspection_data)
                 return

             node_info.patch([


@@ -16,13 +16,12 @@
 import operator

 import netaddr
-from oslo_log import log

 from ironic_inspector.plugins import base
 from ironic_inspector import utils

-LOG = log.getLogger(__name__)
+LOG = utils.getProcessingLogger(__name__)


 def coerce(value, expected):

@@ -84,7 +83,7 @@ class FailAction(base.RuleActionPlugin):
     REQUIRED_PARAMS = {'message'}

     def apply(self, node_info, params, **kwargs):
-        raise utils.Error(params['message'])
+        raise utils.Error(params['message'], node_info=node_info)


 class SetAttributeAction(base.RuleActionPlugin):

@@ -99,9 +98,8 @@ class SetAttributeAction(base.RuleActionPlugin):
         try:
             node_info.get_by_path(params['path'])
         except KeyError:
-            LOG.debug('Field %(path)s was not set on node %(node)s, '
-                      'no need for rollback',
-                      {'path': params['path'], 'node': node_info.uuid})
+            LOG.debug('Field %s was not set, no need for rollback',
+                      params['path'], node_info=node_info)
             return
         node_info.patch([{'op': 'remove', 'path': params['path']}])


@@ -19,7 +19,6 @@ import os
 import sys

 from oslo_config import cfg
-from oslo_log import log
 from oslo_utils import units

 from ironic_inspector.common.i18n import _, _LC, _LI, _LW

@@ -30,7 +29,7 @@ from ironic_inspector import utils
 CONF = cfg.CONF

-LOG = log.getLogger('ironic_inspector.plugins.standard')
+LOG = utils.getProcessingLogger('ironic_inspector.plugins.standard')

 KNOWN_ROOT_DEVICE_HINTS = ('model', 'vendor', 'serial', 'wwn', 'hctl',
                            'size')

@@ -47,21 +46,22 @@ class RootDiskSelectionHook(base.ProcessingHook):
         """Detect root disk from root device hints and IPA inventory."""
         hints = node_info.node().properties.get('root_device')
         if not hints:
-            LOG.debug('Root device hints are not provided for node %s',
-                      node_info.uuid)
+            LOG.debug('Root device hints are not provided',
+                      node_info=node_info, data=introspection_data)
             return

         inventory = introspection_data.get('inventory')
         if not inventory:
             LOG.error(_LW('Root device selection require ironic-python-agent '
-                          'as an inspection ramdisk'))
+                          'as an inspection ramdisk'),
+                      node_info=node_info, data=introspection_data)
             # TODO(dtantsur): make it a real error in Mitaka cycle
             return

         disks = inventory.get('disks', [])
         if not disks:
-            raise utils.Error(_('No disks found on a node %s') %
-                              node_info.uuid)
+            raise utils.Error(_('No disks found'),
+                              node_info=node_info, data=introspection_data)

         for disk in disks:
             properties = disk.copy()

@@ -72,22 +72,21 @@
                 actual = properties.get(name)
                 if actual != value:
                     LOG.debug('Disk %(disk)s does not satisfy hint '
-                              '%(name)s=%(value)s for node %(node)s, '
-                              'actual value is %(actual)s',
-                              {'disk': disk.get('name'),
-                               'name': name, 'value': value,
-                               'node': node_info.uuid, 'actual': actual})
+                              '%(name)s=%(value)s, actual value is %(actual)s',
+                              {'disk': disk.get('name'), 'name': name,
+                               'value': value, 'actual': actual},
+                              node_info=node_info, data=introspection_data)
                     break
             else:
                 LOG.debug('Disk %(disk)s of size %(size)s satisfies '
-                          'root device hints for node %(node)s',
-                          {'disk': disk.get('name'), 'node': node_info.uuid,
-                           'size': disk['size']})
+                          'root device hints',
+                          {'disk': disk.get('name'), 'size': disk['size']},
+                          node_info=node_info, data=introspection_data)
                 introspection_data['root_disk'] = disk
                 return

-        raise utils.Error(_('No disks satisfied root device hints for node %s')
-                          % node_info.uuid)
+        raise utils.Error(_('No disks satisfied root device hints'),
+                          node_info=node_info, data=introspection_data)


 class SchedulerHook(base.ProcessingHook):

@@ -107,11 +106,12 @@ class SchedulerHook(base.ProcessingHook):
         if missing:
             raise utils.Error(
                 _('The following required parameters are missing: %s') %
-                missing)
+                missing, node_info=node_info, data=introspection_data)

         LOG.info(_LI('Discovered data: CPUs: %(cpus)s %(cpu_arch)s, '
                      'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
-                 {key: introspection_data.get(key) for key in self.KEYS})
+                 {key: introspection_data.get(key) for key in self.KEYS},
+                 node_info=node_info, data=introspection_data)

         overwrite = CONF.processing.overwrite_existing
         properties = {key: str(introspection_data[key])

@@ -140,28 +140,23 @@ class ValidateInterfacesHook(base.ProcessingHook):
     def before_processing(self, introspection_data, **kwargs):
         """Validate information about network interfaces."""
-        bmc_address = introspection_data.get('ipmi_address')
-
         if not introspection_data.get('interfaces'):
-            raise utils.Error(_('No interfaces supplied by the ramdisk'))
+            raise utils.Error(_('No interfaces supplied by the ramdisk'),
+                              data=introspection_data)

         valid_interfaces = {
             n: iface for n, iface in introspection_data['interfaces'].items()
             if utils.is_valid_mac(iface.get('mac'))
         }

-        pxe_mac = introspection_data.get('boot_interface')
+        pxe_mac = utils.get_pxe_mac(introspection_data)
         if CONF.processing.add_ports == 'pxe' and not pxe_mac:
             LOG.warning(_LW('No boot interface provided in the introspection '
-                            'data, will add all ports with IP addresses'))
+                            'data, will add all ports with IP addresses'),
+                        data=introspection_data)

         if CONF.processing.add_ports == 'pxe' and pxe_mac:
-            LOG.info(_LI('PXE boot interface was %s'), pxe_mac)
-            if '-' in pxe_mac:
-                # pxelinux format: 01-aa-bb-cc-dd-ee-ff
-                pxe_mac = pxe_mac.split('-', 1)[1]
-                pxe_mac = pxe_mac.replace('-', ':').lower()
-
             valid_interfaces = {
                 n: iface for n, iface in valid_interfaces.items()
                 if iface['mac'].lower() == pxe_mac

@@ -173,20 +168,19 @@
             }

         if not valid_interfaces:
-            raise utils.Error(_('No valid interfaces found for node with '
-                                'BMC %(ipmi_address)s, got %(interfaces)s') %
-                              {'ipmi_address': bmc_address,
-                               'interfaces': introspection_data['interfaces']})
+            raise utils.Error(_('No valid interfaces found in %s') %
+                              introspection_data['interfaces'],
+                              data=introspection_data)
         elif valid_interfaces != introspection_data['interfaces']:
             invalid = {n: iface
                        for n, iface in introspection_data['interfaces'].items()
                        if n not in valid_interfaces}
             LOG.warning(_LW(
                 'The following interfaces were invalid or not eligible in '
-                'introspection data for node with BMC %(ipmi_address)s and '
-                'were excluded: %(invalid)s'),
-                {'invalid': invalid, 'ipmi_address': bmc_address})
-            LOG.info(_LI('Eligible interfaces are %s'), valid_interfaces)
+                'introspection data and were excluded: %s'), invalid,
+                data=introspection_data)
+            LOG.info(_LI('Eligible interfaces are %s'), valid_interfaces,
+                     data=introspection_data)

         introspection_data['all_interfaces'] = introspection_data['interfaces']
         introspection_data['interfaces'] = valid_interfaces

@@ -209,12 +203,11 @@
         for port in list(node_info.ports().values()):
             if port.address not in expected_macs:
                 LOG.info(_LI("Deleting port %(port)s as its MAC %(mac)s is "
-                             "not in expected MAC list %(expected)s for node "
-                             "%(node)s"),
+                             "not in expected MAC list %(expected)s"),
                          {'port': port.uuid,
                           'mac': port.address,
-                          'expected': list(sorted(expected_macs)),
-                          'node': node_info.uuid})
+                          'expected': list(sorted(expected_macs))},
+                         node_info=node_info, data=introspection_data)
                 node_info.delete_port(port)

@@ -231,17 +224,20 @@ class RamdiskErrorHook(base.ProcessingHook):
         if logs:
             self._store_logs(logs, introspection_data)
         else:
-            LOG.debug('No logs received from the ramdisk')
+            LOG.debug('No logs received from the ramdisk',
+                      data=introspection_data)

         if error:
-            raise utils.Error(_('Ramdisk reported error: %s') % error)
+            raise utils.Error(_('Ramdisk reported error: %s') % error,
+                              data=introspection_data)

     def _store_logs(self, logs, introspection_data):
         if not CONF.processing.ramdisk_logs_dir:
             LOG.warning(
                 _LW('Failed to store logs received from the ramdisk '
                     'because ramdisk_logs_dir configuration option '
-                    'is not set'))
+                    'is not set'),
+                data=introspection_data)
             return

         if not os.path.exists(CONF.processing.ramdisk_logs_dir):

@@ -253,4 +249,5 @@ class RamdiskErrorHook(base.ProcessingHook):
         with open(os.path.join(CONF.processing.ramdisk_logs_dir, file_name),
                   'wb') as fp:
             fp.write(base64.b64decode(logs))
-        LOG.info(_LI('Ramdisk logs stored in file %s'), file_name)
+        LOG.info(_LI('Ramdisk logs stored in file %s'), file_name,
+                 data=introspection_data)


@ -16,7 +16,6 @@
import eventlet import eventlet
from ironicclient import exceptions from ironicclient import exceptions
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log
from ironic_inspector.common.i18n import _, _LE, _LI, _LW from ironic_inspector.common.i18n import _, _LE, _LI, _LW
from ironic_inspector.common import swift from ironic_inspector.common import swift
@ -28,7 +27,7 @@ from ironic_inspector import utils
CONF = cfg.CONF CONF = cfg.CONF
LOG = log.getLogger("ironic_inspector.process") LOG = utils.getProcessingLogger(__name__)
_CREDENTIALS_WAIT_RETRIES = 10 _CREDENTIALS_WAIT_RETRIES = 10
_CREDENTIALS_WAIT_PERIOD = 3 _CREDENTIALS_WAIT_PERIOD = 3
@ -74,13 +73,15 @@ def process(introspection_data):
except utils.Error as exc: except utils.Error as exc:
LOG.error(_LE('Hook %(hook)s failed, delaying error report ' LOG.error(_LE('Hook %(hook)s failed, delaying error report '
'until node look up: %(error)s'), 'until node look up: %(error)s'),
{'hook': hook_ext.name, 'error': exc}) {'hook': hook_ext.name, 'error': exc},
data=introspection_data)
failures.append('Preprocessing hook %(hook)s: %(error)s' % failures.append('Preprocessing hook %(hook)s: %(error)s' %
{'hook': hook_ext.name, 'error': exc}) {'hook': hook_ext.name, 'error': exc})
except Exception as exc: except Exception as exc:
LOG.exception(_LE('Hook %(hook)s failed, delaying error report ' LOG.exception(_LE('Hook %(hook)s failed, delaying error report '
'until node look up: %(error)s'), 'until node look up: %(error)s'),
{'hook': hook_ext.name, 'error': exc}) {'hook': hook_ext.name, 'error': exc},
data=introspection_data)
failures.append(_('Unexpected exception %(exc_class)s during ' failures.append(_('Unexpected exception %(exc_class)s during '
'preprocessing in hook %(hook)s: %(error)s') % 'preprocessing in hook %(hook)s: %(error)s') %
{'hook': hook_ext.name, {'hook': hook_ext.name,
@ -93,28 +94,23 @@ def process(introspection_data):
# node_not_found hook # node_not_found hook
node_info.acquire_lock() node_info.acquire_lock()
if failures and node_info: if failures or node_info is None:
msg = _('The following failures happened during running ' msg = _('The following failures happened during running '
'pre-processing hooks for node %(uuid)s:\n%(failures)s') % { 'pre-processing hooks:\n%s') % '\n'.join(failures)
'uuid': node_info.uuid, if node_info is not None:
'failures': '\n'.join(failures)
}
node_info.finished(error='\n'.join(failures)) node_info.finished(error='\n'.join(failures))
raise utils.Error(msg) raise utils.Error(msg, node_info=node_info, data=introspection_data)
elif not node_info:
msg = _('The following failures happened during running ' LOG.info(_LI('Matching node is %s'), node_info.uuid,
'pre-processing hooks for unknown node:\n%(failures)s') % { node_info=node_info, data=introspection_data)
'failures': '\n'.join(failures)
}
raise utils.Error(msg)
try: try:
node = node_info.node() node = node_info.node()
except exceptions.NotFound: except exceptions.NotFound:
msg = (_('Node UUID %s was found in cache, but is not found in Ironic') msg = _('Node was found in cache, but is not found in Ironic')
% node_info.uuid)
node_info.finished(error=msg) node_info.finished(error=msg)
raise utils.Error(msg, code=404) raise utils.Error(msg, code=404, node_info=node_info,
data=introspection_data)
try: try:
return _process_node(node, introspection_data, node_info) return _process_node(node, introspection_data, node_info)
@ -127,7 +123,7 @@ def process(introspection_data):
'%(error)s') % {'exc_class': exc.__class__.__name__, '%(error)s') % {'exc_class': exc.__class__.__name__,
'error': exc} 'error': exc}
node_info.finished(error=msg) node_info.finished(error=msg)
raise utils.Error(msg) raise utils.Error(msg, node_info=node_info, data=introspection_data)
def _run_post_hooks(node_info, introspection_data): def _run_post_hooks(node_info, introspection_data):
@ -160,16 +156,17 @@ def _process_node(node, introspection_data, node_info):
if CONF.processing.store_data == 'swift': if CONF.processing.store_data == 'swift':
swift_object_name = swift.store_introspection_data(introspection_data, swift_object_name = swift.store_introspection_data(introspection_data,
node_info.uuid) node_info.uuid)
LOG.info(_LI('Introspection data for node %(node)s was stored in ' LOG.info(_LI('Introspection data was stored in Swift in object %s'),
'Swift in object %(obj)s'), swift_object_name,
{'node': node_info.uuid, 'obj': swift_object_name}) node_info=node_info, data=introspection_data)
if CONF.processing.store_data_location: if CONF.processing.store_data_location:
node_info.patch([{'op': 'add', 'path': '/extra/%s' % node_info.patch([{'op': 'add', 'path': '/extra/%s' %
CONF.processing.store_data_location, CONF.processing.store_data_location,
'value': swift_object_name}]) 'value': swift_object_name}])
else: else:
LOG.debug('Swift support is disabled, introspection data for node %s ' LOG.debug('Swift support is disabled, introspection data '
'won\'t be stored', node_info.uuid) 'won\'t be stored',
node_info=node_info, data=introspection_data)
ironic = utils.get_client() ironic = utils.get_client()
firewall.update_filters(ironic) firewall.update_filters(ironic)
@ -189,7 +186,7 @@ def _process_node(node, introspection_data, node_info):
resp['ipmi_username'] = new_username resp['ipmi_username'] = new_username
resp['ipmi_password'] = new_password resp['ipmi_password'] = new_password
else: else:
utils.spawn_n(_finish, ironic, node_info) utils.spawn_n(_finish, ironic, node_info, introspection_data)
return resp return resp
@ -212,22 +209,22 @@ def _finish_set_ipmi_credentials(ironic, node, node_info, introspection_data,
# We don't care about boot device, obviously. # We don't care about boot device, obviously.
ironic.node.get_boot_device(node_info.uuid) ironic.node.get_boot_device(node_info.uuid)
except Exception as exc: except Exception as exc:
LOG.info(_LI('Waiting for credentials update on node %(node)s,' LOG.info(_LI('Waiting for credentials update, attempt %(attempt)d '
' attempt %(attempt)d current error is %(exc)s') % 'current error is %(exc)s') %
{'node': node_info.uuid, {'attempt': attempt, 'exc': exc},
'attempt': attempt, 'exc': exc}) node_info=node_info, data=introspection_data)
eventlet.greenthread.sleep(_CREDENTIALS_WAIT_PERIOD) eventlet.greenthread.sleep(_CREDENTIALS_WAIT_PERIOD)
else: else:
_finish(ironic, node_info) _finish(ironic, node_info, introspection_data)
return return
msg = (_('Failed to validate updated IPMI credentials for node ' msg = (_('Failed to validate updated IPMI credentials for node '
'%s, node might require maintenance') % node_info.uuid) '%s, node might require maintenance') % node_info.uuid)
node_info.finished(error=msg) node_info.finished(error=msg)
raise utils.Error(msg) raise utils.Error(msg, node_info=node_info, data=introspection_data)
def _finish(ironic, node_info): def _finish(ironic, node_info, introspection_data):
LOG.debug('Forcing power off of node %s', node_info.uuid) LOG.debug('Forcing power off of node %s', node_info.uuid)
try: try:
ironic.node.set_power_state(node_info.uuid, 'off') ironic.node.set_power_state(node_info.uuid, 'off')
@ -236,8 +233,8 @@ def _finish(ironic, node_info):
'management configuration: %(exc)s') % 'management configuration: %(exc)s') %
{'node': node_info.uuid, 'exc': exc}) {'node': node_info.uuid, 'exc': exc})
node_info.finished(error=msg) node_info.finished(error=msg)
raise utils.Error(msg) raise utils.Error(msg, node_info=node_info, data=introspection_data)
node_info.finished() node_info.finished()
LOG.info(_LI('Introspection finished successfully for node %s'), LOG.info(_LI('Introspection finished successfully'),
node_info.uuid) node_info=node_info, data=introspection_data)


@@ -17,7 +17,6 @@
 import jsonpath_rw as jsonpath
 import jsonschema
 from oslo_db import exception as db_exc
-from oslo_log import log
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 from sqlalchemy import orm

@@ -28,7 +27,7 @@ from ironic_inspector.plugins import base as plugins_base
 from ironic_inspector import utils

-LOG = log.getLogger(__name__)
+LOG = utils.getProcessingLogger(__name__)

 _CONDITIONS_SCHEMA = None
 _ACTIONS_SCHEMA = None

@@ -128,8 +127,8 @@ class IntrospectionRule(object):
         :param data: introspection data
         :returns: True if conditions match, otherwise False
         """
-        LOG.debug('Checking rule "%(descr)s" on node %(uuid)s',
-                  {'descr': self.description, 'uuid': node_info.uuid})
+        LOG.debug('Checking rule "%s"', self.description,
+                  node_info=node_info, data=data)
         ext_mgr = plugins_base.rule_conditions_manager()
         for cond in self._conditions:
             field_values = jsonpath.parse(cond.field).find(data)

@@ -138,16 +137,15 @@
             if not field_values:
                 if cond_ext.ALLOW_NONE:
-                    LOG.debug('Field with JSON path %(path)s was not found in '
-                              'data for node %(uuid)s',
-                              {'path': cond.field, 'uuid': node_info.uuid})
+                    LOG.debug('Field with JSON path %s was not found in data',
+                              cond.field, node_info=node_info, data=data)
                     field_values = [None]
                 else:
                     LOG.info(_LI('Field with JSON path %(path)s was not found '
-                                 'in data for node %(uuid)s, rule "%(rule)s" '
-                                 'will not be applied'),
-                             {'path': cond.field, 'uuid': node_info.uuid,
-                              'rule': self.description})
+                                 'in data, rule "%(rule)s" will not '
+                                 'be applied'),
+                             {'path': cond.field, 'rule': self.description},
+                             node_info=node_info, data=data)
                     return False

             for value in field_values:

@@ -158,46 +156,45 @@
                     break

             if not result:
-                LOG.info(_LI('Rule "%(rule)s" will not be applied to node '
-                             '%(uuid)s: condition %(field)s %(op)s %(params)s '
-                             'failed'),
-                         {'rule': self.description, 'uuid': node_info.uuid,
-                          'field': cond.field, 'op': cond.op,
-                          'params': cond.params})
+                LOG.info(_LI('Rule "%(rule)s" will not be applied: condition '
+                             '%(field)s %(op)s %(params)s failed'),
+                         {'rule': self.description, 'field': cond.field,
+                          'op': cond.op, 'params': cond.params},
+                         node_info=node_info, data=data)
                 return False

-        LOG.info(_LI('Rule "%(rule)s" will be applied to node %(uuid)s'),
-                 {'rule': self.description, 'uuid': node_info.uuid})
+        LOG.info(_LI('Rule "%s" will be applied'), self.description,
+                 node_info=node_info, data=data)
         return True

-    def apply_actions(self, node_info, rollback=False):
+    def apply_actions(self, node_info, rollback=False, data=None):
         """Run actions on a node.

         :param node_info: NodeInfo instance
         :param rollback: if True, rollback actions are executed
+        :param data: introspection data (only used for logging)
         """
         if rollback:
             method = 'rollback'
         else:
             method = 'apply'

-        LOG.debug('Running %(what)s actions for rule "%(rule)s" '
-                  'on node %(node)s',
-                  {'what': method, 'rule': self.description,
-                   'node': node_info.uuid})
+        LOG.debug('Running %(what)s actions for rule "%(rule)s"',
+                  {'what': method, 'rule': self.description},
+                  node_info=node_info, data=data)

         ext_mgr = plugins_base.rule_actions_manager()
         for act in self._actions:
-            LOG.debug('Running %(what)s action `%(action)s %(params)s` for '
-                      'node %(node)s',
-                      {'action': act.action, 'params': act.params,
-                       'node': node_info.uuid, 'what': method})
+            LOG.debug('Running %(what)s action `%(action)s %(params)s`',
+                      {'action': act.action, 'params': act.params,
+                       'what': method},
+                      node_info=node_info, data=data)
             ext = ext_mgr[act.action].obj
             getattr(ext, method)(node_info, act.params)

-        LOG.debug('Successfully applied %(what)s to node %(node)s',
-                  {'what': 'rollback actions' if rollback else 'actions',
-                   'node': node_info.uuid})
+        LOG.debug('Successfully applied %s',
+                  'rollback actions' if rollback else 'actions',
+                  node_info=node_info, data=data)


 def create(conditions_json, actions_json, uuid=None,

@@ -349,11 +346,12 @@ def apply(node_info, data):
     """Apply rules to a node."""
     rules = get_all()
     if not rules:
-        LOG.debug('No custom introspection rules to apply to node %s',
-                  node_info.uuid)
+        LOG.debug('No custom introspection rules to apply',
+                  node_info=node_info, data=data)
         return

-    LOG.debug('Applying custom introspection rules to node %s', node_info.uuid)
+    LOG.debug('Applying custom introspection rules',
+              node_info=node_info, data=data)

     to_rollback = []
     to_apply = []

@@ -364,18 +362,19 @@
             to_rollback.append(rule)

     if to_rollback:
-        LOG.debug('Running rollback actions on node %s', node_info.uuid)
+        LOG.debug('Running rollback actions', node_info=node_info, data=data)
         for rule in to_rollback:
             rule.apply_actions(node_info, rollback=True)
     else:
-        LOG.debug('No rollback actions to apply on node %s', node_info.uuid)
+        LOG.debug('No rollback actions to apply',
+                  node_info=node_info, data=data)

     if to_apply:
-        LOG.debug('Running actions on node %s', node_info.uuid)
+        LOG.debug('Running actions', node_info=node_info, data=data)
         for rule in to_apply:
             rule.apply_actions(node_info, rollback=False)
     else:
-        LOG.debug('No actions to apply on node %s', node_info.uuid)
+        LOG.debug('No actions to apply', node_info=node_info, data=data)

-    LOG.info(_LI('Successfully applied custom introspection rules to node %s'),
-             node_info.uuid)
+    LOG.info(_LI('Successfully applied custom introspection rules'),
+             node_info=node_info, data=data)


@@ -213,7 +213,7 @@ class TestIntrospect(BaseTest):
         self.assertRaisesRegexp(
             utils.Error,
-            'Failed validation of power interface for node',
+            'Failed validation of power interface',
             introspect.introspect, self.uuid)

         cli.node.validate.assert_called_once_with(self.uuid)

@@ -229,7 +229,7 @@ class TestIntrospect(BaseTest):
         cli.node.get.return_value = self.node

         self.assertRaisesRegexp(
-            utils.Error, 'Invalid provision state "active"',
+            utils.Error, 'Invalid provision state for introspection: "active"',
             introspect.introspect, self.uuid)

         self.assertEqual(0, self.node_info.ports.call_count)


@@ -158,7 +158,8 @@ class TestGetIpmiAddress(base.BaseTest):
     @mock.patch('socket.gethostbyname')
     def test_bad_hostname_errors(self, mock_socket):
         node = mock.Mock(spec=['driver_info', 'uuid'],
-                         driver_info={'ipmi_address': 'meow'})
+                         driver_info={'ipmi_address': 'meow'},
+                         uuid='uuid1')
         mock_socket.side_effect = socket.gaierror('Boom')
         self.assertRaises(utils.Error, utils.get_ipmi_address, node)

@@ -212,3 +213,66 @@ class TestSpawnN(unittest.TestCase):
         greenpool.spawn_n.assert_called_with(func, "goodbye")
         mock_green_pool.assert_called_once_with(CONF.max_concurrency)
+
+
+class TestProcessingLogger(base.BaseTest):
+    def test_prefix_no_info(self):
+        self.assertEqual('[unidentified node]',
+                         utils.processing_logger_prefix())
+
+    def test_prefix_only_uuid(self):
+        node_info = mock.Mock(uuid='NNN')
+        self.assertEqual('[node: NNN]',
+                         utils.processing_logger_prefix(node_info=node_info))
+
+    def test_prefix_only_bmc(self):
+        data = {'inventory': {'bmc_address': '1.2.3.4'}}
+        self.assertEqual('[node: BMC 1.2.3.4]',
+                         utils.processing_logger_prefix(data=data))
+
+    def test_prefix_only_mac(self):
+        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff'}
+        self.assertEqual('[node: MAC aa:bb:cc:dd:ee:ff]',
+                         utils.processing_logger_prefix(data=data))
+
+    def test_prefix_everything(self):
+        node_info = mock.Mock(uuid='NNN')
+        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff',
+                'inventory': {'bmc_address': '1.2.3.4'}}
+        self.assertEqual('[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4]',
+                         utils.processing_logger_prefix(node_info=node_info,
+                                                        data=data))
+
+    def test_adapter_no_bmc(self):
+        CONF.set_override('log_bmc_address', False, 'processing')
+        node_info = mock.Mock(uuid='NNN')
+        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff',
+                'inventory': {'bmc_address': '1.2.3.4'}}
+        logger = utils.getProcessingLogger(__name__)
+        msg, _kwargs = logger.process('foo', {'node_info': node_info,
+                                              'data': data})
+        self.assertEqual(
+            '[node: NNN MAC aa:bb:cc:dd:ee:ff] foo',
+            msg)
+
+    def test_adapter_with_bmc(self):
+        node_info = mock.Mock(uuid='NNN')
+        data = {'boot_interface': '01-aa-bb-cc-dd-ee-ff',
+                'inventory': {'bmc_address': '1.2.3.4'}}
+        logger = utils.getProcessingLogger(__name__)
+        msg, _kwargs = logger.process('foo', {'node_info': node_info,
+                                              'data': data})
+        self.assertEqual(
+            '[node: NNN MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4] foo',
+            msg)
+
+    def test_adapter_empty_data(self):
+        logger = utils.getProcessingLogger(__name__)
+        msg, _kwargs = logger.process('foo', {'node_info': None,
+                                              'data': None})
+        self.assertEqual('[unidentified node] foo', msg)
+
+    def test_adapter_no_data(self):
+        logger = utils.getProcessingLogger(__name__)
+        msg, _kwargs = logger.process('foo', {})
+        self.assertEqual('foo', msg)


@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import logging as pylog
 import re
 import socket

@@ -23,6 +24,7 @@ from oslo_log import log
 import six

 from ironic_inspector.common.i18n import _, _LE
+from ironic_inspector import conf  # noqa

 CONF = cfg.CONF

@@ -30,21 +32,107 @@ CONF = cfg.CONF
 VALID_STATES = {'enroll', 'manageable', 'inspecting', 'inspectfail'}
 SET_CREDENTIALS_VALID_STATES = {'enroll'}

-LOG = log.getLogger('ironic_inspector.utils')
 GREEN_POOL = None

 # 1.6 is a Kilo API version, which has all we need and is pretty well tested
 DEFAULT_IRONIC_API_VERSION = '1.6'

+
+def get_ipmi_address(node):
+    ipmi_fields = ['ipmi_address'] + CONF.ipmi_address_fields
+    # NOTE(sambetts): IPMI Address is useless to us if bridging is enabled so
+    # just ignore it and return None
+    if node.driver_info.get("ipmi_bridging", "no") != "no":
+        return
+    for name in ipmi_fields:
+        value = node.driver_info.get(name)
+        if value:
+            try:
+                ip = socket.gethostbyname(value)
+                return ip
+            except socket.gaierror:
+                msg = ('Failed to resolve the hostname (%s) for node %s')
+                raise Error(msg % (value, node.uuid), node_info=node)
+
+
+def get_ipmi_address_from_data(introspection_data):
+    try:
+        return introspection_data['inventory']['bmc_address']
+    except KeyError:
+        return introspection_data.get('ipmi_address')
+
+
+def get_pxe_mac(introspection_data):
+    pxe_mac = introspection_data.get('boot_interface')
+    if pxe_mac and '-' in pxe_mac:
+        # pxelinux format: 01-aa-bb-cc-dd-ee-ff
+        pxe_mac = pxe_mac.split('-', 1)[1]
+        pxe_mac = pxe_mac.replace('-', ':').lower()
+    return pxe_mac
+
+
+def processing_logger_prefix(data=None, node_info=None):
+    """Calculate prefix for logging.
+
+    Tries to use:
+    * node UUID,
+    * node PXE MAC,
+    * node BMC address
+
+    :param data: introspection data
+    :param node_info: NodeInfo or ironic node object
+    :return: logging prefix as a string
+    """
+    # TODO(dtantsur): try to get MAC and BMC address for node_info as well
+    parts = []
+    data = data or {}
+
+    if node_info is not None:
+        parts.append(node_info.uuid)
+
+    pxe_mac = get_pxe_mac(data)
+    if pxe_mac:
+        parts.append('MAC %s' % pxe_mac)
+
+    if CONF.processing.log_bmc_address:
+        bmc_address = get_ipmi_address_from_data(data) if data else None
+        if bmc_address:
+            parts.append('BMC %s' % bmc_address)
+
+    if parts:
+        return _('[node: %s]') % ' '.join(parts)
+    else:
+        return _('[unidentified node]')
+
+
+class ProcessingLoggerAdapter(log.KeywordArgumentAdapter):
+    def process(self, msg, kwargs):
+        if 'data' not in kwargs and 'node_info' not in kwargs:
+            return super(ProcessingLoggerAdapter, self).process(msg, kwargs)
+
+        data = kwargs.get('data', {})
+        node_info = kwargs.get('node_info')
+        prefix = processing_logger_prefix(data, node_info)
+
+        msg, kwargs = super(ProcessingLoggerAdapter, self).process(msg, kwargs)
+        return ('%s %s' % (prefix, msg)), kwargs
+
+
+def getProcessingLogger(name):
+    # We can't use getLogger from oslo_log, as it's an adapter itself
+    logger = pylog.getLogger(name)
+    return ProcessingLoggerAdapter(logger, {})
+
+
+LOG = getProcessingLogger(__name__)
+

 class Error(Exception):
     """Inspector exception."""

-    def __init__(self, msg, code=400):
+    def __init__(self, msg, code=400, **kwargs):
         super(Error, self).__init__(msg)
-        LOG.error(msg)
+        LOG.error(msg, **kwargs)
         self.http_code = code

@@ -155,35 +243,19 @@ def get_auth_strategy():
     return CONF.auth_strategy


-def get_ipmi_address(node):
-    ipmi_fields = ['ipmi_address'] + CONF.ipmi_address_fields
-    # NOTE(sambetts): IPMI Address is useless to us if bridging is enabled so
-    # just ignore it and return None
-    if node.driver_info.get("ipmi_bridging", "no") != "no":
-        return
-    for name in ipmi_fields:
-        value = node.driver_info.get(name)
-        if value:
-            try:
-                ip = socket.gethostbyname(value)
-                return ip
-            except socket.gaierror:
-                msg = ('Failed to resolve the hostname (%s) for node %s')
-                raise Error(msg % (value, node.uuid))
-
-
 def check_provision_state(node, with_credentials=False):
     state = node.provision_state.lower()
     if with_credentials and state not in SET_CREDENTIALS_VALID_STATES:
-        msg = _('Invalid provision state "%(state)s" for setting IPMI '
-                'credentials on node %(node)s, valid states are %(valid)s')
-        raise Error(msg % {'node': node.uuid, 'state': state,
-                           'valid': list(SET_CREDENTIALS_VALID_STATES)})
+        msg = _('Invalid provision state for setting IPMI credentials: '
+                '"%(state)s", valid states are %(valid)s')
+        raise Error(msg % {'state': state,
+                           'valid': list(SET_CREDENTIALS_VALID_STATES)},
+                    node_info=node)
     elif not with_credentials and state not in VALID_STATES:
-        msg = _('Invalid provision state "%(state)s" for introspection of '
-                'node %(node)s, valid states are "%(valid)s"')
-        raise Error(msg % {'node': node.uuid, 'state': state,
-                           'valid': list(VALID_STATES)})
+        msg = _('Invalid provision state for introspection: '
+                '"%(state)s", valid states are "%(valid)s"')
+        raise Error(msg % {'state': state, 'valid': list(VALID_STATES)},
+                    node_info=node)


 def capabilities_to_dict(caps):
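
The pxelinux normalization in get_pxe_mac() deserves a quick worked example;
the first case is exactly what the new tests above assert, and the others
follow from the code since the value contains no '-' or the key is absent:

    >>> get_pxe_mac({'boot_interface': '01-aa-bb-cc-dd-ee-ff'})
    'aa:bb:cc:dd:ee:ff'
    >>> get_pxe_mac({'boot_interface': 'aa:bb:cc:dd:ee:ff'})
    'aa:bb:cc:dd:ee:ff'
    >>> get_pxe_mac({}) is None
    True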


@@ -0,0 +1,6 @@
+---
+other:
+  - Logging during processing is now more consistent in terms of how it
+    identifies the node. Now we try to prefix the log message with node UUID,
+    BMC address and PXE MAC address (if available). Logging BMC addresses can
+    be disabled via new "log_bmc_address" option in the "processing" section.
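
Putting it all together, a processing log line now carries a prefix like the
following. The prefix format comes from the tests above; the UUID placeholder
and addresses are made up, and the surrounding timestamp/level decoration
depends on the configured oslo.log format string:

    [node: <uuid> MAC aa:bb:cc:dd:ee:ff BMC 1.2.3.4] Introspection finished successfully
    [unidentified node] No logs received from the ramdisk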