Remove translation of log messages
The i18n team has decided not to translate the logs because it does not seem very useful; operators prefer to have them in English so that they can search for those strings on the internet. Change-Id: I2a62525e18a34e3bbbdfe4e8924d0109568e8e72 Closes-Bug: #1674374
This commit is contained in:
parent
20dba3fb88
commit
7d59d48039
|
@ -19,7 +19,6 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _LE
|
|
||||||
from ironic.drivers.modules.drac import common as drac_common
|
from ironic.drivers.modules.drac import common as drac_common
|
||||||
from ironic.drivers.modules.drac import job as drac_job
|
from ironic.drivers.modules.drac import job as drac_job
|
||||||
|
|
||||||
|
@ -90,8 +89,8 @@ def get_config(node):
|
||||||
try:
|
try:
|
||||||
return client.list_bios_settings()
|
return client.list_bios_settings()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get the BIOS settings for node '
|
LOG.error('DRAC driver failed to get the BIOS settings for node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
@ -117,8 +116,8 @@ def set_config(task, **kwargs):
|
||||||
try:
|
try:
|
||||||
return client.set_bios_settings(kwargs)
|
return client.set_bios_settings(kwargs)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to set the BIOS settings for node '
|
LOG.error('DRAC driver failed to set the BIOS settings for node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
@ -141,8 +140,8 @@ def commit_config(task, reboot=False):
|
||||||
try:
|
try:
|
||||||
return client.commit_pending_bios_changes(reboot)
|
return client.commit_pending_bios_changes(reboot)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to commit the pending BIOS changes '
|
LOG.error('DRAC driver failed to commit the pending BIOS changes '
|
||||||
'for node %(node_uuid)s. Reason: %(error)s.'),
|
'for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
@ -160,8 +159,8 @@ def abandon_config(task):
|
||||||
try:
|
try:
|
||||||
client.abandon_pending_bios_changes()
|
client.abandon_pending_bios_changes()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to delete the pending BIOS '
|
LOG.error('DRAC driver failed to delete the pending BIOS '
|
||||||
'settings for node %(node_uuid)s. Reason: %(error)s.'),
|
'settings for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
|
@ -19,7 +19,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LW
|
from ironic.common.i18n import _
|
||||||
from ironic.common import utils
|
from ironic.common import utils
|
||||||
|
|
||||||
drac_client = importutils.try_import('dracclient.client')
|
drac_client = importutils.try_import('dracclient.client')
|
||||||
|
@ -64,19 +64,19 @@ def parse_driver_info(node):
|
||||||
parsed_driver_info = {}
|
parsed_driver_info = {}
|
||||||
|
|
||||||
if 'drac_host' in driver_info and 'drac_address' not in driver_info:
|
if 'drac_host' in driver_info and 'drac_address' not in driver_info:
|
||||||
LOG.warning(_LW('The driver_info["drac_host"] property is deprecated '
|
LOG.warning('The driver_info["drac_host"] property is deprecated '
|
||||||
'and will be removed in the Pike release. Please '
|
'and will be removed in the Pike release. Please '
|
||||||
'update the node %s driver_info field to use '
|
'update the node %s driver_info field to use '
|
||||||
'"drac_address" instead'), node.uuid)
|
'"drac_address" instead', node.uuid)
|
||||||
address = driver_info.pop('drac_host', None)
|
address = driver_info.pop('drac_host', None)
|
||||||
if address:
|
if address:
|
||||||
driver_info['drac_address'] = address
|
driver_info['drac_address'] = address
|
||||||
elif 'drac_host' in driver_info and 'drac_address' in driver_info:
|
elif 'drac_host' in driver_info and 'drac_address' in driver_info:
|
||||||
LOG.warning(_LW('Both driver_info["drac_address"] and '
|
LOG.warning('Both driver_info["drac_address"] and '
|
||||||
'driver_info["drac_host"] properties are '
|
'driver_info["drac_host"] properties are '
|
||||||
'specified for node %s. Please remove the '
|
'specified for node %s. Please remove the '
|
||||||
'"drac_host" property from the node. Ignoring '
|
'"drac_host" property from the node. Ignoring '
|
||||||
'"drac_host" for now'), node.uuid)
|
'"drac_host" for now', node.uuid)
|
||||||
|
|
||||||
error_msgs = []
|
error_msgs = []
|
||||||
for param in REQUIRED_PROPERTIES:
|
for param in REQUIRED_PROPERTIES:
|
||||||
|
|
|
@ -21,7 +21,7 @@ from oslo_utils import importutils
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE, _LI, _LW
|
from ironic.common.i18n import _
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
from ironic.drivers.modules.drac import common as drac_common
|
from ironic.drivers.modules.drac import common as drac_common
|
||||||
|
@ -94,8 +94,8 @@ class DracInspect(base.InspectInterface):
|
||||||
properties['local_gb'] = int(
|
properties['local_gb'] = int(
|
||||||
root_disk.size_mb / units.Ki)
|
root_disk.size_mb / units.Ki)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to introspect node '
|
LOG.error('DRAC driver failed to introspect node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.HardwareInspectionFailure(error=exc)
|
raise exception.HardwareInspectionFailure(error=exc)
|
||||||
|
|
||||||
|
@ -113,8 +113,8 @@ class DracInspect(base.InspectInterface):
|
||||||
try:
|
try:
|
||||||
nics = client.list_nics()
|
nics = client.list_nics()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to introspect node '
|
LOG.error('DRAC driver failed to introspect node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.HardwareInspectionFailure(error=exc)
|
raise exception.HardwareInspectionFailure(error=exc)
|
||||||
|
|
||||||
|
@ -123,17 +123,17 @@ class DracInspect(base.InspectInterface):
|
||||||
port = objects.Port(task.context, address=nic.mac,
|
port = objects.Port(task.context, address=nic.mac,
|
||||||
node_id=node.id)
|
node_id=node.id)
|
||||||
port.create()
|
port.create()
|
||||||
LOG.info(_LI('Port created with MAC address %(mac)s '
|
LOG.info('Port created with MAC address %(mac)s '
|
||||||
'for node %(node_uuid)s during inspection'),
|
'for node %(node_uuid)s during inspection',
|
||||||
{'mac': nic.mac, 'node_uuid': node.uuid})
|
{'mac': nic.mac, 'node_uuid': node.uuid})
|
||||||
except exception.MACAlreadyExists:
|
except exception.MACAlreadyExists:
|
||||||
LOG.warning(_LW('Failed to create a port with MAC address '
|
LOG.warning('Failed to create a port with MAC address '
|
||||||
'%(mac)s when inspecting the node '
|
'%(mac)s when inspecting the node '
|
||||||
'%(node_uuid)s because the address is already '
|
'%(node_uuid)s because the address is already '
|
||||||
'registered'),
|
'registered',
|
||||||
{'mac': nic.mac, 'node_uuid': node.uuid})
|
{'mac': nic.mac, 'node_uuid': node.uuid})
|
||||||
|
|
||||||
LOG.info(_LI('Node %s successfully inspected.'), node.uuid)
|
LOG.info('Node %s successfully inspected.', node.uuid)
|
||||||
return states.MANAGEABLE
|
return states.MANAGEABLE
|
||||||
|
|
||||||
def _guess_root_disk(self, disks, min_size_required_mb=4 * units.Ki):
|
def _guess_root_disk(self, disks, min_size_required_mb=4 * units.Ki):
|
||||||
|
|
|
@ -19,7 +19,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE
|
from ironic.common.i18n import _
|
||||||
from ironic.drivers.modules.drac import common as drac_common
|
from ironic.drivers.modules.drac import common as drac_common
|
||||||
|
|
||||||
drac_exceptions = importutils.try_import('dracclient.exceptions')
|
drac_exceptions = importutils.try_import('dracclient.exceptions')
|
||||||
|
@ -56,8 +56,8 @@ def get_job(node, job_id):
|
||||||
try:
|
try:
|
||||||
return client.get_job(job_id)
|
return client.get_job(job_id)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get the job %(job_id)s '
|
LOG.error('DRAC driver failed to get the job %(job_id)s '
|
||||||
'for node %(node_uuid)s. Reason: %(error)s.'),
|
'for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
@ -75,8 +75,8 @@ def list_unfinished_jobs(node):
|
||||||
try:
|
try:
|
||||||
return client.list_jobs(only_unfinished=True)
|
return client.list_jobs(only_unfinished=True)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get the list of unfinished jobs '
|
LOG.error('DRAC driver failed to get the list of unfinished jobs '
|
||||||
'for node %(node_uuid)s. Reason: %(error)s.'),
|
'for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
|
@ -25,7 +25,7 @@ from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import boot_devices
|
from ironic.common import boot_devices
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE
|
from ironic.common.i18n import _
|
||||||
from ironic.conductor import task_manager
|
from ironic.conductor import task_manager
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
from ironic.drivers.modules.drac import common as drac_common
|
from ironic.drivers.modules.drac import common as drac_common
|
||||||
|
@ -68,8 +68,8 @@ def _get_boot_device(node, drac_boot_devices=None):
|
||||||
return {'boot_device': boot_device,
|
return {'boot_device': boot_device,
|
||||||
'persistent': next_boot_mode == PERSISTENT_BOOT_MODE}
|
'persistent': next_boot_mode == PERSISTENT_BOOT_MODE}
|
||||||
except (drac_exceptions.BaseClientException, IndexError) as exc:
|
except (drac_exceptions.BaseClientException, IndexError) as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get next boot mode for '
|
LOG.error('DRAC driver failed to get next boot mode for '
|
||||||
'node %(node_uuid)s. Reason: %(error)s.'),
|
'node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
||||||
|
@ -114,8 +114,8 @@ def set_boot_device(node, device, persistent=False):
|
||||||
client.change_boot_device_order(boot_list, drac_boot_device)
|
client.change_boot_device_order(boot_list, drac_boot_device)
|
||||||
client.commit_pending_bios_changes()
|
client.commit_pending_bios_changes()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to change boot device order for '
|
LOG.error('DRAC driver failed to change boot device order for '
|
||||||
'node %(node_uuid)s. Reason: %(error)s.'),
|
'node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
||||||
|
|
|
@ -20,7 +20,6 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _LE
|
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.conductor import task_manager
|
from ironic.conductor import task_manager
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
|
@ -58,8 +57,8 @@ def _get_power_state(node):
|
||||||
try:
|
try:
|
||||||
drac_power_state = client.get_power_state()
|
drac_power_state = client.get_power_state()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get power state for node '
|
LOG.error('DRAC driver failed to get power state for node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
||||||
|
@ -106,9 +105,9 @@ def _set_power_state(node, power_state):
|
||||||
try:
|
try:
|
||||||
client.set_power_state(target_power_state)
|
client.set_power_state(target_power_state)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to set power state for node '
|
LOG.error('DRAC driver failed to set power state for node '
|
||||||
'%(node_uuid)s to %(power_state)s. '
|
'%(node_uuid)s to %(power_state)s. '
|
||||||
'Reason: %(error)s.'),
|
'Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'power_state': power_state,
|
'power_state': power_state,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
|
|
|
@ -24,9 +24,9 @@ from oslo_utils import importutils
|
||||||
from oslo_utils import units
|
from oslo_utils import units
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
|
from ironic.common.i18n import _
|
||||||
from ironic.common import raid as raid_common
|
from ironic.common import raid as raid_common
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.common.i18n import _, _LE, _LI
|
|
||||||
from ironic.conductor import task_manager
|
from ironic.conductor import task_manager
|
||||||
from ironic.conf import CONF
|
from ironic.conf import CONF
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
|
@ -92,8 +92,8 @@ def list_raid_controllers(node):
|
||||||
try:
|
try:
|
||||||
return client.list_raid_controllers()
|
return client.list_raid_controllers()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get the list of RAID controllers '
|
LOG.error('DRAC driver failed to get the list of RAID controllers '
|
||||||
'for node %(node_uuid)s. Reason: %(error)s.'),
|
'for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
||||||
|
@ -110,8 +110,8 @@ def list_virtual_disks(node):
|
||||||
try:
|
try:
|
||||||
return client.list_virtual_disks()
|
return client.list_virtual_disks()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get the list of virtual disks '
|
LOG.error('DRAC driver failed to get the list of virtual disks '
|
||||||
'for node %(node_uuid)s. Reason: %(error)s.'),
|
'for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
||||||
|
@ -128,8 +128,8 @@ def list_physical_disks(node):
|
||||||
try:
|
try:
|
||||||
return client.list_physical_disks()
|
return client.list_physical_disks()
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to get the list of physical disks '
|
LOG.error('DRAC driver failed to get the list of physical disks '
|
||||||
'for node %(node_uuid)s. Reason: %(error)s.'),
|
'for node %(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid, 'error': exc})
|
{'node_uuid': node.uuid, 'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
|
||||||
|
@ -165,8 +165,8 @@ def create_virtual_disk(node, raid_controller, physical_disks, raid_level,
|
||||||
raid_level, size_mb, disk_name,
|
raid_level, size_mb, disk_name,
|
||||||
span_length, span_depth)
|
span_length, span_depth)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to create virtual disk for node '
|
LOG.error('DRAC driver failed to create virtual disk for node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
raise exception.DracOperationError(error=exc)
|
raise exception.DracOperationError(error=exc)
|
||||||
|
@ -193,9 +193,9 @@ def delete_virtual_disk(node, virtual_disk):
|
||||||
try:
|
try:
|
||||||
return client.delete_virtual_disk(virtual_disk)
|
return client.delete_virtual_disk(virtual_disk)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to delete virtual disk '
|
LOG.error('DRAC driver failed to delete virtual disk '
|
||||||
'%(virtual_disk_fqdd)s for node %(node_uuid)s. '
|
'%(virtual_disk_fqdd)s for node %(node_uuid)s. '
|
||||||
'Reason: %(error)s.'),
|
'Reason: %(error)s.',
|
||||||
{'virtual_disk_fqdd': virtual_disk,
|
{'virtual_disk_fqdd': virtual_disk,
|
||||||
'node_uuid': node.uuid,
|
'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
|
@ -217,9 +217,9 @@ def commit_config(node, raid_controller, reboot=False):
|
||||||
try:
|
try:
|
||||||
return client.commit_pending_raid_changes(raid_controller, reboot)
|
return client.commit_pending_raid_changes(raid_controller, reboot)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to commit pending RAID config for'
|
LOG.error('DRAC driver failed to commit pending RAID config for'
|
||||||
' controller %(raid_controller_fqdd)s on node '
|
' controller %(raid_controller_fqdd)s on node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'raid_controller_fqdd': raid_controller,
|
{'raid_controller_fqdd': raid_controller,
|
||||||
'node_uuid': node.uuid,
|
'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
|
@ -238,9 +238,9 @@ def abandon_config(node, raid_controller):
|
||||||
try:
|
try:
|
||||||
client.abandon_pending_raid_changes(raid_controller)
|
client.abandon_pending_raid_changes(raid_controller)
|
||||||
except drac_exceptions.BaseClientException as exc:
|
except drac_exceptions.BaseClientException as exc:
|
||||||
LOG.error(_LE('DRAC driver failed to delete pending RAID config '
|
LOG.error('DRAC driver failed to delete pending RAID config '
|
||||||
'for controller %(raid_controller_fqdd)s on node '
|
'for controller %(raid_controller_fqdd)s on node '
|
||||||
'%(node_uuid)s. Reason: %(error)s.'),
|
'%(node_uuid)s. Reason: %(error)s.',
|
||||||
{'raid_controller_fqdd': raid_controller,
|
{'raid_controller_fqdd': raid_controller,
|
||||||
'node_uuid': node.uuid,
|
'node_uuid': node.uuid,
|
||||||
'error': exc})
|
'error': exc})
|
||||||
|
@ -467,8 +467,8 @@ def _find_configuration(logical_disks, physical_disks):
|
||||||
if not result:
|
if not result:
|
||||||
error_msg = _('failed to find matching physical disks for all '
|
error_msg = _('failed to find matching physical disks for all '
|
||||||
'logical disks')
|
'logical disks')
|
||||||
LOG.error(_LE('DRAC driver failed to create RAID '
|
LOG.error('DRAC driver failed to create RAID '
|
||||||
'configuration. Reason: %(error)s.'),
|
'configuration. Reason: %(error)s.',
|
||||||
{'error': error_msg})
|
{'error': error_msg})
|
||||||
raise exception.DracOperationError(error=error_msg)
|
raise exception.DracOperationError(error=error_msg)
|
||||||
|
|
||||||
|
@ -646,9 +646,9 @@ def _commit_to_controllers(node, controllers):
|
||||||
job_id = commit_config(node, raid_controller=controller,
|
job_id = commit_config(node, raid_controller=controller,
|
||||||
reboot=False)
|
reboot=False)
|
||||||
|
|
||||||
LOG.info(_LI('Change has been committed to RAID controller '
|
LOG.info('Change has been committed to RAID controller '
|
||||||
'%(controller)s on node %(node)s. '
|
'%(controller)s on node %(node)s. '
|
||||||
'DRAC job id: %(job_id)s'),
|
'DRAC job id: %(job_id)s',
|
||||||
{'controller': controller, 'node': node.uuid,
|
{'controller': controller, 'node': node.uuid,
|
||||||
'job_id': job_id})
|
'job_id': job_id})
|
||||||
|
|
||||||
|
@ -815,13 +815,13 @@ class DracRAID(base.RAIDInterface):
|
||||||
self._check_node_raid_jobs(task)
|
self._check_node_raid_jobs(task)
|
||||||
|
|
||||||
except exception.NodeNotFound:
|
except exception.NodeNotFound:
|
||||||
LOG.info(_LI("During query_raid_config_job_status, node "
|
LOG.info("During query_raid_config_job_status, node "
|
||||||
"%(node)s was not found and presumed deleted by "
|
"%(node)s was not found and presumed deleted by "
|
||||||
"another process."), {'node': node_uuid})
|
"another process.", {'node': node_uuid})
|
||||||
except exception.NodeLocked:
|
except exception.NodeLocked:
|
||||||
LOG.info(_LI("During query_raid_config_job_status, node "
|
LOG.info("During query_raid_config_job_status, node "
|
||||||
"%(node)s was already locked by another process. "
|
"%(node)s was already locked by another process. "
|
||||||
"Skip."), {'node': node_uuid})
|
"Skip.", {'node': node_uuid})
|
||||||
|
|
||||||
@METRICS.timer('DracRAID._check_node_raid_jobs')
|
@METRICS.timer('DracRAID._check_node_raid_jobs')
|
||||||
def _check_node_raid_jobs(self, task):
|
def _check_node_raid_jobs(self, task):
|
||||||
|
@ -878,9 +878,9 @@ class DracRAID(base.RAIDInterface):
|
||||||
node.save()
|
node.save()
|
||||||
|
|
||||||
def _set_clean_failed(self, task, config_job):
|
def _set_clean_failed(self, task, config_job):
|
||||||
LOG.error(_LE("RAID configuration job failed for node %(node)s. "
|
LOG.error("RAID configuration job failed for node %(node)s. "
|
||||||
"Failed config job: %(config_job_id)s. "
|
"Failed config job: %(config_job_id)s. "
|
||||||
"Message: '%(message)s'."),
|
"Message: '%(message)s'.",
|
||||||
{'node': task.node.uuid, 'config_job_id': config_job.id,
|
{'node': task.node.uuid, 'config_job_id': config_job.id,
|
||||||
'message': config_job.message})
|
'message': config_job.message})
|
||||||
task.node.last_error = config_job.message
|
task.node.last_error = config_job.message
|
||||||
|
|
|
@ -21,7 +21,7 @@ from oslo_log import log
|
||||||
|
|
||||||
from ironic.common import dhcp_factory
|
from ironic.common import dhcp_factory
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LW
|
from ironic.common.i18n import _
|
||||||
from ironic.common import neutron
|
from ironic.common import neutron
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.common import utils
|
from ironic.common import utils
|
||||||
|
@ -254,10 +254,10 @@ class VIFPortIDMixin(object):
|
||||||
# Log warning if there is no VIF and an instance
|
# Log warning if there is no VIF and an instance
|
||||||
# is associated with the node.
|
# is associated with the node.
|
||||||
elif node.instance_uuid:
|
elif node.instance_uuid:
|
||||||
LOG.warning(_LW(
|
LOG.warning(
|
||||||
"No VIF found for instance %(instance)s "
|
"No VIF found for instance %(instance)s "
|
||||||
"port %(port)s when attempting to update port "
|
"port %(port)s when attempting to update port "
|
||||||
"client-id."),
|
"client-id.",
|
||||||
{'port': port_uuid,
|
{'port': port_uuid,
|
||||||
'instance': node.instance_uuid})
|
'instance': node.instance_uuid})
|
||||||
|
|
||||||
|
|
|
@ -19,7 +19,7 @@ from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LI, _LW
|
from ironic.common.i18n import _
|
||||||
from ironic.common import neutron
|
from ironic.common import neutron
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
from ironic.drivers.modules.network import common
|
from ironic.drivers.modules.network import common
|
||||||
|
@ -37,11 +37,11 @@ class FlatNetwork(common.VIFPortIDMixin, neutron.NeutronNetworkInterfaceMixin,
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
cleaning_net = CONF.neutron.cleaning_network
|
cleaning_net = CONF.neutron.cleaning_network
|
||||||
if not cleaning_net:
|
if not cleaning_net:
|
||||||
LOG.warning(_LW(
|
LOG.warning(
|
||||||
'Please specify a valid UUID or name for '
|
'Please specify a valid UUID or name for '
|
||||||
'[neutron]/cleaning_network configuration option so that '
|
'[neutron]/cleaning_network configuration option so that '
|
||||||
'this interface is able to perform cleaning. Otherwise, '
|
'this interface is able to perform cleaning. Otherwise, '
|
||||||
'cleaning operations will fail to start.'))
|
'cleaning operations will fail to start.')
|
||||||
|
|
||||||
def validate(self, task):
|
def validate(self, task):
|
||||||
"""Validates the network interface.
|
"""Validates the network interface.
|
||||||
|
@ -117,7 +117,7 @@ class FlatNetwork(common.VIFPortIDMixin, neutron.NeutronNetworkInterfaceMixin,
|
||||||
"""
|
"""
|
||||||
# If we have left over ports from a previous cleaning, remove them
|
# If we have left over ports from a previous cleaning, remove them
|
||||||
neutron.rollback_ports(task, self.get_cleaning_network_uuid())
|
neutron.rollback_ports(task, self.get_cleaning_network_uuid())
|
||||||
LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid)
|
LOG.info('Adding cleaning network to node %s', task.node.uuid)
|
||||||
vifs = neutron.add_ports_to_network(
|
vifs = neutron.add_ports_to_network(
|
||||||
task, self.get_cleaning_network_uuid())
|
task, self.get_cleaning_network_uuid())
|
||||||
for port in task.ports:
|
for port in task.ports:
|
||||||
|
@ -134,7 +134,7 @@ class FlatNetwork(common.VIFPortIDMixin, neutron.NeutronNetworkInterfaceMixin,
|
||||||
:param task: A TaskManager instance.
|
:param task: A TaskManager instance.
|
||||||
:raises: NetworkError
|
:raises: NetworkError
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI('Removing ports from cleaning network for node %s'),
|
LOG.info('Removing ports from cleaning network for node %s',
|
||||||
task.node.uuid)
|
task.node.uuid)
|
||||||
neutron.remove_ports_from_network(task,
|
neutron.remove_ports_from_network(task,
|
||||||
self.get_cleaning_network_uuid())
|
self.get_cleaning_network_uuid())
|
||||||
|
|
|
@ -18,7 +18,7 @@ from oslo_config import cfg
|
||||||
from oslo_log import log
|
from oslo_log import log
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LI
|
from ironic.common.i18n import _
|
||||||
from ironic.common import neutron
|
from ironic.common import neutron
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
from ironic.drivers.modules.network import common
|
from ironic.drivers.modules.network import common
|
||||||
|
@ -69,7 +69,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
|
||||||
# If we have left over ports from a previous provision attempt, remove
|
# If we have left over ports from a previous provision attempt, remove
|
||||||
# them
|
# them
|
||||||
neutron.rollback_ports(task, self.get_provisioning_network_uuid())
|
neutron.rollback_ports(task, self.get_provisioning_network_uuid())
|
||||||
LOG.info(_LI('Adding provisioning network to node %s'),
|
LOG.info('Adding provisioning network to node %s',
|
||||||
task.node.uuid)
|
task.node.uuid)
|
||||||
vifs = neutron.add_ports_to_network(
|
vifs = neutron.add_ports_to_network(
|
||||||
task, self.get_provisioning_network_uuid(),
|
task, self.get_provisioning_network_uuid(),
|
||||||
|
@ -87,7 +87,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
|
||||||
:param task: A TaskManager instance.
|
:param task: A TaskManager instance.
|
||||||
:raises: NetworkError
|
:raises: NetworkError
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI('Removing provisioning network from node %s'),
|
LOG.info('Removing provisioning network from node %s',
|
||||||
task.node.uuid)
|
task.node.uuid)
|
||||||
neutron.remove_ports_from_network(
|
neutron.remove_ports_from_network(
|
||||||
task, self.get_provisioning_network_uuid())
|
task, self.get_provisioning_network_uuid())
|
||||||
|
@ -107,7 +107,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
|
||||||
"""
|
"""
|
||||||
# If we have left over ports from a previous cleaning, remove them
|
# If we have left over ports from a previous cleaning, remove them
|
||||||
neutron.rollback_ports(task, self.get_cleaning_network_uuid())
|
neutron.rollback_ports(task, self.get_cleaning_network_uuid())
|
||||||
LOG.info(_LI('Adding cleaning network to node %s'), task.node.uuid)
|
LOG.info('Adding cleaning network to node %s', task.node.uuid)
|
||||||
security_groups = CONF.neutron.cleaning_network_security_groups
|
security_groups = CONF.neutron.cleaning_network_security_groups
|
||||||
vifs = neutron.add_ports_to_network(task,
|
vifs = neutron.add_ports_to_network(task,
|
||||||
self.get_cleaning_network_uuid(),
|
self.get_cleaning_network_uuid(),
|
||||||
|
@ -126,7 +126,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
|
||||||
:param task: a TaskManager instance.
|
:param task: a TaskManager instance.
|
||||||
:raises: NetworkError
|
:raises: NetworkError
|
||||||
"""
|
"""
|
||||||
LOG.info(_LI('Removing cleaning network from node %s'),
|
LOG.info('Removing cleaning network from node %s',
|
||||||
task.node.uuid)
|
task.node.uuid)
|
||||||
neutron.remove_ports_from_network(task,
|
neutron.remove_ports_from_network(task,
|
||||||
self.get_cleaning_network_uuid())
|
self.get_cleaning_network_uuid())
|
||||||
|
@ -145,7 +145,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
|
||||||
"""
|
"""
|
||||||
node = task.node
|
node = task.node
|
||||||
ports = task.ports
|
ports = task.ports
|
||||||
LOG.info(_LI('Mapping instance ports to %s'), node.uuid)
|
LOG.info('Mapping instance ports to %s', node.uuid)
|
||||||
|
|
||||||
# TODO(russell_h): this is based on the broken assumption that the
|
# TODO(russell_h): this is based on the broken assumption that the
|
||||||
# number of Neutron ports will match the number of physical ports.
|
# number of Neutron ports will match the number of physical ports.
|
||||||
|
@ -186,7 +186,7 @@ class NeutronNetwork(common.VIFPortIDMixin,
|
||||||
:raises: NetworkError
|
:raises: NetworkError
|
||||||
"""
|
"""
|
||||||
node = task.node
|
node = task.node
|
||||||
LOG.info(_LI('Unbinding instance ports from node %s'), node.uuid)
|
LOG.info('Unbinding instance ports from node %s', node.uuid)
|
||||||
|
|
||||||
ports = [p for p in task.ports if not p.portgroup_id]
|
ports = [p for p in task.ports if not p.portgroup_id]
|
||||||
portgroups = task.portgroups
|
portgroups = task.portgroups
|
||||||
|
|
|
@ -17,7 +17,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE
|
from ironic.common.i18n import _
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.conf import CONF
|
from ironic.conf import CONF
|
||||||
from ironic.drivers import utils
|
from ironic.drivers import utils
|
||||||
|
@ -247,8 +247,8 @@ def node_has_server_profile(func):
|
||||||
)
|
)
|
||||||
except oneview_exceptions.OneViewException as oneview_exc:
|
except oneview_exceptions.OneViewException as oneview_exc:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
_LE("Failed to get server profile from OneView appliance for"
|
"Failed to get server profile from OneView appliance for"
|
||||||
" node %(node)s. Error: %(message)s"),
|
" node %(node)s. Error: %(message)s",
|
||||||
{"node": task.node.uuid, "message": oneview_exc}
|
{"node": task.node.uuid, "message": oneview_exc}
|
||||||
)
|
)
|
||||||
raise exception.OneViewError(error=oneview_exc)
|
raise exception.OneViewError(error=oneview_exc)
|
||||||
|
|
|
@ -23,7 +23,7 @@ import retrying
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE, _LI, _LW
|
from ironic.common.i18n import _
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.conductor import utils as manager_utils
|
from ironic.conductor import utils as manager_utils
|
||||||
from ironic.conf import CONF
|
from ironic.conf import CONF
|
||||||
|
@ -81,18 +81,18 @@ class OneViewPeriodicTasks(object):
|
||||||
# remaining nodes. This node will be checked in
|
# remaining nodes. This node will be checked in
|
||||||
# the next periodic call.
|
# the next periodic call.
|
||||||
|
|
||||||
LOG.error(_LE("Error while determining if node "
|
LOG.error("Error while determining if node "
|
||||||
"%(node_uuid)s is in use by OneView. "
|
"%(node_uuid)s is in use by OneView. "
|
||||||
"Error: %(error)s"),
|
"Error: %(error)s",
|
||||||
{'node_uuid': node.uuid, 'error': e})
|
{'node_uuid': node.uuid, 'error': e})
|
||||||
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if oneview_using:
|
if oneview_using:
|
||||||
purpose = (_LI('Updating node %(node_uuid)s in use '
|
purpose = ('Updating node %(node_uuid)s in use '
|
||||||
'by OneView from %(provision_state)s state '
|
'by OneView from %(provision_state)s state '
|
||||||
'to %(target_state)s state and maintenance '
|
'to %(target_state)s state and maintenance '
|
||||||
'mode %(maintenance)s.'),
|
'mode %(maintenance)s.',
|
||||||
{'node_uuid': node_uuid,
|
{'node_uuid': node_uuid,
|
||||||
'provision_state': states.AVAILABLE,
|
'provision_state': states.AVAILABLE,
|
||||||
'target_state': states.MANAGEABLE,
|
'target_state': states.MANAGEABLE,
|
||||||
|
@ -143,18 +143,18 @@ class OneViewPeriodicTasks(object):
|
||||||
# remaining nodes. This node will be checked in
|
# remaining nodes. This node will be checked in
|
||||||
# the next periodic call.
|
# the next periodic call.
|
||||||
|
|
||||||
LOG.error(_LE("Error while determining if node "
|
LOG.error("Error while determining if node "
|
||||||
"%(node_uuid)s is in use by OneView. "
|
"%(node_uuid)s is in use by OneView. "
|
||||||
"Error: %(error)s"),
|
"Error: %(error)s",
|
||||||
{'node_uuid': node.uuid, 'error': e})
|
{'node_uuid': node.uuid, 'error': e})
|
||||||
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if not oneview_using:
|
if not oneview_using:
|
||||||
purpose = (_LI('Bringing node %(node_uuid)s back from '
|
purpose = ('Bringing node %(node_uuid)s back from '
|
||||||
'use by OneView from %(provision_state)s '
|
'use by OneView from %(provision_state)s '
|
||||||
'state to %(target_state)s state and '
|
'state to %(target_state)s state and '
|
||||||
'maintenance mode %(maintenance)s.'),
|
'maintenance mode %(maintenance)s.',
|
||||||
{'node_uuid': node_uuid,
|
{'node_uuid': node_uuid,
|
||||||
'provision_state': states.MANAGEABLE,
|
'provision_state': states.MANAGEABLE,
|
||||||
'target_state': states.AVAILABLE,
|
'target_state': states.AVAILABLE,
|
||||||
|
@ -202,10 +202,10 @@ class OneViewPeriodicTasks(object):
|
||||||
|
|
||||||
node = objects.Node.get(context, node_uuid)
|
node = objects.Node.get(context, node_uuid)
|
||||||
|
|
||||||
purpose = (_LI('Bringing node %(node_uuid)s back from use '
|
purpose = ('Bringing node %(node_uuid)s back from use '
|
||||||
'by OneView from %(provision_state)s state '
|
'by OneView from %(provision_state)s state '
|
||||||
'to %(target_state)s state and '
|
'to %(target_state)s state and '
|
||||||
'maintenance mode %(maintenance)s.'),
|
'maintenance mode %(maintenance)s.',
|
||||||
{'node_uuid': node_uuid,
|
{'node_uuid': node_uuid,
|
||||||
'provision_state': states.CLEANFAIL,
|
'provision_state': states.CLEANFAIL,
|
||||||
'target_state': states.MANAGEABLE,
|
'target_state': states.MANAGEABLE,
|
||||||
|
@ -288,7 +288,7 @@ class OneViewAgentDeployMixin(object):
|
||||||
ironic_deploy_utils.set_failed_state(task, msg)
|
ironic_deploy_utils.set_failed_state(task, msg)
|
||||||
return
|
return
|
||||||
|
|
||||||
LOG.info(_LI('Image successfully written to node %s'), node.uuid)
|
LOG.info('Image successfully written to node %s', node.uuid)
|
||||||
LOG.debug('Rebooting node %s to instance', node.uuid)
|
LOG.debug('Rebooting node %s to instance', node.uuid)
|
||||||
|
|
||||||
self.reboot_and_finish_deploy(task)
|
self.reboot_and_finish_deploy(task)
|
||||||
|
@ -332,8 +332,8 @@ class OneViewAgentDeployMixin(object):
|
||||||
_wait_until_powered_off(task)
|
_wait_until_powered_off(task)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.warning(
|
LOG.warning(
|
||||||
_LW('Failed to soft power off node %(node_uuid)s '
|
'Failed to soft power off node %(node_uuid)s '
|
||||||
'in at least %(timeout)d seconds. Error: %(error)s'),
|
'in at least %(timeout)d seconds. Error: %(error)s',
|
||||||
{'node_uuid': node.uuid,
|
{'node_uuid': node.uuid,
|
||||||
'timeout': (wait * (attempts - 1)) / 1000,
|
'timeout': (wait * (attempts - 1)) / 1000,
|
||||||
'error': e})
|
'error': e})
|
||||||
|
@ -349,7 +349,7 @@ class OneViewAgentDeployMixin(object):
|
||||||
agent_base_vendor.log_and_raise_deployment_error(task, msg)
|
agent_base_vendor.log_and_raise_deployment_error(task, msg)
|
||||||
|
|
||||||
task.process_event('done')
|
task.process_event('done')
|
||||||
LOG.info(_LI('Deployment to node %s done'), task.node.uuid)
|
LOG.info('Deployment to node %s done', task.node.uuid)
|
||||||
|
|
||||||
|
|
||||||
class OneViewAgentDeploy(OneViewAgentDeployMixin, agent.AgentDeploy,
|
class OneViewAgentDeploy(OneViewAgentDeployMixin, agent.AgentDeploy,
|
||||||
|
|
|
@ -20,7 +20,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE, _LI, _LW
|
from ironic.common.i18n import _
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.drivers.modules.oneview import common
|
from ironic.drivers.modules.oneview import common
|
||||||
|
|
||||||
|
@ -303,10 +303,10 @@ def allocate_server_hardware_to_ironic(oneview_client, node,
|
||||||
applied_sp_uri is not (None, '')):
|
applied_sp_uri is not (None, '')):
|
||||||
|
|
||||||
_del_applied_server_profile_uri_field(node)
|
_del_applied_server_profile_uri_field(node)
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"Inconsistent 'applied_server_profile_uri' parameter "
|
"Inconsistent 'applied_server_profile_uri' parameter "
|
||||||
"value in driver_info. There is no Server Profile "
|
"value in driver_info. There is no Server Profile "
|
||||||
"applied to node %(node_uuid)s. Value deleted."),
|
"applied to node %(node_uuid)s. Value deleted.",
|
||||||
{"node_uuid": node.uuid}
|
{"node_uuid": node.uuid}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -314,9 +314,9 @@ def allocate_server_hardware_to_ironic(oneview_client, node,
|
||||||
# applied on Hardware. Do not apply again.
|
# applied on Hardware. Do not apply again.
|
||||||
if (applied_sp_uri and server_hardware.server_profile_uri and
|
if (applied_sp_uri and server_hardware.server_profile_uri and
|
||||||
server_hardware.server_profile_uri == applied_sp_uri):
|
server_hardware.server_profile_uri == applied_sp_uri):
|
||||||
LOG.info(_LI(
|
LOG.info(
|
||||||
"The Server Profile %(applied_sp_uri)s was already applied "
|
"The Server Profile %(applied_sp_uri)s was already applied "
|
||||||
"by ironic on node %(node_uuid)s. Reusing."),
|
"by ironic on node %(node_uuid)s. Reusing.",
|
||||||
{"node_uuid": node.uuid, "applied_sp_uri": applied_sp_uri}
|
{"node_uuid": node.uuid, "applied_sp_uri": applied_sp_uri}
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
@ -328,15 +328,15 @@ def allocate_server_hardware_to_ironic(oneview_client, node,
|
||||||
_add_applied_server_profile_uri_field(node, applied_profile)
|
_add_applied_server_profile_uri_field(node, applied_profile)
|
||||||
|
|
||||||
LOG.info(
|
LOG.info(
|
||||||
_LI("Server Profile %(server_profile_uuid)s was successfully"
|
"Server Profile %(server_profile_uuid)s was successfully"
|
||||||
" applied to node %(node_uuid)s."),
|
" applied to node %(node_uuid)s.",
|
||||||
{"node_uuid": node.uuid,
|
{"node_uuid": node.uuid,
|
||||||
"server_profile_uuid": applied_profile.uri}
|
"server_profile_uuid": applied_profile.uri}
|
||||||
)
|
)
|
||||||
|
|
||||||
except oneview_exception.OneViewServerProfileAssignmentError as e:
|
except oneview_exception.OneViewServerProfileAssignmentError as e:
|
||||||
LOG.error(_LE("An error occurred during allocating server "
|
LOG.error("An error occurred during allocating server "
|
||||||
"hardware to ironic during prepare: %s"), e)
|
"hardware to ironic during prepare: %s", e)
|
||||||
raise exception.OneViewError(error=e)
|
raise exception.OneViewError(error=e)
|
||||||
else:
|
else:
|
||||||
msg = (_("Node %s is already in use by OneView.") %
|
msg = (_("Node %s is already in use by OneView.") %
|
||||||
|
@ -367,8 +367,8 @@ def deallocate_server_hardware_from_ironic(oneview_client, node):
|
||||||
oneview_client.delete_server_profile(server_profile_uuid)
|
oneview_client.delete_server_profile(server_profile_uuid)
|
||||||
_del_applied_server_profile_uri_field(node)
|
_del_applied_server_profile_uri_field(node)
|
||||||
|
|
||||||
LOG.info(_LI("Server Profile %(server_profile_uuid)s was deleted "
|
LOG.info("Server Profile %(server_profile_uuid)s was deleted "
|
||||||
"from node %(node_uuid)s in OneView."),
|
"from node %(node_uuid)s in OneView.",
|
||||||
{'server_profile_uuid': server_profile_uuid,
|
{'server_profile_uuid': server_profile_uuid,
|
||||||
'node_uuid': node.uuid})
|
'node_uuid': node.uuid})
|
||||||
except (ValueError, oneview_exception.OneViewException) as e:
|
except (ValueError, oneview_exception.OneViewException) as e:
|
||||||
|
@ -378,6 +378,6 @@ def deallocate_server_hardware_from_ironic(oneview_client, node):
|
||||||
raise exception.OneViewError(error=msg)
|
raise exception.OneViewError(error=msg)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Cannot deallocate node %(node_uuid)s "
|
LOG.warning("Cannot deallocate node %(node_uuid)s "
|
||||||
"in OneView because it is not in use by "
|
"in OneView because it is not in use by "
|
||||||
"ironic."), {'node_uuid': node.uuid})
|
"ironic.", {'node_uuid': node.uuid})
|
||||||
|
|
|
@ -19,7 +19,7 @@ from oslo_log import log as logging
|
||||||
from oslo_utils import importutils
|
from oslo_utils import importutils
|
||||||
|
|
||||||
from ironic.common import exception
|
from ironic.common import exception
|
||||||
from ironic.common.i18n import _, _LE
|
from ironic.common.i18n import _
|
||||||
from ironic.common import states
|
from ironic.common import states
|
||||||
from ironic.conductor import task_manager
|
from ironic.conductor import task_manager
|
||||||
from ironic.drivers import base
|
from ironic.drivers import base
|
||||||
|
@ -95,8 +95,8 @@ class OneViewPower(base.PowerInterface):
|
||||||
)
|
)
|
||||||
except oneview_exceptions.OneViewException as oneview_exc:
|
except oneview_exceptions.OneViewException as oneview_exc:
|
||||||
LOG.error(
|
LOG.error(
|
||||||
_LE("Error getting power state for node %(node)s. Error:"
|
"Error getting power state for node %(node)s. Error:"
|
||||||
"%(error)s"),
|
"%(error)s",
|
||||||
{'node': task.node.uuid, 'error': oneview_exc}
|
{'node': task.node.uuid, 'error': oneview_exc}
|
||||||
)
|
)
|
||||||
raise exception.OneViewError(error=oneview_exc)
|
raise exception.OneViewError(error=oneview_exc)
|
||||||
|
|
|
@ -50,7 +50,6 @@ from oslo_utils import uuidutils
|
||||||
import sqlalchemy
|
import sqlalchemy
|
||||||
import sqlalchemy.exc
|
import sqlalchemy.exc
|
||||||
|
|
||||||
from ironic.common.i18n import _LE
|
|
||||||
from ironic.conf import CONF
|
from ironic.conf import CONF
|
||||||
from ironic.db.sqlalchemy import migration
|
from ironic.db.sqlalchemy import migration
|
||||||
from ironic.db.sqlalchemy import models
|
from ironic.db.sqlalchemy import models
|
||||||
|
@ -133,8 +132,8 @@ class WalkVersionsMixin(object):
|
||||||
if check:
|
if check:
|
||||||
check(engine, data)
|
check(engine, data)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.error(_LE("Failed to migrate to version %(version)s on engine "
|
LOG.error("Failed to migrate to version %(version)s on engine "
|
||||||
"%(engine)s"),
|
"%(engine)s",
|
||||||
{'version': version, 'engine': engine})
|
{'version': version, 'engine': engine})
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue