Merge "Use driver_internal_info methods for drac driver"

Zuul 2022-01-13 02:38:39 +00:00 committed by Gerrit Code Review
commit aed88ed93e
4 changed files with 63 additions and 95 deletions
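
For orientation, here is a minimal before/after sketch of the pattern this change applies throughout the DRAC driver. The key name and value are placeholders, and the helpers' behaviour is inferred only from the replacements visible in the hunks below.

    # Old pattern (removed below): copy the dict, mutate it, and assign it
    # back so the change to the JSON field is recorded.
    info = node.driver_internal_info
    info['example_key'] = value            # 'example_key' and value are placeholders
    node.driver_internal_info = info
    node.save()

    # New pattern (added below): use the Node helper methods instead.
    node.set_driver_internal_info('example_key', value)   # set or replace a key
    node.del_driver_internal_info('example_key')          # remove a key
    node.timestamp_driver_internal_info('example_key')    # record the current time
    node.save()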

View File

@ -128,13 +128,14 @@ class DracWSManBIOS(base.BIOSInterface):
raise exception.DracOperationError(error=exc)
# Store JobID for the async job handler _check_node_bios_jobs
driver_internal_info = node.driver_internal_info
driver_internal_info.setdefault(
'bios_config_job_ids', []).append(commit_result)
node.driver_internal_info = driver_internal_info
bios_config_job_ids = node.driver_internal_info.get(
'bios_config_job_ids', [])
bios_config_job_ids.append(commit_result)
node.set_driver_internal_info('bios_config_job_ids',
bios_config_job_ids)
# This method calls node.save(), bios_config_job_ids will be saved
# automatically
# This method calls node.save(), bios_config_job_ids will then be
# saved.
# These flags are for the conductor to manage the asynchronous
# jobs that have been initiated by this method
deploy_utils.set_async_step_flags(
@@ -300,15 +301,15 @@ class DracWSManBIOS(base.BIOSInterface):
"""
if finished_job_ids is None:
finished_job_ids = []
-driver_internal_info = node.driver_internal_info
# take out the unfinished job ids from all the jobs
-unfinished_job_ids = [job_id for job_id
-in driver_internal_info['bios_config_job_ids']
+unfinished_job_ids = [
+job_id for job_id
+in node.driver_internal_info['bios_config_job_ids']
if job_id not in finished_job_ids]
# assign the unfinished job ids back to the total list
# this will clear the finished jobs from the list
-driver_internal_info['bios_config_job_ids'] = unfinished_job_ids
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('bios_config_job_ids',
+unfinished_job_ids)
node.save()
def _delete_cached_reboot_time(self, node):
@@ -316,12 +317,9 @@ class DracWSManBIOS(base.BIOSInterface):
:param node: an ironic node object
"""
-driver_internal_info = node.driver_internal_info
# Remove the last reboot time and factory reset time
-driver_internal_info.pop(
-'factory_reset_time_before_reboot')
-driver_internal_info.pop('factory_reset_time')
-node.driver_internal_info = driver_internal_info
+node.del_driver_internal_info('factory_reset_time_before_reboot')
+node.del_driver_internal_info('factory_reset_time')
node.save()
def _set_failed(self, task, error_message):
@@ -414,14 +412,11 @@ class DracWSManBIOS(base.BIOSInterface):
raise exception.DracOperationError(error=exc)
# Store the last inventory time on reboot for async job handler
# _check_last_system_inventory_changed
-driver_internal_info = node.driver_internal_info
-driver_internal_info['factory_reset_time_before_reboot'] = \
-factory_reset_time_before_reboot
+node.set_driver_internal_info('factory_reset_time_before_reboot',
+factory_reset_time_before_reboot)
# Store the current time to later check if factory reset times out
-driver_internal_info['factory_reset_time'] = str(
-timeutils.utcnow(with_timezone=True))
+node.timestamp_driver_internal_info('factory_reset_time')
-node.driver_internal_info = driver_internal_info
# rebooting the server to apply factory reset value
client.set_power_state('REBOOT')
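
The last hunk above also replaces the hand-written UTC timestamp with timestamp_driver_internal_info. A rough sketch of the assumed equivalence, based only on the code removed here (the helper's real implementation is not part of this diff):

    from oslo_utils import timeutils

    def timestamp_driver_internal_info(node, key):
        # Assumed behaviour, mirroring the removed lines: store the current
        # timezone-aware UTC time as a string under the given key.
        node.set_driver_internal_info(key, str(timeutils.utcnow(with_timezone=True)))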

View File

@@ -445,9 +445,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
lambda m: m.import_system_configuration(
json.dumps(configuration["oem"]["data"])),)
-info = task.node.driver_internal_info
-info['import_task_monitor_url'] = task_monitor.task_monitor_uri
-task.node.driver_internal_info = info
+task.node.set_driver_internal_info('import_task_monitor_url',
+task_monitor.task_monitor_uri)
deploy_utils.set_async_step_flags(
task.node,
@@ -476,9 +475,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
"""
# Import is async operation, setting sub-step to store export config
# and indicate that it's being executed as part of composite step
-info = task.node.driver_internal_info
-info['export_configuration_location'] = export_configuration_location
-task.node.driver_internal_info = info
+task.node.set_driver_internal_info('export_configuration_location',
+export_configuration_location)
task.node.save()
return self.import_configuration(task, import_configuration_location)
@@ -521,9 +519,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
log_msg = ("Import configuration task failed for node "
"%(node)s. %(error)s" % {'node': task.node.uuid,
'error': error_msg})
-info = node.driver_internal_info
-info.pop('import_task_monitor_url', None)
-node.driver_internal_info = info
+node.del_driver_internal_info('import_task_monitor_url')
node.save()
self._set_failed(task, log_msg, error_msg)
return
@@ -532,9 +528,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
import_task = task_monitor.get_task()
task.upgrade_lock()
-info = node.driver_internal_info
-info.pop('import_task_monitor_url', None)
-node.driver_internal_info = info
+node.del_driver_internal_info('import_task_monitor_url')
succeeded = False
if (import_task.task_state == sushy.TASK_STATE_COMPLETED
@@ -557,8 +551,8 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
'task_monitor_url': task_monitor_url})
# If import executed as part of import_export_configuration
-export_configuration_location =\
-info.get('export_configuration_location')
+export_configuration_location = node.driver_internal_info.get(
+'export_configuration_location')
if export_configuration_location:
# then do sync export configuration before finishing
self._cleanup_export_substep(node)
@@ -613,9 +607,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement):
manager_utils.deploying_error_handler(task, log_msg, error_msg)
def _cleanup_export_substep(self, node):
-driver_internal_info = node.driver_internal_info
-driver_internal_info.pop('export_configuration_location', None)
-node.driver_internal_info = driver_internal_info
+node.del_driver_internal_info('export_configuration_location')
@METRICS.timer('DracRedfishManagement.clear_job_queue')
@base.verify_step(priority=0)
@@ -752,10 +744,9 @@ class DracWSManManagement(base.ManagementInterface):
# at the next boot. As a workaround, saving it to
# driver_internal_info and committing the change during
# power state change.
-driver_internal_info = node.driver_internal_info
-driver_internal_info['drac_boot_device'] = {'boot_device': device,
-'persistent': persistent}
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('drac_boot_device',
+{'boot_device': device,
+'persistent': persistent})
node.save()
@METRICS.timer('DracManagement.get_sensors_data')

View File

@@ -74,7 +74,6 @@ def _get_power_state(node):
def _commit_boot_list_change(node):
-driver_internal_info = node.driver_internal_info
boot_device = node.driver_internal_info.get('drac_boot_device')
if boot_device is None:
@@ -83,8 +82,7 @@ def _commit_boot_list_change(node):
drac_management.set_boot_device(node, boot_device['boot_device'],
boot_device['persistent'])
-driver_internal_info['drac_boot_device'] = None
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('drac_boot_device', None)
node.save()

View File

@@ -1007,19 +1007,14 @@ def _commit_to_controllers(node, controllers, substep="completed"):
if not controllers:
LOG.debug('No changes on any of the controllers on node %s',
node.uuid)
-driver_internal_info = node.driver_internal_info
-driver_internal_info['raid_config_substep'] = substep
-driver_internal_info['raid_config_parameters'] = []
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('raid_config_substep', substep)
+node.set_driver_internal_info('raid_config_parameters', [])
node.save()
return
-driver_internal_info = node.driver_internal_info
-driver_internal_info['raid_config_substep'] = substep
-driver_internal_info['raid_config_parameters'] = []
-if 'raid_config_job_ids' not in driver_internal_info:
-driver_internal_info['raid_config_job_ids'] = []
+i_raid_config_parameters = []
+i_raid_config_job_ids = node.driver_internal_info.get(
+'raid_config_job_ids', [])
optional = drac_constants.RebootRequired.optional
@@ -1083,13 +1078,12 @@ def _commit_to_controllers(node, controllers, substep="completed"):
raid_config_job_ids=raid_config_job_ids,
raid_config_parameters=raid_config_parameters)
-driver_internal_info['raid_config_job_ids'].extend(job_details[
-'raid_config_job_ids'])
-driver_internal_info['raid_config_parameters'].extend(job_details[
-'raid_config_parameters'])
-node.driver_internal_info = driver_internal_info
+i_raid_config_job_ids.extend(job_details['raid_config_job_ids'])
+i_raid_config_parameters.extend(job_details['raid_config_parameters'])
+node.set_driver_internal_info('raid_config_substep', substep)
+node.set_driver_internal_info('raid_config_parameters',
+i_raid_config_parameters)
+node.set_driver_internal_info('raid_config_job_ids', i_raid_config_job_ids)
# Signal whether the node has been rebooted, that we do not need to execute
# the step again, and that this completion of this step is triggered
@@ -1472,10 +1466,9 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
deploy_utils.prepare_agent_boot(task)
# Reboot already done by non real time task
task.upgrade_lock()
-info = task.node.driver_internal_info
-info['raid_task_monitor_uris'] = [
-tm.task_monitor_uri for tm in task_mons]
-task.node.driver_internal_info = info
+task.node.set_driver_internal_info(
+'raid_task_monitor_uris',
+[tm.task_monitor_uri for tm in task_mons])
task.node.save()
return True
@@ -1526,27 +1519,25 @@ class DracRedfishRAID(redfish_raid.RedfishRAID):
'message': ', '.join(messages)}))
task.upgrade_lock()
-info = node.driver_internal_info
if failed_msgs:
error_msg = (_("Failed RAID configuration tasks: %(messages)s")
% {'messages': ', '.join(failed_msgs)})
log_msg = ("RAID configuration task failed for node "
"%(node)s. %(error)s" % {'node': node.uuid,
'error': error_msg})
-info.pop('raid_task_monitor_uris', None)
+node.del_driver_internal_info('raid_task_monitor_uris')
self._set_failed(task, log_msg, error_msg)
else:
running_task_mon_uris = [x for x in task_mon_uris
if x not in completed_task_mon_uris]
if running_task_mon_uris:
-info['raid_task_monitor_uris'] = running_task_mon_uris
-node.driver_internal_info = info
+node.set_driver_internal_info('raid_task_monitor_uris',
+running_task_mon_uris)
# will check remaining jobs in the next period
else:
# all tasks completed and none of them failed
-info.pop('raid_task_monitor_uris', None)
+node.del_driver_internal_info('raid_task_monitor_uris')
self._set_success(task)
-node.driver_internal_info = info
node.save()
def _set_failed(self, task, log_msg, error_msg):
@@ -1671,9 +1662,8 @@ class DracWSManRAID(base.RAIDInterface):
physical_disk_name)
# adding logical_disks to driver_internal_info to create virtual disks
-driver_internal_info = node.driver_internal_info
-driver_internal_info[
-"logical_disks_to_create"] = logical_disks_to_create
+node.set_driver_internal_info('logical_disks_to_create',
+logical_disks_to_create)
commit_results = None
if logical_disks_to_create:
@@ -1688,8 +1678,8 @@ class DracWSManRAID(base.RAIDInterface):
substep="create_virtual_disks")
volume_validation = True if commit_results else False
-driver_internal_info['volume_validation'] = volume_validation
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('volume_validation',
+volume_validation)
node.save()
if commit_results:
@@ -1843,33 +1833,27 @@ class DracWSManRAID(base.RAIDInterface):
self._complete_raid_substep(task, node)
def _clear_raid_substep(self, node):
-driver_internal_info = node.driver_internal_info
-driver_internal_info.pop('raid_config_substep', None)
-driver_internal_info.pop('raid_config_parameters', None)
-node.driver_internal_info = driver_internal_info
+node.del_driver_internal_info('raid_config_substep')
+node.del_driver_internal_info('raid_config_parameters')
node.save()
def _set_raid_config_job_failure(self, node):
-driver_internal_info = node.driver_internal_info
-driver_internal_info['raid_config_job_failure'] = True
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('raid_config_job_failure', True)
node.save()
def _clear_raid_config_job_failure(self, node):
-driver_internal_info = node.driver_internal_info
-del driver_internal_info['raid_config_job_failure']
-node.driver_internal_info = driver_internal_info
+node.del_driver_internal_info('raid_config_job_failure')
node.save()
def _delete_cached_config_job_id(self, node, finished_config_job_ids=None):
if finished_config_job_ids is None:
finished_config_job_ids = []
-driver_internal_info = node.driver_internal_info
-unfinished_job_ids = [job_id for job_id
-in driver_internal_info['raid_config_job_ids']
+unfinished_job_ids = [
+job_id for job_id
+in node.driver_internal_info['raid_config_job_ids']
if job_id not in finished_config_job_ids]
-driver_internal_info['raid_config_job_ids'] = unfinished_job_ids
-node.driver_internal_info = driver_internal_info
+node.set_driver_internal_info('raid_config_job_ids',
+unfinished_job_ids)
node.save()
def _set_failed(self, task, config_job):
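
A closing note on the list-valued keys such as 'bios_config_job_ids' and 'raid_config_job_ids': the new code reads the stored list (with an empty default), mutates that copy, and writes it back through the setter so the update is tracked before saving. A short sketch of that pattern, with new_job_id as a placeholder:

    # Read, update, and write back a list stored in driver_internal_info.
    job_ids = node.driver_internal_info.get('raid_config_job_ids', [])
    job_ids.append(new_job_id)  # new_job_id is a placeholder value
    node.set_driver_internal_info('raid_config_job_ids', job_ids)
    node.save()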