Fix "'NoneType' object is not iterable" in RAID

Do not update `raid_configs` if the operation is synchronous.
First, it is not needed; second, it will not be cleaned up by the
async periodic tasks. As a result, the data remains on the node
and causes errors the next time the node enters cleaning.

Story: 2010476
Task: 47037

Change-Id: Ib1850c58d1670c3555ac9b02eb7958a1b440a339
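
To illustrate the change described above, here is a minimal, self-contained
sketch of the guarded-store pattern; the `FakeNode` class and the
`store_raid_configs` helper are stand-ins invented for this example, and only
the `set_driver_internal_info` call mirrors what the driver actually uses.

# Minimal sketch (stand-in names, not the Ironic driver code): persist the
# intermediate RAID state only when the operation is asynchronous, i.e.
# when a reboot is needed to finish it.

class FakeNode:
    """Stand-in for an Ironic node object, for this example only."""

    def __init__(self):
        self.driver_internal_info = {}

    def set_driver_internal_info(self, key, value):
        self.driver_internal_info[key] = value


def store_raid_configs(node, raid_configs, reboot_required):
    # Store only when async operation: the periodic task that polls the
    # BMC needs this data and is also the one that removes it afterwards.
    if reboot_required:
        node.set_driver_internal_info('raid_configs', raid_configs)
    # Synchronous path: nothing is persisted, so nothing can go stale.
    return raid_configs, reboot_required


node = FakeNode()
store_raid_configs(node, {'pending': {}}, reboot_required=False)
assert 'raid_configs' not in node.driver_internal_info  # sync: not stored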
Aija Jauntēva 2022-12-09 08:47:20 -05:00
parent 4d66609e95
commit 17c9e58c9e
3 changed files with 17 additions and 2 deletions


@@ -1120,6 +1120,8 @@ class RedfishRAID(base.RAIDInterface):
raid_configs['pending'].setdefault(controller, []).append(
logical_disk)
# Store only when async operation
if reboot_required:
node.set_driver_internal_info('raid_configs', raid_configs)
return raid_configs, reboot_required
@@ -1182,6 +1184,8 @@ class RedfishRAID(base.RAIDInterface):
response.task_monitor_uri)
reboot_required = True
# Store only when async operation
if reboot_required:
node.set_driver_internal_info('raid_configs', raid_configs)
return raid_configs, reboot_required


@@ -336,6 +336,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
self.assertIsNone(
task.node.driver_internal_info.get('raid_configs'))
self.assertEqual(
[{'controller': 'RAID controller 1',
'id': '1',
@@ -1066,6 +1068,8 @@ class RedfishRAIDTestCase(db_base.DbTestCase):
self.assertEqual(mock_node_power_action.call_count, 0)
self.assertEqual(mock_build_agent_options.call_count, 0)
self.assertEqual(mock_prepare_ramdisk.call_count, 0)
self.assertIsNone(
task.node.driver_internal_info.get('raid_configs'))
self.assertEqual([], task.node.raid_config['logical_disks'])
self.assertNotEqual(
last_updated, task.node.raid_config['last_updated'])


@@ -0,0 +1,7 @@
---
fixes:
  - |
    Fixes the ``'NoneType' object is not iterable`` error in conductor logs
    for ``redfish`` and ``idrac-redfish`` RAID clean and deploy steps. For
    nodes that are already affected, re-create the node or delete the
    ``raid_configs`` entry from the ``driver_internal_info`` field.
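
For illustration only, a small hypothetical sketch of how a stale
``raid_configs`` entry can surface as this error on a later cleaning cycle;
the ``task_monitors`` key is an assumed name for the example, not the exact
structure the driver stores.

# Hypothetical illustration (not the actual Ironic code path): stale data
# left on the node is read back later, a field that was never populated
# comes back as None, and iterating it raises the logged TypeError.
driver_internal_info = {'raid_configs': {'pending': {}}}  # stale leftover

stale = driver_internal_info.get('raid_configs')
pending = stale.get('task_monitors')  # assumed key, never populated -> None

try:
    for monitor in pending:  # TypeError: 'NoneType' object is not iterable
        pass
except TypeError as exc:
    print(exc)  # 'NoneType' object is not iterable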