From cedb2ad0eadac9e9c9d39c59895071b681363add Mon Sep 17 00:00:00 2001
From: Rachit7194
Date: Mon, 2 Mar 2020 13:57:36 -0500
Subject: [PATCH] DRAC: Fix a failure to create virtual disk bug

Certain RAID controllers (PERC H730P) require physical disks to be
switched from non-RAID (JBOD) mode to RAID mode to be included in a
virtual disk. When this conversion happens, the available free space on
the physical disk is reduced due to some space being allocated to RAID
mode housekeeping.

If the user requests a virtual disk (a RAID 1 for example) with a size
close to the max size of the physical disks when they are in JBOD mode,
then creation of the virtual disk following conversion of the physical
disks from JBOD to RAID mode will fail since there is not enough space
due to the space used by RAID mode housekeeping.

This patch works around this issue by recalculating the RAID volume
size after physical disk conversion has completed and the free space on
the converted drives is known. Note that this may result in a virtual
disk that is slightly smaller than the requested size, but still the
max size that the drives can support.

Conflicts:
    ironic/tests/unit/drivers/modules/drac/test_raid.py

Change-Id: I720ab15e811f498aa15b88bfe8bb35fc49df292b
Story: 2007359
Task: 38912
(cherry picked from commit 84e8b11a6d1ec04da188f5b34a1396cd92d88d5a)
---
 ironic/drivers/modules/drac/raid.py           |  48 +++++++-
 .../unit/drivers/modules/drac/test_raid.py    | 108 ++++++++++++++++++
 ...before-raid-creation-ea1f7eb425f79f2f.yaml |  22 ++++
 3 files changed, 176 insertions(+), 2 deletions(-)
 create mode 100644 releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml

diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index d9aafb0093..e1e1a78b7c 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -848,6 +848,40 @@ def _create_config_job(node, controller, reboot=False, realtime=False,
             'raid_config_parameters': raid_config_parameters}
 
 
+def _validate_volume_size(node, logical_disks):
+    new_physical_disks = list_physical_disks(node)
+    free_space_mb = {}
+    new_processed_volumes = []
+    for disk in new_physical_disks:
+        free_space_mb[disk] = disk.free_size_mb
+
+    for logical_disk in logical_disks:
+        selected_disks = [disk for disk in new_physical_disks
+                          if disk.id in logical_disk['physical_disks']]
+
+        spans_count = _calculate_spans(
+            logical_disk['raid_level'], len(selected_disks))
+
+        new_max_vol_size_mb = _max_volume_size_mb(
+            logical_disk['raid_level'],
+            selected_disks,
+            free_space_mb,
+            spans_count=spans_count)
+
+        if logical_disk['size_mb'] > new_max_vol_size_mb:
+            logical_disk['size_mb'] = new_max_vol_size_mb
+            LOG.info("Logical size does not match so calculating volume "
+                     "properties for current logical_disk")
+            _calculate_volume_props(
+                logical_disk, new_physical_disks, free_space_mb)
+            new_processed_volumes.append(logical_disk)
+
+    if new_processed_volumes:
+        return new_processed_volumes
+
+    return logical_disks
+
+
 def _commit_to_controllers(node, controllers, substep="completed"):
     """Commit changes to RAID controllers on the node.
 
@@ -940,6 +974,13 @@ def _create_virtual_disks(task, node):
     logical_disks_to_create = node.driver_internal_info[
         'logical_disks_to_create']
 
+    # Check valid properties attached to volume after drives conversion
+    isVolValidationNeeded = node.driver_internal_info[
+        'volume_validation']
+    if isVolValidationNeeded:
+        logical_disks_to_create = _validate_volume_size(
+            node, logical_disks_to_create)
+
     controllers = list()
     for logical_disk in logical_disks_to_create:
         controller = dict()
@@ -1085,8 +1126,6 @@ class DracWSManRAID(base.RAIDInterface):
         driver_internal_info = node.driver_internal_info
         driver_internal_info[
             "logical_disks_to_create"] = logical_disks_to_create
-        node.driver_internal_info = driver_internal_info
-        node.save()
 
         commit_results = None
         if logical_disks_to_create:
@@ -1100,6 +1139,11 @@ class DracWSManRAID(base.RAIDInterface):
                 controllers_to_physical_disk_ids,
                 substep="create_virtual_disks")
 
+        volume_validation = True if commit_results else False
+        driver_internal_info['volume_validation'] = volume_validation
+        node.driver_internal_info = driver_internal_info
+        node.save()
+
         if commit_results:
             return commit_results
         else:
diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py
index 415ee36ea8..5ab435524a 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py
@@ -720,6 +720,74 @@ class DracCreateRaidConfigurationHelpersTestCase(test_utils.BaseDracTest):
         self.assertEqual(expected_physical_disk_ids,
                          logical_disks[0]['physical_disks'])
 
+    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
+    def test__validate_volume_size_requested_more_than_actual_size(
+            self, mock_list_physical_disks, mock_get_drac_client):
+        mock_client = mock.Mock()
+        mock_get_drac_client.return_value = mock_client
+        self.logical_disk = {
+            'physical_disks': [
+                'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+            'raid_level': '1+0', 'is_root_volume': True,
+            'size_mb': 102400000,
+            'controller': 'RAID.Integrated.1-1'}
+
+        self.logical_disks = [self.logical_disk.copy()]
+        self.target_raid_configuration = {'logical_disks': self.logical_disks}
+        self.node.target_raid_config = self.target_raid_configuration
+        self.node.save()
+
+        physical_disks = self._generate_physical_disks()
+        mock_list_physical_disks.return_value = physical_disks
+
+        processed_logical_disks = drac_raid._validate_volume_size(
+            self.node, self.node.target_raid_config['logical_disks'])
+
+        self.assertEqual(2287104, processed_logical_disks[0]['size_mb'])
+
+    @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
+    def test__validate_volume_size_requested_less_than_actual_size(
+            self, mock_list_physical_disks, mock_get_drac_client):
+        mock_client = mock.Mock()
+        mock_get_drac_client.return_value = mock_client
+        self.logical_disk = {
+            'physical_disks': [
+                'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+            'raid_level': '1+0', 'is_root_volume': True,
+            'size_mb': 204800,
+            'controller': 'RAID.Integrated.1-1'}
+
+        self.logical_disks = [self.logical_disk.copy()]
+        self.target_raid_configuration = {'logical_disks': self.logical_disks}
+        self.node.target_raid_config = self.target_raid_configuration
+        self.node.save()
+
+        physical_disks = self._generate_physical_disks()
+        mock_list_physical_disks.return_value = physical_disks
+
+        processed_logical_disks = drac_raid._validate_volume_size(
+            self.node, self.node.target_raid_config['logical_disks'])
+
+        self.assertEqual(self.logical_disk, processed_logical_disks[0])
+
 
 class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
 
@@ -853,6 +921,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
         self.assertEqual(1, mock_change_physical_disk_state.call_count)
 
         self.node.refresh()
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         self.assertEqual(next_substep,
                          task.node.driver_internal_info[
                              'raid_config_substep'])
@@ -931,6 +1002,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
         self.assertEqual(1, mock_client.create_virtual_disk.call_count)
 
         self.node.refresh()
+        self.assertEqual(False,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         self.assertEqual(next_substep,
                          task.node.driver_internal_info[
                              'raid_config_substep'])
@@ -955,6 +1029,10 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
         mock_list_physical_disks.return_value = physical_disks
         mock_change_physical_disk_state.return_value = {
             'is_reboot_required': constants.RebootRequired.optional,
+            'conversion_results': {
+                'RAID.Integrated.1-1': {
+                    'is_reboot_required': constants.RebootRequired.false,
+                    'is_commit_required': False}},
             'commit_required_ids': ['RAID.Integrated.1-1']}
         mock_commit_config.return_value = '42'
 
@@ -964,6 +1042,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=False, create_nonroot_volumes=False,
             delete_existing=False)
 
+        self.assertEqual(False,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         self.assertEqual(0, mock_client.create_virtual_disk.call_count)
         self.assertEqual(0, mock_commit_config.call_count)
 
@@ -1029,6 +1110,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=False,
             delete_existing=True)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         mock_commit_config.assert_called_with(
             task.node, raid_controller='RAID.Integrated.1-1',
             realtime=True, reboot=False)
@@ -1084,6 +1168,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False,
@@ -1140,6 +1227,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False,
@@ -1189,6 +1279,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         self.node.refresh()
         self.assertEqual(['42'],
                          self.node.driver_internal_info['raid_config_job_ids'])
@@ -1236,6 +1329,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1',
@@ -1286,6 +1382,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1',
@@ -1343,6 +1442,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False,
@@ -1423,6 +1525,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False,
@@ -1525,6 +1630,9 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
             task, create_root_volume=True, create_nonroot_volumes=True,
             delete_existing=False)
 
+        self.assertEqual(True,
+                         task.node.driver_internal_info[
+                             'volume_validation'])
         # Commits to the controller
         mock_commit_config.assert_called_with(
             mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False,
diff --git a/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml b/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml
new file mode 100644
index 0000000000..030177a39d
--- /dev/null
+++ b/releasenotes/notes/fix-drives-conversion-before-raid-creation-ea1f7eb425f79f2f.yaml
@@ -0,0 +1,22 @@
+fixes:
+  - |
+    Certain RAID controllers (PERC H730P) require physical disks
+    to be switched from non-RAID (JBOD) mode to RAID mode to be
+    included in a virtual disk. When this conversion happens,
+    the available free space on the physical disk is reduced due
+    to some space being allocated to RAID mode housekeeping.
+    If the user requests a virtual disk (a RAID 1 for example)
+    with a size close to the max size of the physical disks when
+    they are in JBOD mode, then creation of the virtual disk
+    following conversion of the physical disks from JBOD to RAID
+    mode will fail since there is not enough space due to the
+    space used by RAID mode housekeeping.
+    This patch works around this issue by recalculating the RAID
+    volume size after physical disk conversion has completed and
+    the free space on the converted drives is known. Note that
+    this may result in a virtual disk that is slightly smaller
+    than the requested size, but still the max size that the
+    drives can support.
+    See bug
+    `2007359 <https://storyboard.openstack.org/#!/story/2007359>`_
+    for more details.
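
For reference (this note is not part of the patch itself): the fix amounts to
re-reading each physical disk's free space after the JBOD-to-RAID conversion
and capping any requested volume that no longer fits. The sketch below is a
minimal, self-contained illustration of that clamping idea; the helper names
(max_volume_size_mb, clamp_requested_sizes) and the per-level capacity math
are simplified assumptions for illustration only, not the driver's actual
_max_volume_size_mb()/_calculate_spans() logic, which also accounts for span
counts and the selected physical disks.

    # Illustrative sketch only -- mimics the clamping behaviour of
    # _validate_volume_size() with simplified, hypothetical helpers.

    def max_volume_size_mb(raid_level, free_mb_per_disk):
        """Crude capacity cap per RAID level (ignores span/stripe details)."""
        usable = min(free_mb_per_disk)   # the smallest disk bounds the volume
        n = len(free_mb_per_disk)
        if raid_level == '0':
            return usable * n
        if raid_level in ('1', '1+0'):
            return usable * (n // 2)     # mirrored pairs
        if raid_level == '5':
            return usable * (n - 1)      # one disk's worth of parity
        raise ValueError('unsupported RAID level: %s' % raid_level)

    def clamp_requested_sizes(logical_disks, free_mb_per_disk):
        """Shrink any volume whose requested size no longer fits after the
        JBOD-to-RAID conversion reduced the disks' free space."""
        for ld in logical_disks:
            new_max = max_volume_size_mb(ld['raid_level'], free_mb_per_disk)
            if ld['size_mb'] > new_max:
                # Same idea as the patch: keep the volume, just cap its size
                # at what the converted drives can actually provide.
                ld['size_mb'] = new_max
        return logical_disks

    # Example: eight disks whose post-conversion free space is 571776 MiB
    # each (the per-disk value implied by the new test's expected result).
    free_after_conversion = [571776] * 8
    request = [{'raid_level': '1+0', 'size_mb': 102400000}]
    print(clamp_requested_sizes(request, free_after_conversion))
    # -> size_mb capped at 2287104 MiB (4 mirrored pairs x 571776 MiB),
    #    matching test__validate_volume_size_requested_more_than_actual_size.

A request that already fits under the new maximum (for example the 204800 MiB
case in the second test) is left untouched, which is why that test expects the
logical disk to come back unchanged.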