diff --git a/proliantutils/ilo/client.py b/proliantutils/ilo/client.py index 6adaf87f..55cd27f2 100644 --- a/proliantutils/ilo/client.py +++ b/proliantutils/ilo/client.py @@ -75,6 +75,8 @@ SUPPORTED_RIS_METHODS = [ SUPPORTED_REDFISH_METHODS = [ 'create_raid_configuration', 'delete_raid_configuration', + 'do_disk_erase', + 'has_disk_erase_completed', 'get_product_name', 'get_host_post_state', 'get_host_power_status', @@ -814,3 +816,23 @@ class IloClient(operations.IloOperations): not supported on the server. """ return self._call_method('get_bios_settings_result') + + def has_disk_erase_completed(self): + """Get out-of-band sanitize disk erase status. + + :returns: True if disk erase completed on all controllers + otherwise False + :raises: IloError, on an error from iLO. + """ + return self._call_method('has_disk_erase_completed') + + def do_disk_erase(self, disk_type, pattern=None): + """Perform the out-of-band sanitize disk erase on the hardware. + + :param disk_type: Media type of disk drives. + :param pattern: Erase pattern, if nothing passed default + ('overwrite' for 'HDD', and 'block' for 'SSD') will + be used. + :raises: IloError, on an error from iLO. + """ + return self._call_method('do_disk_erase', disk_type, pattern) diff --git a/proliantutils/ilo/operations.py b/proliantutils/ilo/operations.py index fdbe4583..dbffb5f9 100644 --- a/proliantutils/ilo/operations.py +++ b/proliantutils/ilo/operations.py @@ -497,3 +497,27 @@ class IloOperations(object): not supported on the server. """ raise exception.IloCommandNotSupportedError(ERRMSG) + + def has_disk_erase_completed(self): + """Get out of band sanitize disk erase status. + + :returns: True if disk erase completed on all controllers + otherwise False + :raises: IloError, on an error from iLO. + :raises: IloCommandNotSupportedError, if the command is + not supported on the server. + """ + raise exception.IloCommandNotSupportedError(ERRMSG) + + def do_disk_erase(self, disk_type, pattern=None): + """Perform the out of band sanitize disk erase on the hardware. + + :param disk_type: Media type of disk drives. + :param pattern: Erase pattern, if nothing passed default + ('overwrite' for 'HDD', and 'block' for 'SSD') will + be used. + :raises: IloError, on an error from iLO. + :raises: IloCommandNotSupportedError, if the command is + not supported on the server. + """ + raise exception.IloCommandNotSupportedError(ERRMSG) diff --git a/proliantutils/redfish/redfish.py b/proliantutils/redfish/redfish.py index 8a7346b0..fc0c6b04 100644 --- a/proliantutils/redfish/redfish.py +++ b/proliantutils/redfish/redfish.py @@ -1074,6 +1074,27 @@ class RedfishOperations(operations.IloOperations): sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID) sushy_system.delete_raid() + def do_disk_erase(self, disk_type, pattern=None): + """Perform the out-of-band sanitize disk erase on the hardware. + + :param disk_type: Media type of disk drives either 'HDD' or 'SSD'. + :param pattern: Erase pattern, if nothing passed default + ('overwrite' for 'HDD', and 'block' for 'SSD') will + be used. + :raises: IloError, on an error from iLO. + """ + sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID) + sushy_system.do_disk_erase(disk_type, pattern) + + def has_disk_erase_completed(self): + """Get out-of-band sanitize disk erase status. 
+
+        :returns: True if disk erase completed on all controllers
+                  otherwise False
+        """
+        sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
+        return sushy_system.has_disk_erase_completed()
+
     def get_current_bios_settings(self, only_allowed_settings=True):
         """Get current BIOS settings.
 
diff --git a/proliantutils/redfish/resources/system/smart_storage_config.py b/proliantutils/redfish/resources/system/smart_storage_config.py
index d0720f56..0350a715 100644
--- a/proliantutils/redfish/resources/system/smart_storage_config.py
+++ b/proliantutils/redfish/resources/system/smart_storage_config.py
@@ -18,12 +18,18 @@ from sushy.resources import base
 
 from proliantutils.hpssa import constants
 from proliantutils.hpssa import manager
+from proliantutils.redfish.resources.system.storage import \
+    constants as storage_const
+from proliantutils.redfish.resources.system.storage import \
+    mappings as storage_map
 
 LOG = log.get_logger(__name__)
 
 
 class LogicalDriveListField(base.ListField):
 
+    data_drives = base.Field("DataDrives", adapter=list, default=[])
+
     volume_unique_identifier = base.Field('VolumeUniqueIdentifier',
                                           required=True)
 
@@ -178,3 +184,35 @@ class HPESmartStorageConfig(base.ResourceBase):
             "LogicalDrives": redfish_logical_disk
         }
         self._conn.put(self.settings_uri, data=data)
+
+    def disk_erase(self, disks, disk_type, pattern):
+        """Performs out of band sanitize disk erase on the hardware.
+
+        :param disks: List of locations of the disk drives.
+        :param disk_type: Media type of disk drives.
+        :param pattern: Erase pattern, if nothing passed default
+                        ('overwrite' for 'HDD', and 'block' for 'SSD') will
+                        be used.
+        """
+        hdd = storage_map.MEDIA_TYPE_MAP_REV[storage_const.MEDIA_TYPE_HDD]
+        if not pattern:
+            erase_pattern = storage_const.ERASE_PATTERN_OVERWRITE if (
+                disk_type == hdd) else storage_const.ERASE_PATTERN_BLOCK
+        else:
+            erase_pattern = storage_map.DISK_ERASE_PATTERN[pattern]
+
+        data = {"Actions": [{"Action": "PhysicalDriveErase",
+                             "ErasePattern": erase_pattern,
+                             "PhysicalDriveList": disks}],
+                "DataGuard": "Disabled"}
+        self._conn.patch(self.settings_uri, data=data)
+
+    def get_drives_has_raid(self):
+        """Return the list of drives that have RAID configured.
+
+        :return: List of disk drive locations
+        """
+        drives = []
+        for ld in self.logical_drives:
+            drives.extend(ld.data_drives)
+        return drives
diff --git a/proliantutils/redfish/resources/system/storage/array_controller.py b/proliantutils/redfish/resources/system/storage/array_controller.py
index af58b3cd..60f62b5d 100644
--- a/proliantutils/redfish/resources/system/storage/array_controller.py
+++ b/proliantutils/redfish/resources/system/storage/array_controller.py
@@ -156,3 +156,13 @@ class HPEArrayControllerCollection(base.ResourceCollectionBase):
         for member in self.get_members():
             if member.model == model:
                 return member
+
+    def get_all_controllers_model(self):
+        """Returns the models of all array controllers.
+
+        :returns: List of models of the array controllers
+        """
+        models = []
+        for member in self.get_members():
+            models.append(member.model)
+        return models
diff --git a/proliantutils/redfish/resources/system/storage/constants.py b/proliantutils/redfish/resources/system/storage/constants.py
index cc20af68..f25b8b6a 100644
--- a/proliantutils/redfish/resources/system/storage/constants.py
+++ b/proliantutils/redfish/resources/system/storage/constants.py
@@ -51,3 +51,9 @@ RAID_6 = 'raid 6'
 RAID_6_0 = 'raid 6+0'
 RAID_1ADM = 'raid 1adm'
 RAID_10ADM = 'raid_10adm'
+
+# Erase patterns
+ERASE_PATTERN_BLOCK = 'SanitizeRestrictedBlockErase'
+ERASE_PATTERN_OVERWRITE = 'SanitizeRestrictedOverwrite'
+ERASE_PATTERN_CRYPTO = 'SanitizeRestrictedCryptoScramble'
+ERASE_PATTERN_ZERO = 'OnePass'
diff --git a/proliantutils/redfish/resources/system/storage/mappings.py b/proliantutils/redfish/resources/system/storage/mappings.py
index 3c8b7cb8..281cb6d7 100644
--- a/proliantutils/redfish/resources/system/storage/mappings.py
+++ b/proliantutils/redfish/resources/system/storage/mappings.py
@@ -46,6 +46,9 @@ MEDIA_TYPE_MAP = {
     'HDD': constants.MEDIA_TYPE_HDD
 }
 
+MEDIA_TYPE_MAP_REV = (
+    utils.revert_dictionary(MEDIA_TYPE_MAP))
+
 RAID_LEVEL_MAP = {
     '0': constants.RAID_0,
     '1': constants.RAID_1,
@@ -60,3 +63,10 @@ RAID_LEVEL_MAP = {
 
 RAID_LEVEL_MAP_REV = (
     utils.revert_dictionary(RAID_LEVEL_MAP))
+
+DISK_ERASE_PATTERN = {
+    'block': constants.ERASE_PATTERN_BLOCK,
+    'overwrite': constants.ERASE_PATTERN_OVERWRITE,
+    'crypto': constants.ERASE_PATTERN_CRYPTO,
+    'zero': constants.ERASE_PATTERN_ZERO
+}
diff --git a/proliantutils/redfish/resources/system/storage/physical_drive.py b/proliantutils/redfish/resources/system/storage/physical_drive.py
index fab7277e..43108012 100644
--- a/proliantutils/redfish/resources/system/storage/physical_drive.py
+++ b/proliantutils/redfish/resources/system/storage/physical_drive.py
@@ -30,8 +30,12 @@ class HPEPhysicalDrive(base.ResourceBase):
 
     description = base.Field('Description')
 
+    disk_erase_reason = base.Field('DiskDriveStatusReasons', adapter=list)
+
     capacity_mib = base.Field('CapacityMiB', adapter=int)
 
+    location = base.Field('Location')
+
     media_type = base.MappedField('MediaType', mappings.MEDIA_TYPE_MAP)
 
     rotational_speed_rpm = base.Field('RotationalSpeedRpm', adapter=int)
@@ -63,6 +67,14 @@ class HPEPhysicalDriveCollection(base.ResourceCollectionBase):
                 return True
         return False
 
+    @property
+    def has_disk_erase_completed(self):
+        """Return True if disk erase has completed on all drives."""
+        for member in self.get_members():
+            if 'EraseCompleted' not in member.disk_erase_reason:
+                return False
+        return True
+
     @property
     @sushy_utils.cache_it
     def has_rotational(self):
@@ -81,3 +93,25 @@ class HPEPhysicalDriveCollection(base.ResourceCollectionBase):
             if member.rotational_speed_rpm is not None:
                 drv_rot_speed_rpm.add(member.rotational_speed_rpm)
         return drv_rot_speed_rpm
+
+    def get_all_hdd_drives_locations(self):
+        """Returns the locations of all HDD drives.
+
+        :returns: List of HDD drive locations
+        """
+        hdds = []
+        for member in self.get_members():
+            if member.media_type == constants.MEDIA_TYPE_HDD:
+                hdds.append(member.location)
+        return hdds
+
+    def get_all_ssd_drives_locations(self):
+        """Returns the locations of all SSD drives.
+
+        :returns: List of SSD drive locations
+        """
+        ssds = []
+        for member in self.get_members():
+            if member.media_type == constants.MEDIA_TYPE_SSD:
+                ssds.append(member.location)
+        return ssds
diff --git a/proliantutils/redfish/resources/system/system.py b/proliantutils/redfish/resources/system/system.py
index ba86da22..9d067643 100644
--- a/proliantutils/redfish/resources/system/system.py
+++ b/proliantutils/redfish/resources/system/system.py
@@ -14,6 +14,7 @@ __author__ = 'HPE'
 
+import re
 import sushy
 from sushy.resources import base
 from sushy.resources.system import system
@@ -29,6 +30,10 @@ from proliantutils.redfish.resources.system import memory
 from proliantutils.redfish.resources.system import pci_device
 from proliantutils.redfish.resources.system import secure_boot
 from proliantutils.redfish.resources.system import smart_storage_config
+from proliantutils.redfish.resources.system.storage import \
+    constants as storage_const
+from proliantutils.redfish.resources.system.storage import \
+    mappings as storage_map
 from proliantutils.redfish.resources.system.storage import simple_storage
 from proliantutils.redfish.resources.system.storage import \
     smart_storage as hpe_smart_storage
@@ -326,6 +331,107 @@ class HPESystem(system.System):
                    'to delete.')
             raise exception.IloLogicalDriveNotFoundError(msg)
+
+    def _get_drives_has_raid(self):
+        drives = []
+        ssc_ids = self.smart_storage_config_identities
+        for ssc_id in ssc_ids:
+            ssc_obj = self.get_smart_storage_config(ssc_id)
+            drives.extend(ssc_obj.get_drives_has_raid())
+        return drives
+
+    def do_disk_erase(self, disk_type, pattern):
+        """Performs out-of-band sanitize disk erase on the hardware.
+
+        :param disk_type: Media type of disk drives either 'HDD' or 'SSD'.
+        :param pattern: Erase pattern, if nothing passed default
+                        ('overwrite' for 'HDD', and 'block' for 'SSD') will
+                        be used.
+        :raises: IloError, on an error from iLO.
+        """
+        try:
+            current_controller = None
+            controllers = (
+                self.smart_storage.array_controllers.
+                get_all_controllers_model())
+            for controller in controllers:
+                current_controller = controller
+                # Skip 'S' series controllers by model name (e.g. 'HPE Smart
+                # Array S100i SR Gen10'); they don't support sanitize erase.
+                if re.search("^HPE Smart Array S[0-9]{3}", controller) is None:
+                    controller_obj = (
+                        self.smart_storage.array_controllers.
+                        array_controller_by_model(controller))
+                    ssc_obj = (
+                        self._get_smart_storage_config_by_controller_model(
+                            controller))
+                    if disk_type == (storage_map.MEDIA_TYPE_MAP_REV[
+                            storage_const.MEDIA_TYPE_HDD]):
+                        disks = (
+                            controller_obj.
+                            physical_drives.get_all_hdd_drives_locations())
+                    else:
+                        disks = (
+                            controller_obj.
+                            physical_drives.get_all_ssd_drives_locations())
+
+                    assigned_disks = self._get_drives_has_raid()
+
+                    unassigned_disks = []
+
+                    not_erasable_disks = []
+
+                    for disk in disks:
+                        if disk in assigned_disks:
+                            not_erasable_disks.append(disk)
+                        else:
+                            unassigned_disks.append(disk)
+
+                    if unassigned_disks:
+                        ssc_obj.disk_erase(unassigned_disks, disk_type,
+                                           pattern)
+
+                    if not_erasable_disks:
+                        LOG.info("Skipping disks %(disks)s: they are "
+                                 "part of a RAID configuration and "
+                                 "cannot be erased."
+                                 % {'disks': not_erasable_disks})
+                else:
+                    LOG.warn("Smart array controller: %(controller)s, doesn't "
+                             "support sanitize disk erase. All the disks of "
+                             "the controller are ignored."
+                             % {'controller': current_controller})
+        except sushy.exceptions.SushyError as e:
+            msg = ("The Redfish controller failed to perform the sanitize "
+                   "disk erase on smart storage controller: %(controller)s, "
+                   "on disk_type: %(disk_type)s with error: %(error)s"
+                   % {'controller': current_controller, 'disk_type': disk_type,
+                      'error': str(e)})
+            raise exception.IloError(msg)
+
+    def has_disk_erase_completed(self):
+        """Get out-of-band sanitize disk erase status.
+
+        :returns: True if disk erase completed on all controllers
+                  otherwise False
+        :raises: IloError, on an error from iLO.
+        """
+        try:
+            controllers = (self.smart_storage.array_controllers.
+                           get_all_controllers_model())
+            for controller in controllers:
+                controller_obj = (self.smart_storage.array_controllers.
+                                  array_controller_by_model(controller))
+                if controller_obj.physical_drives.has_disk_erase_completed:
+                    continue
+                else:
+                    return False
+            return True
+        except sushy.exceptions.SushyError as e:
+            msg = ('The Redfish controller failed to get the status of '
+                   'sanitize disk erase. 
Error: %(error)s' + % {'error': str(e)}) + raise exception.IloError(msg) + def _parse_raid_config_data(self, raid_config): """It will parse raid config data based on raid controllers diff --git a/proliantutils/tests/ilo/test_client.py b/proliantutils/tests/ilo/test_client.py index ab5de7d2..ba0a1511 100644 --- a/proliantutils/tests/ilo/test_client.py +++ b/proliantutils/tests/ilo/test_client.py @@ -829,6 +829,55 @@ class IloClientTestCase(testtools.TestCase): 'on ProLiant DL380 G8', self.client.delete_raid_configuration) + @mock.patch.object(client.IloClient, '_call_method') + def test_do_disk_erase(self, call_mock): + self.client.do_disk_erase('SSD', None) + call_mock.assert_called_once_with( + 'do_disk_erase', 'SSD', None) + + @mock.patch.object(ris.RISOperations, 'get_product_name') + def test_do_disk_erase_gen9(self, get_product_mock): + self.client.model = 'Gen9' + get_product_mock.return_value = 'ProLiant BL460c Gen9' + self.assertRaisesRegexp(exception.IloCommandNotSupportedError, + 'The specified operation is not supported ' + 'on current platform.', + self.client.do_disk_erase, + 'SSD', None) + + @mock.patch.object(ribcl.RIBCLOperations, 'get_product_name') + def test_do_disk_erase_gen8(self, get_product_mock): + self.client.model = 'Gen8' + get_product_mock.return_value = 'ProLiant DL380 G8' + self.assertRaisesRegexp(exception.IloCommandNotSupportedError, + 'The specified operation is not supported ' + 'on current platform.', + self.client.do_disk_erase, + 'SSD', None) + + @mock.patch.object(client.IloClient, '_call_method') + def test_has_disk_erase_completed(self, call_mock): + self.client.has_disk_erase_completed() + call_mock.assert_called_once_with('has_disk_erase_completed') + + @mock.patch.object(ris.RISOperations, 'get_product_name') + def test_has_disk_erase_completed_gen9(self, get_product_mock): + self.client.model = 'Gen9' + get_product_mock.return_value = 'ProLiant BL460c Gen9' + self.assertRaisesRegexp(exception.IloCommandNotSupportedError, + 'The specified operation is not supported ' + 'on current platform.', + self.client.has_disk_erase_completed) + + @mock.patch.object(ribcl.RIBCLOperations, 'get_product_name') + def test_has_disk_erase_completed_gen8(self, get_product_mock): + self.client.model = 'Gen8' + get_product_mock.return_value = 'ProLiant DL380 G8' + self.assertRaisesRegexp(exception.IloCommandNotSupportedError, + 'The specified operation is not supported ' + 'on current platform.', + self.client.has_disk_erase_completed) + @mock.patch.object(client.IloClient, '_call_method') def test_create_raid_configuration(self, call_mock): ld1 = {"size_gb": 150, "raid_level": '0', "is_root_volume": True} diff --git a/proliantutils/tests/redfish/json_samples/disk_drive.json b/proliantutils/tests/redfish/json_samples/disk_drive.json index 73403a7b..a78aac20 100644 --- a/proliantutils/tests/redfish/json_samples/disk_drive.json +++ b/proliantutils/tests/redfish/json_samples/disk_drive.json @@ -1,4 +1,66 @@ { + "disk-erase-progress": { + "@odata.context": "/redfish/v1/$metadata#HpeSmartStorageDiskDrive.HpeSmartStorageDiskDrive", + "@odata.etag": "W/\"E852783E\"", + "@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/0/DiskDrives/1/", + "@odata.type": "#HpeSmartStorageDiskDrive.v2_0_0.HpeSmartStorageDiskDrive", + "Id": "1", + "BlockSizeBytes": 512, + "CapacityGB": 1200, + "CapacityLogicalBlocks": 2344225968, + "CapacityMiB": 1144641, + "CarrierApplicationVersion": "11", + "CarrierAuthenticationStatus": "OK", + "CurrentTemperatureCelsius": 0, + 
"Description": "HPE Smart Storage Disk Drive View", + "DiskDriveStatusReasons": [ + "EraseInProgress" + ], + "DiskDriveUse": "Raw", + "EncryptedDrive": false, + "FirmwareVersion": { + "Current": { + "VersionString": "HPD6" + } + }, + "MediaType": "HDD", + "InterfaceSpeedMbps": 12000, + "InterfaceType": "SAS", + "LegacyBootPriority": "Primary", + "Location": "1I:2:3", + "LocationFormat": "ControllerPort:Box:Bay" + }, + "disk-erase-completed": { + "@odata.context": "/redfish/v1/$metadata#HpeSmartStorageDiskDrive.HpeSmartStorageDiskDrive", + "@odata.etag": "W/\"E852783E\"", + "@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/0/DiskDrives/1/", + "@odata.type": "#HpeSmartStorageDiskDrive.v2_0_0.HpeSmartStorageDiskDrive", + "BlockSizeBytes": 512, + "CapacityGB": 1200, + "CapacityLogicalBlocks": 2344225968, + "CapacityMiB": 1144641, + "CarrierApplicationVersion": "11", + "CarrierAuthenticationStatus": "OK", + "CurrentTemperatureCelsius": 0, + "Description": "HPE Smart Storage Disk Drive View", + "DiskDriveStatusReasons": [ + "EraseCompleted" + ], + "DiskDriveUse": "Raw", + "EncryptedDrive": false, + "FirmwareVersion": { + "Current": { + "VersionString": "HPD6" + } + }, + "Id": "1", + "MediaType": "HDD", + "InterfaceSpeedMbps": 12000, + "InterfaceType": "SAS", + "LegacyBootPriority": "Primary", + "Location": "1I:2:3", + "LocationFormat": "ControllerPort:Box:Bay" + }, "drive1": { "@odata.context": "/redfish/v1/$metadata#Systems/Members/1/SmartStorage/ArrayControllers/Members/0/DiskDrives/Members/$entity", "@odata.etag": "W/\"559F84DD\"", diff --git a/proliantutils/tests/redfish/resources/system/storage/test_array_controller.py b/proliantutils/tests/redfish/resources/system/storage/test_array_controller.py index fed76586..6be0a56a 100644 --- a/proliantutils/tests/redfish/resources/system/storage/test_array_controller.py +++ b/proliantutils/tests/redfish/resources/system/storage/test_array_controller.py @@ -276,3 +276,13 @@ class HPEArrayControllerCollectionTestCase(testtools.TestCase): model = 'HPE Smart Array P408i-a SR Gen10' result_model = self.sys_stor_col.array_controller_by_model(model).model self.assertEqual(result_model, model) + + def test_get_all_controllers_model(self): + self.conn.get.return_value.json.reset_mock() + with open('proliantutils/tests/redfish/' + 'json_samples/array_controller.json', 'r') as f: + ac_json = json.loads(f.read()) + self.conn.get.return_value.json.return_value = ac_json + model = ['HPE Smart Array P408i-a SR Gen10'] + result_model = self.sys_stor_col.get_all_controllers_model() + self.assertEqual(result_model, model) diff --git a/proliantutils/tests/redfish/resources/system/storage/test_physical_drive.py b/proliantutils/tests/redfish/resources/system/storage/test_physical_drive.py index 3a38a532..d71120c3 100644 --- a/proliantutils/tests/redfish/resources/system/storage/test_physical_drive.py +++ b/proliantutils/tests/redfish/resources/system/storage/test_physical_drive.py @@ -116,6 +116,48 @@ class HPEPhysicalDriveCollectionTestCase(testtools.TestCase): actual = self.sys_stor_col.has_ssd self.assertTrue(actual) + def test_get_all_hdd_drives_locations(self): + self.conn.get.return_value.json.reset_mock() + path = ('proliantutils/tests/redfish/json_samples/' + 'disk_drive.json') + with open(path, 'r') as f: + dr_json = json.loads(f.read()) + val = [dr_json['drive1'], dr_json['drive2']] + self.conn.get.return_value.json.side_effect = val + self.assertEqual(self.sys_stor_col.get_all_hdd_drives_locations(), + ['1I:0:1']) + + def 
test_get_all_ssd_drives_locations(self): + self.conn.get.return_value.json.reset_mock() + path = ('proliantutils/tests/redfish/json_samples/' + 'disk_drive.json') + with open(path, 'r') as f: + dr_json = json.loads(f.read()) + val = [dr_json['drive1'], dr_json['drive2']] + self.conn.get.return_value.json.side_effect = val + self.assertEqual(self.sys_stor_col.get_all_ssd_drives_locations(), + ['1I:0:1']) + + def test_has_disk_erase_completed_true(self): + self.conn.get.return_value.json.reset_mock() + path = ('proliantutils/tests/redfish/json_samples/' + 'disk_drive.json') + with open(path, 'r') as f: + dr_json = json.loads(f.read()) + self.conn.get.return_value.json.return_value = dr_json[ + 'disk-erase-completed'] + self.assertTrue(self.sys_stor_col.has_disk_erase_completed) + + def test_has_disk_erase_completed_false(self): + self.conn.get.return_value.json.reset_mock() + path = ('proliantutils/tests/redfish/json_samples/' + 'disk_drive.json') + with open(path, 'r') as f: + dr_json = json.loads(f.read()) + self.conn.get.return_value.json.return_value = dr_json[ + 'disk-erase-progress'] + self.assertFalse(self.sys_stor_col.has_disk_erase_completed) + def test_has_rotational(self): self.conn.get.return_value.json.reset_mock() path = ('proliantutils/tests/redfish/json_samples/' diff --git a/proliantutils/tests/redfish/resources/system/test_smart_storage_config.py b/proliantutils/tests/redfish/resources/system/test_smart_storage_config.py index 79917a28..64aa58b3 100644 --- a/proliantutils/tests/redfish/resources/system/test_smart_storage_config.py +++ b/proliantutils/tests/redfish/resources/system/test_smart_storage_config.py @@ -21,6 +21,8 @@ import testtools from proliantutils import exception from proliantutils.hpssa import manager from proliantutils.redfish.resources.system import smart_storage_config +from proliantutils.redfish.resources.system.storage import \ + mappings as storage_map class HPESmartStorageConfigTestCase(testtools.TestCase): @@ -47,6 +49,9 @@ class HPESmartStorageConfigTestCase(testtools.TestCase): self.assertEqual( '600508B1001C045A9BAAC9F4F49498AE', self.ssc_inst.logical_drives[0].volume_unique_identifier) + self.assertEqual( + ["2I:1:2", "2I:1:1"], + self.ssc_inst.logical_drives[0].data_drives) self.assertEqual("/redfish/v1/systems/1/smartstorageconfig/settings/", self.ssc_inst.settings_uri) @@ -198,3 +203,39 @@ class HPESmartStorageConfigTestCase(testtools.TestCase): self.ssc_inst.read_raid() self.assertTrue(message_mock.called) self.assertFalse(format_mock.called) + + def test_disk_erase_hdd(self): + settings_uri = "/redfish/v1/systems/1/smartstorageconfig/settings/" + self.ssc_inst.disk_erase(['1I:0:1'], 'HDD', None) + data = {"Actions": [{"Action": "PhysicalDriveErase", + "ErasePattern": "SanitizeRestrictedOverwrite", + "PhysicalDriveList": ['1I:0:1']}], + "DataGuard": "Disabled"} + self.ssc_inst._conn.patch.assert_called_once_with(settings_uri, + data=data) + + def test_disk_erase_ssd(self): + settings_uri = "/redfish/v1/systems/1/smartstorageconfig/settings/" + self.ssc_inst.disk_erase(['1I:0:1', '1I:0:2'], 'SSD', None) + data = {"Actions": [{"Action": "PhysicalDriveErase", + "ErasePattern": "SanitizeRestrictedBlockErase", + "PhysicalDriveList": ['1I:0:1', '1I:0:2']}], + "DataGuard": "Disabled"} + self.ssc_inst._conn.patch.assert_called_once_with(settings_uri, + data=data) + + def test_disk_erase_ssd_user_pattern_zero(self): + settings_uri = "/redfish/v1/systems/1/smartstorageconfig/settings/" + self.ssc_inst.disk_erase(['1I:0:1', '1I:0:2'], 'SSD', 'zero') + 
data = { + "Actions": [{"Action": "PhysicalDriveErase", + "ErasePattern": storage_map.DISK_ERASE_PATTERN[ + 'zero'], + "PhysicalDriveList": ['1I:0:1', '1I:0:2']}], + "DataGuard": "Disabled"} + self.ssc_inst._conn.patch.assert_called_once_with(settings_uri, + data=data) + + def test_get_drives_has_raid(self): + result = self.ssc_inst.get_drives_has_raid() + self.assertEqual(result, ["2I:1:2", "2I:1:1"]) diff --git a/proliantutils/tests/redfish/resources/system/test_system.py b/proliantutils/tests/redfish/resources/system/test_system.py index 69bf5f1d..8ff0f41c 100644 --- a/proliantutils/tests/redfish/resources/system/test_system.py +++ b/proliantutils/tests/redfish/resources/system/test_system.py @@ -821,3 +821,243 @@ class HPESystemTestCase(testtools.TestCase): post_delete_read_raid_mock.return_value = result self.assertEqual( result, self.sys_inst.read_raid()) + + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'get_all_controllers_model') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'array_controller_by_model') + def test_has_disk_erase_completed_true( + self, array_controller_by_model_mock, + get_all_controllers_model_mock): + get_all_controllers_model_mock.return_value = [ + 'HPE Smart Array P408i-p SR Gen10'] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + with open('proliantutils/tests/redfish/' + 'json_samples/array_controller_collection.json', 'r') as f: + acc_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + self.conn.get.return_value.json.side_effect = [ss_json, acc_json] + type(array_controller_by_model_mock. + return_value.physical_drives).has_disk_erase_completed = True + self.assertEqual(True, self.sys_inst.has_disk_erase_completed()) + + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'get_all_controllers_model') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'array_controller_by_model') + def test_has_disk_erase_completed_false( + self, array_controller_by_model_mock, + get_all_controllers_model_mock): + get_all_controllers_model_mock.return_value = [ + 'HPE Smart Array P408i-p SR Gen10'] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + with open('proliantutils/tests/redfish/' + 'json_samples/array_controller_collection.json', 'r') as f: + acc_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + self.conn.get.return_value.json.side_effect = [ss_json, acc_json] + type(array_controller_by_model_mock. + return_value.physical_drives).has_disk_erase_completed = False + self.assertEqual(False, self.sys_inst.has_disk_erase_completed()) + + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'get_all_controllers_model') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'array_controller_by_model') + def test_has_disk_erase_completed_failed( + self, array_controller_by_model_mock, + get_all_controllers_model_mock): + get_all_controllers_model_mock.return_value = [ + 'HPE Smart Array P408i-p SR Gen10'] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + (self.conn.get.return_value. 
+ json.side_effect) = [ss_json, sushy.exceptions.SushyError] + self.assertRaisesRegexp( + exception.IloError, + "The Redfish controller failed to get the status of sanitize disk " + "erase. Error:", + self.sys_inst.has_disk_erase_completed) + + @mock.patch.object(system.HPESystem, + '_get_drives_has_raid') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'get_all_controllers_model') + @mock.patch.object(system.HPESystem, + '_get_smart_storage_config_by_controller_model') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'array_controller_by_model') + def test_do_disk_erase_hdd( + self, array_controller_by_model_mock, + get_ssc_by_controller_model_mock, + get_all_controllers_model_mock, + drives_raid_mock): + get_all_controllers_model_mock.return_value = [ + 'HPE Smart Array P408i-p SR Gen10'] + drives_raid_mock.return_value = [] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + with open('proliantutils/tests/redfish/' + 'json_samples/array_controller_collection.json', 'r') as f: + acc_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + self.conn.get.return_value.json.side_effect = [ss_json, acc_json] + (array_controller_by_model_mock.return_value.physical_drives. + get_all_hdd_drives_locations.return_value) = ['2I:1:1'] + self.sys_inst.do_disk_erase('HDD', None) + get_ssc_by_controller_model_mock.assert_called_once_with( + 'HPE Smart Array P408i-p SR Gen10') + (get_ssc_by_controller_model_mock.return_value. + disk_erase.assert_called_once_with(['2I:1:1'], 'HDD', None)) + + @mock.patch.object(system.HPESystem, + '_get_drives_has_raid') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'get_all_controllers_model') + @mock.patch.object(system.HPESystem, + '_get_smart_storage_config_by_controller_model') + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'array_controller_by_model') + def test_do_disk_erase_ssd( + self, array_controller_by_model_mock, + get_ssc_by_controller_model_mock, + get_all_controllers_model_mock, + drives_raid_mock): + drives_raid_mock.return_value = [] + get_all_controllers_model_mock.return_value = [ + 'HPE Smart Array P408i-p SR Gen10'] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + with open('proliantutils/tests/redfish/' + 'json_samples/array_controller_collection.json', 'r') as f: + acc_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + self.conn.get.return_value.json.side_effect = [ss_json, acc_json] + (array_controller_by_model_mock.return_value.physical_drives. + get_all_ssd_drives_locations.return_value) = ['2I:1:1'] + self.sys_inst.do_disk_erase('SSD', None) + get_ssc_by_controller_model_mock.assert_called_once_with( + 'HPE Smart Array P408i-p SR Gen10') + (get_ssc_by_controller_model_mock.return_value. 
+         disk_erase.assert_called_once_with(['2I:1:1'], 'SSD', None))
+
+    @mock.patch.object(system.LOG, 'info', autospec=True)
+    @mock.patch.object(system.HPESystem,
+                       '_get_drives_has_raid')
+    @mock.patch.object(array_controller.HPEArrayControllerCollection,
+                       'get_all_controllers_model')
+    @mock.patch.object(system.HPESystem,
+                       '_get_smart_storage_config_by_controller_model')
+    @mock.patch.object(array_controller.HPEArrayControllerCollection,
+                       'array_controller_by_model')
+    def test_do_disk_erase_with_raid(
+            self, array_controller_by_model_mock,
+            get_ssc_by_controller_model_mock,
+            get_all_controllers_model_mock,
+            drives_raid_mock,
+            system_log_mock):
+        get_all_controllers_model_mock.return_value = [
+            'HPE Smart Array P408i-p SR Gen10']
+        drives_raid_mock.return_value = ['2I:1:2']
+        with open('proliantutils/tests/redfish/'
+                  'json_samples/smart_storage.json', 'r') as f:
+            ss_json = json.loads(f.read())
+        with open('proliantutils/tests/redfish/'
+                  'json_samples/array_controller_collection.json', 'r') as f:
+            acc_json = json.loads(f.read())
+        self.conn.get.return_value.json.reset_mock()
+        self.conn.get.return_value.json.side_effect = [ss_json, acc_json]
+        (array_controller_by_model_mock.return_value.physical_drives.
+         get_all_hdd_drives_locations.return_value) = ['2I:1:1', '2I:1:2']
+        self.sys_inst.do_disk_erase('HDD', None)
+        get_ssc_by_controller_model_mock.assert_called_once_with(
+            'HPE Smart Array P408i-p SR Gen10')
+        (get_ssc_by_controller_model_mock.return_value.
+         disk_erase.assert_called_once_with(['2I:1:1'], 'HDD', None))
+        system_log_mock.assert_called_once_with(
+            "Skipping disks ['2I:1:2']: they are part of a RAID "
+            "configuration and cannot be erased.")
+
+    @mock.patch.object(system.HPESystem,
+                       '_get_drives_has_raid')
+    @mock.patch.object(array_controller.HPEArrayControllerCollection,
+                       'get_all_controllers_model')
+    @mock.patch.object(system.HPESystem,
+                       '_get_smart_storage_config_by_controller_model')
+    @mock.patch.object(array_controller.HPEArrayControllerCollection,
+                       'array_controller_by_model')
+    @mock.patch.object(system.LOG, 'warn', autospec=True)
+    def test_do_disk_erase_with_S_and_P_series_controller(
+            self, system_log_mock, array_controller_by_model_mock,
+            get_ssc_by_controller_model_mock, get_all_controllers_model_mock,
+            drives_raid_mock):
+        drives_raid_mock.return_value = []
+        get_all_controllers_model_mock.return_value = [
+            'HPE Smart Array S100i SR Gen10',
+            'HPE Smart Array P408i-p SR Gen10']
+        with open('proliantutils/tests/redfish/'
+                  'json_samples/smart_storage.json', 'r') as f:
+            ss_json = json.loads(f.read())
+        with open('proliantutils/tests/redfish/'
+                  'json_samples/array_controller_collection.json', 'r') as f:
+            acc_json = json.loads(f.read())
+        self.conn.get.return_value.json.reset_mock()
+        self.conn.get.return_value.json.side_effect = [ss_json, acc_json]
+        (array_controller_by_model_mock.return_value.physical_drives.
+         get_all_ssd_drives_locations.return_value) = ['2I:1:1']
+        self.sys_inst.do_disk_erase('SSD', None)
+        get_ssc_by_controller_model_mock.assert_called_once_with(
+            'HPE Smart Array P408i-p SR Gen10')
+        (get_ssc_by_controller_model_mock.return_value.
+         disk_erase.assert_called_once_with(['2I:1:1'], 'SSD', None))
+        system_log_mock.assert_called_once_with(
+            "Smart array controller: HPE Smart Array S100i SR Gen10, doesn't "
+            "support sanitize disk erase. 
All the disks of the controller are " + "ignored.") + + @mock.patch.object(array_controller.HPEArrayControllerCollection, + 'get_all_controllers_model') + def test_do_disk_erase_failed( + self, get_all_controllers_model_mock): + get_all_controllers_model_mock.return_value = [ + 'HPE Smart Array P408i-p SR Gen10'] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + with open('proliantutils/tests/redfish/' + 'json_samples/array_controller_collection.json', 'r') as f: + acc_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + (self.conn.get.return_value. + json.side_effect) = [ss_json, acc_json, sushy.exceptions.SushyError] + self.assertRaisesRegexp( + exception.IloError, + "The Redfish controller failed to perform the sanitize disk erase " + "on smart storage controller: HPE Smart Array P408i-p SR Gen10, " + "on disk_type: SSD with error:", + self.sys_inst.do_disk_erase, 'SSD', None) + + @mock.patch.object(system.HPESystem, 'get_smart_storage_config') + def test__get_drives_has_raid( + self, get_smart_storage_config_mock): + config_id = ['/redfish/v1/systems/1/smartstorageconfig/'] + with open('proliantutils/tests/redfish/' + 'json_samples/smart_storage.json', 'r') as f: + ss_json = json.loads(f.read()) + self.conn.get.return_value.json.reset_mock() + self.conn.get.return_value.json.side_effect = ss_json + type(self.sys_inst).smart_storage_config_identities = ( + mock.PropertyMock(return_value=config_id)) + (get_smart_storage_config_mock. + return_value.get_drives_has_raid.return_value) = ["2I:1:2", "2I:1:1"] + result = self.sys_inst._get_drives_has_raid() + self.assertEqual(result, ["2I:1:2", "2I:1:1"]) + get_smart_storage_config_mock.assert_called_once_with(config_id[0]) diff --git a/proliantutils/tests/redfish/test_redfish.py b/proliantutils/tests/redfish/test_redfish.py index 9fbf77ad..483cd54d 100644 --- a/proliantutils/tests/redfish/test_redfish.py +++ b/proliantutils/tests/redfish/test_redfish.py @@ -1700,6 +1700,30 @@ class RedfishOperationsTestCase(testtools.TestCase): result = self.rf_client.get_host_post_state() self.assertEqual('PowerOff', result) + @mock.patch.object(redfish.RedfishOperations, '_get_sushy_system') + def test_do_disk_erase_hdd(self, get_system_mock): + self.rf_client.do_disk_erase('HDD') + get_system_mock.return_value.do_disk_erase.assert_called_once_with( + 'HDD', None) + + @mock.patch.object(redfish.RedfishOperations, '_get_sushy_system') + def test_do_disk_erase_ssd(self, get_system_mock): + self.rf_client.do_disk_erase('SSD') + get_system_mock.return_value.do_disk_erase.assert_called_once_with( + 'SSD', None) + + @mock.patch.object(redfish.RedfishOperations, '_get_sushy_system') + def test_do_disk_erase_ssd_pattern_zero(self, get_system_mock): + self.rf_client.do_disk_erase('SSD', 'zero') + get_system_mock.return_value.do_disk_erase.assert_called_once_with( + 'SSD', 'zero') + + @mock.patch.object(redfish.RedfishOperations, '_get_sushy_system') + def test_has_disk_erase_completed(self, get_system_mock): + (get_system_mock.return_value. + has_disk_erase_completed.return_value) = True + self.assertEqual(True, self.rf_client.has_disk_erase_completed()) + @mock.patch.object(redfish.RedfishOperations, '_get_sushy_system') def test_delete_raid_configuration(self, get_system_mock): self.rf_client.delete_raid_configuration()