diff --git a/dracclient/client.py b/dracclient/client.py
index 9866f89..74decc3 100644
--- a/dracclient/client.py
+++ b/dracclient/client.py
@@ -717,8 +717,8 @@ class DRACClient(object):
             value indicating whether the server must be rebooted to
             complete disk conversion.
         """
-        return self._raid_mgmt.convert_physical_disks(
-            physical_disks, raid_enable)
+        return self._raid_mgmt.convert_physical_disks(physical_disks,
+                                                      raid_enable)
 
     def create_virtual_disk(self, raid_controller, physical_disks,
                             raid_level, size_mb, disk_name=None, span_length=None,
@@ -952,8 +952,8 @@ class DRACClient(object):
         """Find out if raid controller supports jbod
 
        :param raid_controller_fqdd: The raid controller's fqdd
-            being being checked to see if it is jbod
-            capable.
+            being checked to see if it is jbod
+            capable.
         :raises: DRACRequestFailed if unable to find any disks in the
             Ready or non-RAID states
         :raises: DRACOperationFailed on error reported back by the DRAC
@@ -962,6 +962,61 @@ class DRACClient(object):
         """
         return self._raid_mgmt.is_jbod_capable(raid_controller_fqdd)
 
+    def is_raid_controller(self, raid_controller_fqdd):
+        """Find out if the object's fqdd is for a RAID controller or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+            if it is a RAID controller or not.
+        :returns: boolean, True if the device is a RAID controller,
+            False if not.
+        """
+        return self._raid_mgmt.is_raid_controller(raid_controller_fqdd)
+
+    def is_boss_controller(self, raid_controller_fqdd):
+        """Find out if a RAID controller is a BOSS card or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+            if it is a BOSS card or not.
+        :returns: boolean, True if the device is a BOSS card, False if not.
+        """
+        return self._raid_mgmt.is_boss_controller(raid_controller_fqdd)
+
+    def change_physical_disk_state(self, mode,
+                                   controllers_to_physical_disk_ids=None):
+        """Convert disks' RAID status and return a list of controller IDs
+
+        Builds a list of controller ids that have had disks converted to the
+        specified RAID status by:
+        - Examining all the disks in the system and filtering out any that
+          are not attached to a RAID/BOSS controller.
+        - Inspecting the controllers' disks to see if there are any that need
+          to be converted and, if so, converting them. If a disk is already
+          in the desired status it is ignored. Also checking for failed or
+          unknown disk statuses and raising an exception where appropriate.
+        - Returning a list of controller IDs for controllers that have had
+          any of their disks converted, and whether a reboot is required.
+
+        The caller should then typically create a config job for the list of
+        controllers returned to finalize the RAID configuration.
+
+        :param mode: constants.RaidStatus enumeration used to determine what
+            RAID status to check for.
+        :param controllers_to_physical_disk_ids: Dictionary of controllers and
+            corresponding disk ids we are inspecting and creating jobs for
+            when needed.
+        :returns: a dict containing the following key/values:
+            - is_reboot_required, a boolean stating whether a reboot is
+              required or not.
+            - commit_required_ids, a list of controller ids that will
+              need to commit their pending RAID changes via a config job.
+        :raises: DRACOperationFailed on error reported back by the DRAC
+            when the exception message does not contain the
+            NOT_SUPPORTED_MSG constant.
+        :raises: Exception on unknown error.
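+
+        A minimal usage sketch (illustrative only; the endpoint values are
+        placeholders and the commit step relies on the client's existing
+        commit_pending_raid_changes call, which is not part of this change)::
+
+            client = DRACClient('1.2.3.4', 'user', 'pass')
+            result = client.change_physical_disk_state(
+                constants.RaidStatus.jbod)
+            for controller_id in result['commit_required_ids']:
+                client.commit_pending_raid_changes(controller_id)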
+        """
+        return (self._raid_mgmt
+                .change_physical_disk_state(mode,
+                                            controllers_to_physical_disk_ids))
+
 
 class WSManClient(wsman.Client):
     """Wrapper for wsman.Client that can wait until iDRAC is ready
@@ -1081,8 +1136,8 @@ class WSManClient(wsman.Client):
                         message_elems]
             raise exceptions.DRACOperationFailed(drac_messages=messages)
 
-        if (expected_return_value is not None and
-                return_value != expected_return_value):
+        if (expected_return_value is not None
+                and return_value != expected_return_value):
             raise exceptions.DRACUnexpectedReturnValue(
                 expected_return_value=expected_return_value,
                 actual_return_value=return_value)
diff --git a/dracclient/constants.py b/dracclient/constants.py
index 85cfe8f..9356060 100644
--- a/dracclient/constants.py
+++ b/dracclient/constants.py
@@ -20,6 +20,8 @@ DEFAULT_IDRAC_IS_READY_RETRY_DELAY_SEC = 10
 DEFAULT_WSMAN_SSL_ERROR_RETRIES = 3
 DEFAULT_WSMAN_SSL_ERROR_RETRY_DELAY_SEC = 0
 
+NOT_SUPPORTED_MSG = " operation is not supported on th"
+
 # power states
 POWER_ON = 'POWER_ON'
 POWER_OFF = 'POWER_OFF'
@@ -71,3 +73,18 @@ class RebootJobType(object):
         return [cls.power_cycle,
                 cls.graceful_reboot,
                 cls.reboot_forced_shutdown]
+
+
+class RaidStatus(object):
+    """Enumeration of different RAID statuses."""
+
+    jbod = 'JBOD'
+    """Just a Bunch of Disks"""
+
+    raid = 'RAID'
+    """Redundant Array of Independent Disks"""
+
+    @classmethod
+    def all(cls):
+        return [cls.jbod,
+                cls.raid]
diff --git a/dracclient/resources/raid.py b/dracclient/resources/raid.py
index c4f1973..b5701af 100644
--- a/dracclient/resources/raid.py
+++ b/dracclient/resources/raid.py
@@ -151,8 +151,6 @@ class VirtualDisk(VirtualDiskTuple):
 
 class RAIDManagement(object):
 
-    NOT_SUPPORTED_MSG = " operation is not supported on th"
-
     def __init__(self, client):
         """Creates RAIDManagement object
 
@@ -348,10 +346,10 @@ class RAIDManagement(object):
         Disks can be enabled or disabled for RAID mode.
 
         :param physical_disks: list of FQDD ID strings of the physical disks
-               to update
+            to update
         :param raid_enable: boolean flag, set to True if the disk is to
-            become part of the RAID. The same flag is applied to all
-            listed disks
+            become part of the RAID. The same flag is applied
+            to all listed disks
         :returns: a dictionary containing:
                   - The commit_required key with a boolean value indicating
                     whether a config job must be created for the values to be
@@ -556,24 +554,212 @@ class RAIDManagement(object):
 
         # Try moving a disk in the Ready state to JBOD mode
         try:
-            self.convert_physical_disks(
-                [ready_disk.id],
-                False)
+            self.convert_physical_disks([ready_disk.id], False)
             is_jbod_capable = True
 
             # Flip the disk back to the Ready state. This results in the
             # pending value being reset to nothing, so it effectively
             # undoes the last command and makes the check non-destructive
-            self.convert_physical_disks(
-                [ready_disk.id],
-                True)
+            self.convert_physical_disks([ready_disk.id], True)
         except exceptions.DRACOperationFailed as ex:
             # Fix for python 3, Exception.message no longer
             # a valid attribute, str(ex) works for both 2.7
             # and 3.x
-            if self.NOT_SUPPORTED_MSG in str(ex):
+            if constants.NOT_SUPPORTED_MSG in str(ex):
                 pass
             else:
                 raise
 
         return is_jbod_capable
+
+    def is_raid_controller(self, raid_controller_fqdd):
+        """Find out if the object's fqdd is for a RAID controller or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+            if it is a RAID controller or not.
+        :returns: boolean, True if the device is a RAID controller,
+            False if not.
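+
+        Example (FQDDs as used in the unit tests): 'RAID.Integrated.1-1'
+        returns True, while 'AHCI.Integrated.1-1' (a BOSS card) and
+        'notRAID.Integrated.1-1' return False.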
+        """
+        return raid_controller_fqdd.startswith('RAID.')
+
+    def is_boss_controller(self, raid_controller_fqdd):
+        """Find out if a RAID controller is a BOSS card or not
+
+        :param raid_controller_fqdd: The object's fqdd we are testing to see
+            if it is a BOSS card or not.
+        :returns: boolean, True if the device is a BOSS card, False if not.
+        """
+        return raid_controller_fqdd.startswith('AHCI.')
+
+    def _check_disks_status(self, mode, physical_disks,
+                            controllers_to_physical_disk_ids):
+        """Find disks that failed, need to be configured, or need no change.
+
+        Inspect all the controllers' drives and:
+        - See if there are any disks in a failed or unknown state and raise
+          a ValueError where appropriate.
+        - If a controller has disks that still need to be configured, add
+          them to the controllers_to_physical_disk_ids dict for the
+          appropriate controller.
+        - If a disk is already in the appropriate state, do nothing; this
+          function should behave in an idempotent manner.
+
+        :param mode: constants.RaidStatus enumeration used to
+            determine what RAID status to check for.
+        :param physical_disks: all physical disks
+        :param controllers_to_physical_disk_ids: Dictionary of controllers
+            we are inspecting and creating jobs for when needed. If
+            needed, modify this dict so that only drives that need to
+            be changed to RAID or JBOD are in the list of disk keys
+            for the corresponding controller.
+        :raises: ValueError: the exception message will list the failed
+            drives and the drives whose state cannot be changed at this
+            time because their state is not "ready" or "non-RAID".
+        """
+        p_disk_id_to_status = {}
+        for physical_disk in physical_disks:
+            p_disk_id_to_status[physical_disk.id] = physical_disk.raid_status
+        failed_disks = []
+        bad_disks = []
+
+        jbod = constants.RaidStatus.jbod
+        raid = constants.RaidStatus.raid
+        for controller, physical_disk_ids \
+                in controllers_to_physical_disk_ids.items():
+            final_physical_disk_ids = []
+            for physical_disk_id in physical_disk_ids:
+                raid_status = p_disk_id_to_status[physical_disk_id]
+                LOG.debug("RAID status for disk id: %s is: %s",
+                          physical_disk_id, raid_status)
+                if ((mode == jbod and raid_status == "non-RAID") or
+                        (mode == raid and raid_status == "ready")):
+                    # This means the disk is already in the desired state,
+                    # so skip it
+                    continue
+                elif ((mode == jbod and raid_status == "ready") or
+                        (mode == raid and raid_status == "non-RAID")):
+                    # This disk is moving from a state we expect to RAID or
+                    # JBOD, so keep it
+                    final_physical_disk_ids.append(physical_disk_id)
+                elif raid_status == "failed":
+                    failed_disks.append(physical_disk_id)
+                else:
+                    # This disk is in one of many states that we don't know
+                    # what to do with, so pitch it
+                    bad_disks.append("{} ({})".format(physical_disk_id,
+                                                      raid_status))
+
+            controllers_to_physical_disk_ids[controller] = (
+                final_physical_disk_ids)
+
+        if failed_disks or bad_disks:
+            error_msg = ""
+
+            if failed_disks:
+                error_msg += ("The following drives have failed: "
+                              "{failed_disks}. Manually check the status"
+                              " of all drives and replace as necessary, then"
+                              " try again.").format(
+                                  failed_disks=" ".join(failed_disks))
+
+            if bad_disks:
+                if failed_disks:
+                    error_msg += "\n"
+                error_msg += ("Unable to change the state of the following "
+                              "drives because their status is not ready "
+                              "or non-RAID: {}. Bring up the RAID "
+                              "controller GUI on this node and change the "
+                              "drives' status to ready or non-RAID.").format(
+                                  ", ".join(bad_disks))
+
+            raise ValueError(error_msg)
+
+    def change_physical_disk_state(self, mode,
+                                   controllers_to_physical_disk_ids=None):
+        """Convert disks' RAID status and return a list of controller IDs
+
+        Builds a list of controller ids that have had disks converted to the
+        specified RAID status by:
+        - Examining all the disks in the system and filtering out any that
+          are not attached to a RAID/BOSS controller.
+        - Inspecting the controllers' disks to see if there are any that need
+          to be converted and, if so, converting them. If a disk is already
+          in the desired status it is ignored. Also checking for failed or
+          unknown disk statuses and raising an exception where appropriate.
+        - Returning a list of controller IDs for controllers that have had
+          any of their disks converted, and whether a reboot is required.
+
+        The caller should then typically create a config job for the list of
+        controllers returned to finalize the RAID configuration.
+
+        :param mode: constants.RaidStatus enumeration used to determine what
+            RAID status to check for.
+        :param controllers_to_physical_disk_ids: Dictionary of controllers and
+            corresponding disk ids we are inspecting and creating jobs for
+            when needed.
+        :returns: a dict containing the following key/values:
+            - is_reboot_required, a boolean stating whether a reboot is
+              required or not.
+            - commit_required_ids, a list of controller ids that will
+              need to commit their pending RAID changes via a config job.
+        :raises: DRACOperationFailed on error reported back by the DRAC
+            when the exception message does not contain the
+            NOT_SUPPORTED_MSG constant.
+        :raises: Exception on unknown error.
+        """
+        physical_disks = self.list_physical_disks()
+
+        raid = constants.RaidStatus.raid
+
+        if not controllers_to_physical_disk_ids:
+            controllers_to_physical_disk_ids = collections.defaultdict(list)
+
+            for physical_d in physical_disks:
+                # Weed out disks that are not attached to a RAID controller
+                if (self.is_raid_controller(physical_d.controller)
+                        or self.is_boss_controller(physical_d.controller)):
+                    physical_disk_ids = controllers_to_physical_disk_ids[
+                        physical_d.controller]
+
+                    physical_disk_ids.append(physical_d.id)
+
+        '''Modify controllers_to_physical_disk_ids dict by inspecting desired
+        status vs current status of each controller's disks.
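+        Disks that are already in the requested state are dropped from the
+        per-controller lists, so they are not converted a second time.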
+ Raise exception if there are any failed drives or + drives not in status 'ready' or 'non-RAID' + ''' + self._check_disks_status(mode, physical_disks, + controllers_to_physical_disk_ids) + + is_reboot_required = False + controllers = [] + for controller, physical_disk_ids \ + in controllers_to_physical_disk_ids.items(): + if physical_disk_ids: + LOG.debug("Converting the following disks to {} on RAID " + "controller {}: {}".format( + mode, controller, str(physical_disk_ids))) + try: + conversion_results = \ + self.convert_physical_disks(physical_disk_ids, + mode == raid) + except exceptions.DRACOperationFailed as ex: + if constants.NOT_SUPPORTED_MSG in str(ex): + LOG.debug("Controller {} does not support " + "JBOD mode".format(controller)) + pass + else: + raise + else: + if conversion_results: + reboot_true = constants.RebootRequired.true + reboot_optional = constants.RebootRequired.optional + _is_reboot_required = \ + conversion_results["is_reboot_required"] + is_reboot_required = is_reboot_required \ + or (_is_reboot_required + in [reboot_true, reboot_optional]) + if conversion_results["is_commit_required"]: + controllers.append(controller) + + return {'is_reboot_required': is_reboot_required, + 'commit_required_ids': controllers} diff --git a/dracclient/tests/test_raid.py b/dracclient/tests/test_raid.py index fac24c2..5fbf60c 100644 --- a/dracclient/tests/test_raid.py +++ b/dracclient/tests/test_raid.py @@ -11,6 +11,8 @@ # License for the specific language governing permissions and limitations # under the License. + +import collections import lxml.etree import mock import random @@ -35,6 +37,80 @@ class ClientRAIDManagementTestCase(base.BaseTest): self.drac_client = dracclient.client.DRACClient( **test_utils.FAKE_ENDPOINT) self.raid_controller_fqdd = "RAID.Integrated.1-1" + cntl_dict = {'RAID.Integrated.1-1': + ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'], + 'AHCI.Integrated.1-1': + ['Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1', + 'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1']} + self.controllers_to_physical_disk_ids = cntl_dict + self.disk_1 = raid.PhysicalDisk( + id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', + description='Disk 0 in Backplane 1 of Int RAID Controller 1', + controller='RAID.Integrated.1-1', + manufacturer='ATA', + model='ST91000640NS', + media_type='hdd', + interface_type='sata', + size_mb=953344, + free_size_mb=953344, + serial_number='9XG4SLGZ', + firmware_version='AA09', + status='ok', + raid_status='ready', + sas_address='500056B37789ABE3', + device_protocol=None) + + self.disk_2 = raid.PhysicalDisk( + id='Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', + description='Disk 1 in Backplane 1 of Int RAID Controller 1', + controller='RAID.Integrated.1-1', + manufacturer='ATA', + model='ST91000640NS', + media_type='hdd', + interface_type='sata', + size_mb=953344, + free_size_mb=953344, + serial_number='9XG4SLGZ', + firmware_version='AA09', + status='online', + raid_status='ready', + sas_address='500056B37789ABE3', + device_protocol=None) + + self.disk_3 = raid.PhysicalDisk( + id='Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1', + description='Disk 1 in Backplane 1 of Int BOSS Controller 1', + controller='AHCI.Integrated.1-1', + manufacturer='ATA', + model='ST91000640NS', + media_type='hdd', + interface_type='sata', + size_mb=953344, + free_size_mb=953344, + serial_number='9XG4SLGZ', + firmware_version='AA09', + status='online', + 
raid_status='ready', + sas_address='500056B37789ABE3', + device_protocol=None) + + self.disk_4 = raid.PhysicalDisk( + id='Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1', + description='Disk 1 in Backplane 1 of Int RAID Controller 1', + controller='AHCI.Integrated.1-1', + manufacturer='ATA', + model='ST91000640NS', + media_type='hdd', + interface_type='sata', + size_mb=953344, + free_size_mb=953344, + serial_number='9XG4SLGZ', + firmware_version='AA09', + status='online', + raid_status='ready', + sas_address='500056B37789ABE3', + device_protocol=None) @mock.patch.object(dracclient.client.WSManClient, 'wait_until_idrac_is_ready', spec_set=True, @@ -696,3 +772,384 @@ class ClientRAIDManagementTestCase(base.BaseTest): self.assertRaises( exceptions.DRACOperationFailed, self.drac_client.is_jbod_capable, self.raid_controller_fqdd) + + def test_is_raid_controller(self, mock_requests): + self.assertTrue(self.drac_client + .is_raid_controller("RAID.Integrated.1-1")) + self.assertFalse(self.drac_client + .is_raid_controller("notRAID.Integrated.1-1")) + + def test_is_boss_controller(self, mock_requests): + self.assertTrue(self.drac_client + .is_boss_controller("AHCI.Integrated.1-1")) + self.assertFalse(self.drac_client + .is_boss_controller("notAHCI.Integrated.1-1")) + + def test_check_disks_status_no_controllers(self, mock_requests): + physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] + raid_mgt = self.drac_client._raid_mgmt + + cont_to_phys_disk_ids = collections.defaultdict(list) + mode = constants.RaidStatus.jbod + + raid_mgt._check_disks_status(mode, physical_disks, + cont_to_phys_disk_ids) + jbod_len = len(cont_to_phys_disk_ids['RAID.Integrated.1-1']) + self.assertEqual(jbod_len, 0) + + # Switch mode to RAID and try again + cont_to_phys_disk_ids = collections.defaultdict(list) + mode = constants.RaidStatus.raid + raid_mgt._check_disks_status(mode, physical_disks, + cont_to_phys_disk_ids) + raid_len = len(cont_to_phys_disk_ids['RAID.Integrated.1-1']) + self.assertEqual(raid_len, 0) + + def test_check_disks_status_bad(self, mock_requests): + mode = constants.RaidStatus.raid + disk_2 = self.disk_2._replace(raid_status='FAKE_STATUS') + physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4] + raid_mgt = self.drac_client._raid_mgmt + + self.assertRaises(ValueError, + raid_mgt._check_disks_status, + mode, + physical_disks, + self.controllers_to_physical_disk_ids.copy()) + mode = constants.RaidStatus.jbod + self.assertRaises(ValueError, + raid_mgt._check_disks_status, + mode, + physical_disks, + self.controllers_to_physical_disk_ids.copy()) + + def test_check_disks_status_fail(self, mock_requests): + mode = constants.RaidStatus.raid + disk_2_failed = self.disk_2._replace(raid_status='failed') + physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4] + raid_mgt = self.drac_client._raid_mgmt + + self.assertRaises(ValueError, + raid_mgt._check_disks_status, + mode, + physical_disks, + self.controllers_to_physical_disk_ids.copy()) + mode = constants.RaidStatus.jbod + self.assertRaises(ValueError, + raid_mgt._check_disks_status, + mode, + physical_disks, + self.controllers_to_physical_disk_ids.copy()) + + def test_check_disks_status_no_change(self, mock_requests): + raid_mgt = self.drac_client._raid_mgmt + mode = constants.RaidStatus.raid + physical_disks = [self.disk_1, self.disk_2, + self.disk_3, self.disk_4] + + raid_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. 
+ copy()) + + raid_mgt._check_disks_status(mode, physical_disks, + raid_cntl_to_phys_disk_ids) + raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) + self.assertEqual(raid_len, 0) + + mode = constants.RaidStatus.jbod + disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID') + disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') + physical_disks = [disk_1_non_raid, disk_2_non_raid, + self.disk_3, self.disk_4] + + jbod_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. + copy()) + raid_mgt._check_disks_status(mode, physical_disks, + jbod_cntl_to_phys_disk_ids) + jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) + self.assertEqual(jbod_len, 0) + + def test_check_disks_status_change_state(self, mock_requests): + raid_mgt = self.drac_client._raid_mgmt + mode = constants.RaidStatus.jbod + physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] + + jbod_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. + copy()) + + raid_mgt._check_disks_status(mode, physical_disks, + jbod_cntl_to_phys_disk_ids) + jbod_len = len(jbod_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) + self.assertEqual(jbod_len, 2) + + mode = constants.RaidStatus.raid + disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID') + disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') + physical_disks = [disk_1_non_raid, disk_2_non_raid, + self.disk_3, self.disk_4] + raid_cntl_to_phys_disk_ids = (self.controllers_to_physical_disk_ids. + copy()) + raid_mgt._check_disks_status(mode, physical_disks, + raid_cntl_to_phys_disk_ids) + raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1']) + self.assertEqual(raid_len, 2) + + def test_check_disks_status_bad_and_fail(self, mock_requests): + mode = constants.RaidStatus.raid + disk_1_bad = self.disk_1._replace(raid_status='FAKE_STATUS') + disk_2_failed = self.disk_2._replace(raid_status='failed') + physical_disks = [disk_1_bad, disk_2_failed, self.disk_3, self.disk_4] + raid_mgt = self.drac_client._raid_mgmt + + self.assertRaises(ValueError, + raid_mgt._check_disks_status, + mode, + physical_disks, + self.controllers_to_physical_disk_ids.copy()) + mode = constants.RaidStatus.jbod + self.assertRaises(ValueError, + raid_mgt._check_disks_status, + mode, + physical_disks, + self.controllers_to_physical_disk_ids.copy()) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True) + def test_change_physical_disk_state_jbod( + self, mock_requests, + mock_convert_physical_disks, + wait_until_idrac_is_ready): + mode = constants.RaidStatus.jbod + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_PhysicalDiskView]['ok']) + mock_convert_physical_disks.return_value = {'commit_required': True, + 'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired + .true} + cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + results = self.drac_client.change_physical_disk_state( + mode, cntl_to_phys_d_ids) + self.assertTrue(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 2) + + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True) + def 
test_change_physical_disk_state_raid( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks): + mode = constants.RaidStatus.raid + disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID') + disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') + physical_disks = [disk_1_non_raid, disk_2_non_raid, + self.disk_3, self.disk_4] + mock_list_physical_disks.return_value = physical_disks + mock_convert_physical_disks.return_value = {'commit_required': True, + 'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired + .true} + cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + results = self.drac_client.change_physical_disk_state( + mode, cntl_to_phys_d_ids) + self.assertTrue(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 1) + + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True) + def test_change_physical_disk_state_none( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks): + mode = constants.RaidStatus.raid + physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] + mock_convert_physical_disks.return_value = {'commit_required': True, + 'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired + .true} + mock_list_physical_disks.return_value = physical_disks + cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + results = self.drac_client.change_physical_disk_state( + mode, cntl_to_phys_d_ids) + self.assertFalse(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 0) + + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True, + side_effect=exceptions.DRACOperationFailed( + drac_messages=constants.NOT_SUPPORTED_MSG)) + def test_change_physical_disk_state_not_supported( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks): + mode = constants.RaidStatus.raid + disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID') + disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') + physical_disks = [disk_1_non_raid, disk_2_non_raid, + self.disk_3, self.disk_4] + mock_list_physical_disks.return_value = physical_disks + cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + results = self.drac_client.change_physical_disk_state( + mode, cntl_to_phys_d_ids) + self.assertFalse(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 0) + + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True, + side_effect=exceptions.DRACOperationFailed( + drac_messages="OTHER_MESSAGE")) + def test_change_physical_disk_state_raise_drac_operation_other( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks): + mode = constants.RaidStatus.raid + disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID') + disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') + physical_disks = [disk_1_non_raid, disk_2_non_raid, + self.disk_3, self.disk_4] + 
mock_list_physical_disks.return_value = physical_disks + cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + self.assertRaisesRegexp( + exceptions.DRACOperationFailed, + "OTHER_MESSAGE", + self.drac_client.change_physical_disk_state, + mode, cntl_to_phys_d_ids) + + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True, side_effect=Exception( + "SOMETHING_BAD_HAPPENED")) + def test_change_physical_disk_state_raise_other( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks): + mode = constants.RaidStatus.raid + disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID') + disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID') + physical_disks = [disk_1_non_raid, disk_2_non_raid, + self.disk_3, self.disk_4] + mock_list_physical_disks.return_value = physical_disks + cntl_to_phys_d_ids = self.controllers_to_physical_disk_ids + self.assertRaisesRegexp( + Exception, "SOMETHING_BAD_HAPPENED", + self.drac_client.change_physical_disk_state, + mode, cntl_to_phys_d_ids) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True) + def test_change_physical_disk_state_with_no_dict( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks, + mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) + mode = constants.RaidStatus.jbod + physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] + mock_convert_physical_disks.return_value = {'commit_required': True, + 'is_commit_required': True, + 'is_reboot_required': + constants.RebootRequired + .true} + mock_list_physical_disks.return_value = physical_disks + results = self.drac_client.change_physical_disk_state(mode) + self.assertTrue(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 2) + + @mock.patch.object(dracclient.client.WSManClient, + 'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + def test_change_physical_disk_state_with_no_raid_or_boss_card_match( + self, mock_requests, + mock_list_physical_disks, + mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) + mode = constants.RaidStatus.jbod + _disk_1 = self.disk_1._replace(controller='NOT_RAID.Integrated.1-1') + _disk_2 = self.disk_2._replace(controller='NOT_RAID.Integrated.1-1') + _disk_3 = self.disk_3._replace(controller='NOT_AHCI.Integrated.1-1') + _disk_4 = self.disk_4._replace(controller='NOT_AHCI.Integrated.1-1') + physical_disks = [_disk_1, _disk_2, _disk_3, _disk_4] + mock_list_physical_disks.return_value = physical_disks + results = self.drac_client.change_physical_disk_state(mode) + self.assertFalse(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 0) + + @mock.patch.object(dracclient.client.WSManClient, + 
'wait_until_idrac_is_ready', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'list_physical_disks', spec_set=True, + autospec=True) + @mock.patch.object(dracclient.resources.raid.RAIDManagement, + 'convert_physical_disks', spec_set=True, + autospec=True) + def test_change_physical_disk_state_conversion_return_values( + self, mock_requests, + mock_convert_physical_disks, + mock_list_physical_disks, + mock_wait_until_idrac_is_ready): + mock_requests.post( + 'https://1.2.3.4:443/wsman', + text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok']) + mode = constants.RaidStatus.jbod + physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4] + '''Test all logic branches for 100% coverage, it is unlikely + convert_physical_disks() will return empty dict but we do check + for this case in change_physical_disk_state()''' + mock_convert_physical_disks.return_value = {} + mock_list_physical_disks.return_value = physical_disks + results = self.drac_client.change_physical_disk_state(mode) + self.assertFalse(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 0) + '''Where convert_physical_disks() does not require a commit after + executing, unlikely case but provides 100% code coverage of all + logic branches.''' + mock_convert_physical_disks.return_value = {'commit_required': + True, + 'is_commit_required': + False, + 'is_reboot_required': + constants.RebootRequired + .false} + results = self.drac_client.change_physical_disk_state(mode) + self.assertFalse(results["is_reboot_required"]) + self.assertEqual(len(results["commit_required_ids"]), 0) diff --git a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml index 74c6488..2188685 100644 --- a/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml +++ b/dracclient/tests/wsman_mocks/controller_view-enum-ok.xml @@ -52,9 +52,48 @@ 0 0 + + 2 + 512 + 1 + 2.5.13.2009 + 1 + Unknown + DELL + 2 + Unknown + AHCI.Integrated.1-1 + + 1 + 0 + AHCI.Integrated.1-1 + 0 + AHCI.Integrated.1-1 + + 20150226175957.000000+000 + 20150226175950.000000+000 + Generation 2 + Generation 3 + 5B + 1 + 1F38 + 1028 + 1000 + 0 + 1 + BOSS-S1 + 1 + 5B083FE0D2D0F201 + 1 + 1 + 1 + 1 + 0 + 0 + - \ No newline at end of file + diff --git a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml index aa9dca1..1ebf03e 100644 --- a/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml +++ b/dracclient/tests/wsman_mocks/physical_disk_view-enum-ok.xml @@ -125,6 +125,80 @@ 0 0 + + 512 + 5 + 0 + Disk 1 on Integrated BOSS Controller 1 + 2 + Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1 + 599550590976 + 0 + Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1 + 20150226180025.000000+000 + 20150226180025.000000+000 + ATA + 2 + 33 + 2014 + 3 + 1 + ST600MM0007 + None + 0 + CN07YX587262248G01PZA02 + 0 + 1 + 1 + 255 + LS0B + 1 + 5000C5007764F409 + 0 + S0M3EY3Z + 599550590976 + 1 + None + 0 + 0 + + + 512 + 5 + 0 + Disk 2 on Integrated BOSS Controller 1 + 2 + Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1 + 599550590976 + 0 + Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1 + 20150226180025.000000+000 + 20150226180025.000000+000 + ATA + 2 + 33 + 2014 + 3 + 1 + ST600MM0007 + None + 0 + CN07YX587262248G01PZA02 + 0 + 1 + 1 + 255 + LS0B + 1 + 5000C5007764F409 + 0 + S0M3EY3Z + 599550590976 + 2 + None + 0 + 0 + 7 PCIe SSD in Slot 20 in Bay 1
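
Below is a short, self-contained sketch of how a caller might drive the
change_physical_disk_state() API added above. The endpoint values are
placeholders, and the commit_pending_raid_changes() call is assumed from the
existing DRACClient surface rather than introduced by this patch.

    # Illustrative only: convert every eligible disk to JBOD, then create the
    # config jobs needed to apply the pending conversions.
    import dracclient.client
    from dracclient import constants

    client = dracclient.client.DRACClient('1.2.3.4', 'user', 'pass')

    result = client.change_physical_disk_state(constants.RaidStatus.jbod)

    for controller_id in result['commit_required_ids']:
        # Each controller with pending changes needs its own config job.
        client.commit_pending_raid_changes(controller_id)

    if result['is_reboot_required']:
        # The conversions only take effect after the node is rebooted.
        print('Reboot the node to apply the pending disk conversions.')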