Fix H755 RAID controller handling when converting disks to JBOD mode

With the H755 RAID controller, a request to convert physical disks to
JBOD deletes the existing virtual disks and moves the physical disks
into the 'ready' state. Moreover, another job is created to convert
each physical disk to 'RAID-0'. This change excludes the H755 RAID
controller from the physical disk conversion when the requested mode
is 'JBOD'.

Change-Id: Id6de320c8e8a9ca21fac277718c405c657f58a3a
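At its core the patch adds one guard inside RAIDManagement.change_physical_disk_state(). A minimal standalone sketch of that guard follows; the wrapper function exclude_h755_for_jbod is illustrative only (in the patch the logic is inline), and LOG, the RaidStatus values, and the controller objects come from the surrounding dracclient code shown in the diff below:

    import logging

    LOG = logging.getLogger(__name__)


    def exclude_h755_for_jbod(mode, jbod, all_controllers,
                              controllers_to_physical_disk_ids):
        """Drop PERC H755 controllers from the conversion map when mode is JBOD.

        mode/jbod mirror constants.RaidStatus values in dracclient;
        all_controllers mirrors RAIDManagement.list_raid_controllers().
        """
        if mode == jbod:
            for cntlr in all_controllers:
                if (cntlr.model.startswith("PERC H755")
                        and cntlr.id in controllers_to_physical_disk_ids):
                    LOG.debug("Excluding %s from converting to non-RAID mode",
                              cntlr.model)
                    # No conversion job is scheduled for this controller.
                    del controllers_to_physical_disk_ids[cntlr.id]
        return controllers_to_physical_disk_ids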
@@ -911,6 +911,12 @@ class RAIDManagement(object):
         RAID to JBOD or vice versa. It does this by only converting the
         disks that are not already in the correct state.

+        In the case of the H755 RAID controller, the existing virtual disks
+        get deleted and the physical disks are moved into the 'ready' state.
+        Moreover, another job is created to convert each physical disk to
+        'RAID-0'. This change excludes the H755 RAID controller from the
+        conversion of physical disks when the mode is 'JBOD'.
+
         :param mode: constants.RaidStatus enumeration that indicates the mode
             to change the disks to.
         :param controllers_to_physical_disk_ids: Dictionary of controllers and
@@ -932,11 +938,12 @@ class RAIDManagement(object):
         physical_disks = self.list_physical_disks()

         raid = constants.RaidStatus.raid
+        jbod = constants.RaidStatus.jbod

+        all_controllers = self.list_raid_controllers()
         if not controllers_to_physical_disk_ids:
             controllers_to_physical_disk_ids = collections.defaultdict(list)

-            all_controllers = self.list_raid_controllers()
             for physical_d in physical_disks:
                 # Weed out disks that are not attached to a RAID controller
                 if self.is_raid_controller(physical_d.controller,
@@ -946,6 +953,20 @@ class RAIDManagement(object):
                     physical_disk_ids.append(physical_d.id)

+        '''In the case of the H755 RAID controller, the existing virtual
+        disks get deleted and the physical disks are moved into the 'ready'
+        state. Moreover, another job is created to convert each physical
+        disk to RAID-0. The block below excludes the H755 RAID controller
+        from the conversion of physical disks when the mode is 'JBOD'.
+        '''
+        if mode == jbod:
+            for cntlr in all_controllers:
+                if cntlr.model.startswith("PERC H755") and \
+                        cntlr.id in controllers_to_physical_disk_ids:
+                    LOG.debug("Excluding {} from converting to "
+                              "non-RAID mode".format(cntlr.model))
+                    del controllers_to_physical_disk_ids[cntlr.id]
+
         '''Modify controllers_to_physical_disk_ids dict by inspecting desired
         status vs current status of each controller's disks.
         Raise exception if there are any failed drives or
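Before the test and fixture hunks that follow, here is a minimal usage sketch of the affected call path. It is not part of the commit: the address and credentials are placeholders, and the DRACClient constructor is assumed from the standard dracclient API exercised by the tests below.

    from dracclient import client, constants

    # Placeholder endpoint and credentials, illustration only.
    drac = client.DRACClient('1.2.3.4', 'user', 'password')

    # With this patch, a PERC H755 entry in the mapping is skipped when the
    # requested mode is JBOD, so no RAID-0 conversion job is scheduled for it.
    cntl_dict = {'RAID.SL.8-1':
                 ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1']}
    results = drac.change_physical_disk_state(constants.RaidStatus.jbod,
                                              cntl_dict)
    # results['conversion_results'] maps controller FQDDs to commit/reboot info.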
@@ -40,12 +40,15 @@ class ClientRAIDManagementTestCase(base.BaseTest):
             **test_utils.FAKE_ENDPOINT)
         self.raid_controller_fqdd = "RAID.Integrated.1-1"
         self.boss_controller_fqdd = "AHCI.Slot.3-1"
+        self.h755_controller_fqdd = "RAID.SL.8-1"
         cntl_dict = {'RAID.Integrated.1-1':
                      ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
                       'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
                      'AHCI.Integrated.1-1':
                      ['Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
-                      'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1']}
+                      'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1'],
+                     'RAID.SL.8-1':
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1']}
         self.controllers_to_physical_disk_ids = cntl_dict
         self.disk_1 = raid.PhysicalDisk(
             id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
@@ -119,6 +122,24 @@ class ClientRAIDManagementTestCase(base.BaseTest):
             device_protocol=None,
             bus=None)

+        self.disk_5 = raid.PhysicalDisk(
+            id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1',
+            description='Disk 0 in Backplane 1 of Int RAID Controller 1',
+            controller='RAID.SL.8-1',
+            manufacturer='ATA',
+            model='ST91000640NS',
+            media_type='hdd',
+            interface_type='sata',
+            size_mb=953344,
+            free_size_mb=953344,
+            serial_number='9XG4SLGZ',
+            firmware_version='AA09',
+            status='ok',
+            raid_status='ready',
+            sas_address='500056B37789ABE3',
+            device_protocol=None,
+            bus=None)
+
     @mock.patch.object(dracclient.client.WSManClient,
                        'wait_until_idrac_is_ready', spec_set=True,
                        autospec=True)
@@ -1226,7 +1247,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_check_disks_status_bad(self, mock_requests):
         mode = constants.RaidStatus.raid
         disk_2 = self.disk_2._replace(raid_status='FAKE_STATUS')
-        physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4]
+        physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4,
+                          self.disk_5]
         raid_mgt = self.drac_client._raid_mgmt

         self.assertRaises(ValueError,
@@ -1244,7 +1266,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_check_disks_status_fail(self, mock_requests):
         mode = constants.RaidStatus.raid
         disk_2_failed = self.disk_2._replace(raid_status='failed')
-        physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4]
+        physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4,
+                          self.disk_5]
         raid_mgt = self.drac_client._raid_mgmt

         self.assertRaises(ValueError,
@@ -1263,7 +1286,7 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         raid_mgt = self.drac_client._raid_mgmt
         mode = constants.RaidStatus.raid
         physical_disks = [self.disk_1, self.disk_2,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, self.disk_5]

         raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
             mode, physical_disks, self.controllers_to_physical_disk_ids)
@@ -1274,7 +1297,7 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
         disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
         physical_disks = [disk_1_non_raid, disk_2_non_raid,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, self.disk_5]

         jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
             mode, physical_disks, self.controllers_to_physical_disk_ids)
@@ -1284,7 +1307,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_check_disks_status_change_state(self, mock_requests):
         raid_mgt = self.drac_client._raid_mgmt
         mode = constants.RaidStatus.jbod
-        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3,
+                          self.disk_4, self.disk_5]

         jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
             mode, physical_disks, self.controllers_to_physical_disk_ids)
@@ -1295,7 +1319,7 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
         disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
         physical_disks = [disk_1_non_raid, disk_2_non_raid,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, self.disk_5]
         raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
             mode, physical_disks, self.controllers_to_physical_disk_ids)
         raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
@@ -1305,7 +1329,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         mode = constants.RaidStatus.raid
         disk_1_bad = self.disk_1._replace(raid_status='FAKE_STATUS')
         disk_2_failed = self.disk_2._replace(raid_status='failed')
-        physical_disks = [disk_1_bad, disk_2_failed, self.disk_3, self.disk_4]
+        physical_disks = [disk_1_bad, disk_2_failed, self.disk_3,
+                          self.disk_4, self.disk_5]
         raid_mgt = self.drac_client._raid_mgmt

         self.assertRaises(ValueError,
@@ -1343,11 +1368,40 @@ class ClientRAIDManagementTestCase(base.BaseTest):
             cvt_phys_disks_return_value,
             'AHCI.Integrated.1-1':
             cvt_phys_disks_return_value}
+        cntl_dict = {'RAID.Integrated.1-1':
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
+                      'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
+                     'AHCI.Integrated.1-1':
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
+                      'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1']}
         results = self.drac_client.change_physical_disk_state(
-            mode, self.controllers_to_physical_disk_ids)
+            mode, cntl_dict)
         self.assertEqual(results['conversion_results'],
                          expected_return_value)

+    @mock.patch.object(dracclient.client.WSManClient,
+                       'wait_until_idrac_is_ready', spec_set=True,
+                       autospec=True)
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'convert_physical_disks', spec_set=True,
+                       autospec=True)
+    def test_change_physical_disk_state_jbod_h755(
+            self, mock_requests,
+            mock_convert_physical_disks,
+            wait_until_idrac_is_ready):
+        mode = constants.RaidStatus.jbod
+        mock_requests.post(
+            'https://1.2.3.4:443/wsman',
+            text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
+
+        cntl_dict = {'RAID.SL.8-1':
+                     ['Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1']}
+        self.drac_client.change_physical_disk_state(
+            mode, cntl_dict)
+
+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_raid_controllers', spec_set=True,
+                       autospec=True)
     @mock.patch.object(dracclient.resources.raid.RAIDManagement,
                        'list_physical_disks', spec_set=True,
                        autospec=True)
@@ -1357,12 +1411,14 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_change_physical_disk_state_raid(
             self, mock_requests,
             mock_convert_physical_disks,
-            mock_list_physical_disks):
+            mock_list_physical_disks,
+            mock_list_raid_controllers):
         mode = constants.RaidStatus.raid
         disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
         disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
+        disk_5_non_raid = self.disk_5._replace(raid_status='non-RAID')
         physical_disks = [disk_1_non_raid, disk_2_non_raid,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, disk_5_non_raid]
         mock_list_physical_disks.return_value = physical_disks
         boss_return_value = {'is_commit_required': False,
                              'is_reboot_required':
@@ -1370,24 +1426,34 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         raid_return_value = {'is_commit_required': True,
                              'is_reboot_required':
                              constants.RebootRequired.true}
+        h755_return_value = {'is_commit_required': True,
+                             'is_reboot_required':
+                             constants.RebootRequired.true}
         mock_convert_physical_disks.return_value = raid_return_value

         results = self.drac_client.change_physical_disk_state(
             mode, self.controllers_to_physical_disk_ids)
-        self.assertEqual(len(results['conversion_results']), 2)
+        self.assertEqual(len(results['conversion_results']), 3)
         self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
                          boss_return_value)
         self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'],
                          raid_return_value)
+        self.assertEqual(results['conversion_results']['RAID.SL.8-1'],
+                         h755_return_value)

+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_raid_controllers', spec_set=True,
+                       autospec=True)
     @mock.patch.object(dracclient.resources.raid.RAIDManagement,
                        'list_physical_disks', spec_set=True,
                        autospec=True)
     def test_change_physical_disk_state_none(
             self, mock_requests,
-            mock_list_physical_disks):
+            mock_list_physical_disks,
+            mock_list_raid_controllers):
         mode = constants.RaidStatus.raid
-        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
+        physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4,
+                          self.disk_5]
         mock_list_physical_disks.return_value = physical_disks
         expected_return_value = {'is_commit_required': False,
                                  'is_reboot_required':
@@ -1399,6 +1465,9 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
                          expected_return_value)

+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_raid_controllers', spec_set=True,
+                       autospec=True)
     @mock.patch.object(dracclient.resources.raid.RAIDManagement,
                        'list_physical_disks', spec_set=True,
                        autospec=True)
@@ -1410,12 +1479,13 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_change_physical_disk_state_not_supported(
             self, mock_requests,
             mock_convert_physical_disks,
-            mock_list_physical_disks):
+            mock_list_physical_disks,
+            mock_list_raid_controllers):
         mode = constants.RaidStatus.raid
         disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
         disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
         physical_disks = [disk_1_non_raid, disk_2_non_raid,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, self.disk_5]
         mock_list_physical_disks.return_value = physical_disks
         expected_return_value = {'is_commit_required': False,
                                  'is_reboot_required':
@@ -1427,6 +1497,9 @@ class ClientRAIDManagementTestCase(base.BaseTest):
         self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
                          expected_return_value)

+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_raid_controllers', spec_set=True,
+                       autospec=True)
     @mock.patch.object(dracclient.resources.raid.RAIDManagement,
                        'list_physical_disks', spec_set=True,
                        autospec=True)
@@ -1438,12 +1511,13 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_change_physical_disk_state_raise_drac_operation_other(
             self, mock_requests,
             mock_convert_physical_disks,
-            mock_list_physical_disks):
+            mock_list_physical_disks,
+            mock_list_raid_controllers):
         mode = constants.RaidStatus.raid
         disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
         disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
         physical_disks = [disk_1_non_raid, disk_2_non_raid,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, self.disk_5]
         mock_list_physical_disks.return_value = physical_disks
         self.assertRaisesRegexp(
             exceptions.DRACOperationFailed,
@@ -1452,6 +1526,9 @@ class ClientRAIDManagementTestCase(base.BaseTest):
             mode,
             self.controllers_to_physical_disk_ids)

+    @mock.patch.object(dracclient.resources.raid.RAIDManagement,
+                       'list_raid_controllers', spec_set=True,
+                       autospec=True)
     @mock.patch.object(dracclient.resources.raid.RAIDManagement,
                        'list_physical_disks', spec_set=True,
                        autospec=True)
@@ -1462,12 +1539,13 @@ class ClientRAIDManagementTestCase(base.BaseTest):
     def test_change_physical_disk_state_raise_other(
             self, mock_requests,
             mock_convert_physical_disks,
-            mock_list_physical_disks):
+            mock_list_physical_disks,
+            mock_list_raid_controllers):
         mode = constants.RaidStatus.raid
         disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
         disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
         physical_disks = [disk_1_non_raid, disk_2_non_raid,
-                          self.disk_3, self.disk_4]
+                          self.disk_3, self.disk_4, self.disk_5]
         mock_list_physical_disks.return_value = physical_disks
         self.assertRaisesRegexp(
             Exception,
@@ -93,6 +93,55 @@
       <n1:SupportRAID10UnevenSpans>0</n1:SupportRAID10UnevenSpans>
       <n1:T10PICapability>0</n1:T10PICapability>
     </n1:DCIM_ControllerView>
+    <n1:DCIM_ControllerView>
+      <n1:AlarmState>1</n1:AlarmState>
+      <n1:BootVirtualDiskFQDD xsi:nil="true"/>
+      <n1:Bus>1</n1:Bus>
+      <n1:CacheSizeInMB>8192</n1:CacheSizeInMB>
+      <n1:CachecadeCapability>0</n1:CachecadeCapability>
+      <n1:ConfigLockdownCapable>0</n1:ConfigLockdownCapable>
+      <n1:ConfigLockdownState>0</n1:ConfigLockdownState>
+      <n1:ConnectorCount>4</n1:ConnectorCount>
+      <n1:ControllerFirmwareVersion>52.13.2-3661</n1:ControllerFirmwareVersion>
+      <n1:Device>0</n1:Device>
+      <n1:DeviceCardDataBusWidth>Unknown</n1:DeviceCardDataBusWidth>
+      <n1:DeviceCardManufacturer>DELL</n1:DeviceCardManufacturer>
+      <n1:DeviceCardSlotLength>2</n1:DeviceCardSlotLength>
+      <n1:DeviceCardSlotType>Unknown</n1:DeviceCardSlotType>
+      <n1:DeviceDescription>RAID Controller in SL 8</n1:DeviceDescription>
+      <n1:DriverVersion xsi:nil="true"/>
+      <n1:EncryptionCapability>1</n1:EncryptionCapability>
+      <n1:EncryptionMode>0</n1:EncryptionMode>
+      <n1:FQDD>RAID.SL.8-1</n1:FQDD>
+      <n1:Function>0</n1:Function>
+      <n1:InstanceID>RAID.SL.8-1</n1:InstanceID>
+      <n1:KeyID xsi:nil="true"/>
+      <n1:LastSystemInventoryTime>20210831081620.000000+000</n1:LastSystemInventoryTime>
+      <n1:LastUpdateTime>20210831081620.000000+000</n1:LastUpdateTime>
+      <n1:MaxAvailablePCILinkSpeed xsi:nil="true"/>
+      <n1:MaxPossiblePCILinkSpeed xsi:nil="true"/>
+      <n1:PCIDeviceID>10E2</n1:PCIDeviceID>
+      <n1:PCISlot xsi:nil="true"/>
+      <n1:PCISubDeviceID>1AE1</n1:PCISubDeviceID>
+      <n1:PCISubVendorID>1028</n1:PCISubVendorID>
+      <n1:PCIVendorID>1000</n1:PCIVendorID>
+      <n1:PatrolReadState>1</n1:PatrolReadState>
+      <n1:PersistentHotspare>0</n1:PersistentHotspare>
+      <n1:PrimaryStatus>0</n1:PrimaryStatus>
+      <n1:ProductName>PERC H755 Front</n1:ProductName>
+      <n1:RealtimeCapability>0</n1:RealtimeCapability>
+      <n1:RollupStatus>0</n1:RollupStatus>
+      <n1:SASAddress>52CEA7F06A603500</n1:SASAddress>
+      <n1:SecurityStatus>1</n1:SecurityStatus>
+      <n1:SharedSlotAssignmentAllowed>0</n1:SharedSlotAssignmentAllowed>
+      <n1:SlicedVDCapability>1</n1:SlicedVDCapability>
+      <n1:SupportControllerBootMode>0</n1:SupportControllerBootMode>
+      <n1:SupportEnhancedAutoForeignImport>1</n1:SupportEnhancedAutoForeignImport>
+      <n1:SupportRAID10UnevenSpans>1</n1:SupportRAID10UnevenSpans>
+      <n1:T10PICapability>0</n1:T10PICapability>
+      <n1:UpdateLockdownCapable>0</n1:UpdateLockdownCapable>
+      <n1:UpdateLockdownState>0</n1:UpdateLockdownState>
+    </n1:DCIM_ControllerView>
   </wsman:Items>
   <wsen:EnumerationContext/>
   <wsman:EndOfSequence/>
@@ -199,6 +199,43 @@
       <n1:T10PICapability>0</n1:T10PICapability>
       <n1:UsedSizeInBytes>0</n1:UsedSizeInBytes>
     </n1:DCIM_PhysicalDiskView>
+    <n1:DCIM_PhysicalDiskView>
+      <n1:BlockSizeInBytes>512</n1:BlockSizeInBytes>
+      <n1:BusProtocol>6</n1:BusProtocol>
+      <n1:Connector>0</n1:Connector>
+      <n1:DeviceDescription>Disk 0 in Backplane 1 of Int RAID Controller 1</n1:DeviceDescription>
+      <n1:DriveFormFactor>2</n1:DriveFormFactor>
+      <n1:FQDD>Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1</n1:FQDD>
+      <n1:FreeSizeInBytes>599550590976</n1:FreeSizeInBytes>
+      <n1:HotSpareStatus>0</n1:HotSpareStatus>
+      <n1:InstanceID>Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1</n1:InstanceID>
+      <n1:LastSystemInventoryTime>20150226180025.000000+000</n1:LastSystemInventoryTime>
+      <n1:LastUpdateTime>20150226180025.000000+000</n1:LastUpdateTime>
+      <n1:Manufacturer>SEAGATE </n1:Manufacturer>
+      <n1:ManufacturingDay>2</n1:ManufacturingDay>
+      <n1:ManufacturingWeek>33</n1:ManufacturingWeek>
+      <n1:ManufacturingYear>2014</n1:ManufacturingYear>
+      <n1:MaxCapableSpeed>3</n1:MaxCapableSpeed>
+      <n1:MediaType>0</n1:MediaType>
+      <n1:Model>ST600MM0006 </n1:Model>
+      <n1:OperationName>None</n1:OperationName>
+      <n1:OperationPercentComplete>0</n1:OperationPercentComplete>
+      <n1:PPID>CN07YX587262248G01MHA02 </n1:PPID>
+      <n1:PredictiveFailureState>0</n1:PredictiveFailureState>
+      <n1:PrimaryStatus>1</n1:PrimaryStatus>
+      <n1:RaidStatus>1</n1:RaidStatus>
+      <n1:RemainingRatedWriteEndurance>255</n1:RemainingRatedWriteEndurance>
+      <n1:Revision>LS0A</n1:Revision>
+      <n1:RollupStatus>1</n1:RollupStatus>
+      <n1:SASAddress>5000C5007764FF6D</n1:SASAddress>
+      <n1:SecurityState>0</n1:SecurityState>
+      <n1:SerialNumber>S0M3EVL6 </n1:SerialNumber>
+      <n1:SizeInBytes>599550590976</n1:SizeInBytes>
+      <n1:Slot>0</n1:Slot>
+      <n1:SupportedEncryptionTypes>None</n1:SupportedEncryptionTypes>
+      <n1:T10PICapability>0</n1:T10PICapability>
+      <n1:UsedSizeInBytes>0</n1:UsedSizeInBytes>
+    </n1:DCIM_PhysicalDiskView>
     <n2:DCIM_PCIeSSDView>
       <n2:BusProtocol>7</n2:BusProtocol>
       <n2:Bus>3E</n2:Bus>