Fix for H755 RAID controller when convert to JBOD

When converting Dell EMC PERC H755 RAID controller physical
disks to non-RAID mode, a RAID-0 virtual disk is created for
each physical disk and the disks are moved to the 'Online' state.

This differs from other controllers that support non-RAID
conversion, and it occupies physical disks so that they cannot
later be used for the user's intended RAID configuration. This
change excludes the H755 RAID controller's physical disks when
converting to non-RAID mode and leaves the disks in the 'Ready' state.

Change-Id: Id6de320c8e8a9ca21fac277718c405c657f58a3a
This commit is contained in:
Monica Pardhi 2021-08-31 11:43:56 +00:00
parent 90daeb3c0a
commit 9ebc4afc84
4 changed files with 227 additions and 27 deletions

View File

@ -911,6 +911,15 @@ class RAIDManagement(object):
RAID to JBOD or vice versa. It does this by only converting the
disks that are not already in the correct state.
When converting Dell EMC PERC H755 RAID controller physical disks
to non-RAID mode, RAID-0 virtual disks get created for each physical
disk and disks moved to 'Online' state.
This is different from other controllers supporting non-RAID conversion
and takes up physical disks that cannot later be used for the user's
intended RAID configuration. H755 RAID controllers are therefore excluded
when converting to non-RAID mode, leaving their disks in the 'Ready' state.
:param mode: constants.RaidStatus enumeration that indicates the mode
to change the disks to.
:param controllers_to_physical_disk_ids: Dictionary of controllers and
@ -932,11 +941,12 @@ class RAIDManagement(object):
physical_disks = self.list_physical_disks()
raid = constants.RaidStatus.raid
jbod = constants.RaidStatus.jbod
all_controllers = self.list_raid_controllers()
if not controllers_to_physical_disk_ids:
controllers_to_physical_disk_ids = collections.defaultdict(list)
all_controllers = self.list_raid_controllers()
for physical_d in physical_disks:
# Weed out disks that are not attached to a RAID controller
if self.is_raid_controller(physical_d.controller,
@ -946,6 +956,27 @@ class RAIDManagement(object):
physical_disk_ids.append(physical_d.id)
controllers_to_results = {}
# Filter out PERC H755 controllers, as they create RAID-0 virtual
# disks when converted to non-RAID mode. For each excluded controller,
# record a conversion result dictionary whose is_commit_required value
# is False and whose is_reboot_required value is RebootRequired.false.
if mode == jbod:
for cntlr in all_controllers:
if cntlr.model.startswith("PERC H755") and \
cntlr.id in controllers_to_physical_disk_ids:
LOG.debug("Excluding {} from converting to "
"non-RAID mode".format(cntlr.model))
del controllers_to_physical_disk_ids[cntlr.id]
controllers_to_results[cntlr.id] = \
utils.build_return_dict(
doc=None,
resource_uri=None,
is_commit_required_value=False,
is_reboot_required_value=constants.
RebootRequired.false)
'''Modify controllers_to_physical_disk_ids dict by inspecting desired
status vs current status of each controller's disks.
Raise exception if there are any failed drives or
@ -954,7 +985,6 @@ class RAIDManagement(object):
final_ctls_to_phys_disk_ids = self._check_disks_status(
mode, physical_disks, controllers_to_physical_disk_ids)
controllers_to_results = {}
for controller, physical_disk_ids \
in final_ctls_to_phys_disk_ids.items():
if physical_disk_ids:

View File

@ -40,12 +40,15 @@ class ClientRAIDManagementTestCase(base.BaseTest):
**test_utils.FAKE_ENDPOINT)
self.raid_controller_fqdd = "RAID.Integrated.1-1"
self.boss_controller_fqdd = "AHCI.Slot.3-1"
self.h755_controller_fqdd = "RAID.SL.8-1"
cntl_dict = {'RAID.Integrated.1-1':
['Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
'AHCI.Integrated.1-1':
['Disk.Bay.0:Enclosure.Internal.0-1:AHCI.Integrated.1-1',
'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1']}
'Disk.Bay.1:Enclosure.Internal.0-1:AHCI.Integrated.1-1'],
'RAID.SL.8-1':
['Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1']}
self.controllers_to_physical_disk_ids = cntl_dict
self.disk_1 = raid.PhysicalDisk(
id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
@ -119,6 +122,24 @@ class ClientRAIDManagementTestCase(base.BaseTest):
device_protocol=None,
bus=None)
self.disk_5 = raid.PhysicalDisk(
id='Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1',
description='Disk 0 in Backplane 1 of RAID Controller in SL 8',
controller='RAID.SL.8-1',
manufacturer='ATA',
model='ST91000640NS',
media_type='hdd',
interface_type='sata',
size_mb=953344,
free_size_mb=953344,
serial_number='9XG4SLGZ',
firmware_version='AA09',
status='ok',
raid_status='ready',
sas_address='500056B37789ABE3',
device_protocol=None,
bus=None)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@ -1226,7 +1247,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_check_disks_status_bad(self, mock_requests):
mode = constants.RaidStatus.raid
disk_2 = self.disk_2._replace(raid_status='FAKE_STATUS')
physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4]
physical_disks = [self.disk_1, disk_2, self.disk_3, self.disk_4,
self.disk_5]
raid_mgt = self.drac_client._raid_mgmt
self.assertRaises(ValueError,
@ -1244,7 +1266,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_check_disks_status_fail(self, mock_requests):
mode = constants.RaidStatus.raid
disk_2_failed = self.disk_2._replace(raid_status='failed')
physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4]
physical_disks = [self.disk_1, disk_2_failed, self.disk_3, self.disk_4,
self.disk_5]
raid_mgt = self.drac_client._raid_mgmt
self.assertRaises(ValueError,
@ -1263,7 +1286,7 @@ class ClientRAIDManagementTestCase(base.BaseTest):
raid_mgt = self.drac_client._raid_mgmt
mode = constants.RaidStatus.raid
physical_disks = [self.disk_1, self.disk_2,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
mode, physical_disks, self.controllers_to_physical_disk_ids)
@ -1274,7 +1297,7 @@ class ClientRAIDManagementTestCase(base.BaseTest):
disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
physical_disks = [disk_1_non_raid, disk_2_non_raid,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
mode, physical_disks, self.controllers_to_physical_disk_ids)
@ -1284,7 +1307,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_check_disks_status_change_state(self, mock_requests):
raid_mgt = self.drac_client._raid_mgmt
mode = constants.RaidStatus.jbod
physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
physical_disks = [self.disk_1, self.disk_2, self.disk_3,
self.disk_4, self.disk_5]
jbod_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
mode, physical_disks, self.controllers_to_physical_disk_ids)
@ -1295,7 +1319,7 @@ class ClientRAIDManagementTestCase(base.BaseTest):
disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
physical_disks = [disk_1_non_raid, disk_2_non_raid,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
raid_cntl_to_phys_disk_ids = raid_mgt._check_disks_status(
mode, physical_disks, self.controllers_to_physical_disk_ids)
raid_len = len(raid_cntl_to_phys_disk_ids['RAID.Integrated.1-1'])
@ -1305,7 +1329,8 @@ class ClientRAIDManagementTestCase(base.BaseTest):
mode = constants.RaidStatus.raid
disk_1_bad = self.disk_1._replace(raid_status='FAKE_STATUS')
disk_2_failed = self.disk_2._replace(raid_status='failed')
physical_disks = [disk_1_bad, disk_2_failed, self.disk_3, self.disk_4]
physical_disks = [disk_1_bad, disk_2_failed, self.disk_3,
self.disk_4, self.disk_5]
raid_mgt = self.drac_client._raid_mgmt
self.assertRaises(ValueError,
@ -1331,23 +1356,33 @@ class ClientRAIDManagementTestCase(base.BaseTest):
mock_convert_physical_disks,
wait_until_idrac_is_ready):
mode = constants.RaidStatus.jbod
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_PhysicalDiskView]['ok'])
mock_requests.post('https://1.2.3.4:443/wsman',
[{'text': test_utils.RAIDEnumerations[
uris.DCIM_PhysicalDiskView]['ok']},
{'text': test_utils.RAIDEnumerations[
uris.DCIM_ControllerView]['ok']}])
cvt_phys_disks_return_value = {'is_commit_required': True,
'is_reboot_required': constants.
RebootRequired.true}
h755_return_value = {'is_commit_required': False,
'is_reboot_required': constants.
RebootRequired.false}
mock_convert_physical_disks.return_value = cvt_phys_disks_return_value
expected_return_value = {'RAID.Integrated.1-1':
cvt_phys_disks_return_value,
'AHCI.Integrated.1-1':
cvt_phys_disks_return_value}
cvt_phys_disks_return_value,
'RAID.SL.8-1':
h755_return_value}
results = self.drac_client.change_physical_disk_state(
mode, self.controllers_to_physical_disk_ids)
self.assertEqual(results['conversion_results'],
expected_return_value)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@mock.patch.object(dracclient.resources.raid.RAIDManagement,
'list_physical_disks', spec_set=True,
autospec=True)
@ -1357,38 +1392,55 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_change_physical_disk_state_raid(
self, mock_requests,
mock_convert_physical_disks,
mock_list_physical_disks):
mock_list_physical_disks,
mock_wait_until_idrac_is_ready):
mode = constants.RaidStatus.raid
disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
physical_disks = [disk_1_non_raid, disk_2_non_raid,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
mock_list_physical_disks.return_value = physical_disks
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
boss_return_value = {'is_commit_required': False,
'is_reboot_required':
constants.RebootRequired.false}
raid_return_value = {'is_commit_required': True,
'is_reboot_required':
constants.RebootRequired.true}
h755_return_value = {'is_commit_required': False,
'is_reboot_required':
constants.RebootRequired.false}
mock_convert_physical_disks.return_value = raid_return_value
results = self.drac_client.change_physical_disk_state(
mode, self.controllers_to_physical_disk_ids)
self.assertEqual(len(results['conversion_results']), 2)
self.assertEqual(len(results['conversion_results']), 3)
self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
boss_return_value)
self.assertEqual(results['conversion_results']['RAID.Integrated.1-1'],
raid_return_value)
self.assertEqual(results['conversion_results']['RAID.SL.8-1'],
h755_return_value)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@mock.patch.object(dracclient.resources.raid.RAIDManagement,
'list_physical_disks', spec_set=True,
autospec=True)
def test_change_physical_disk_state_none(
self, mock_requests,
mock_list_physical_disks):
mock_list_physical_disks,
mock_wait_until_idrac_is_ready):
mode = constants.RaidStatus.raid
physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4,
self.disk_5]
mock_list_physical_disks.return_value = physical_disks
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
expected_return_value = {'is_commit_required': False,
'is_reboot_required':
constants.RebootRequired.false}
@ -1398,7 +1450,12 @@ class ClientRAIDManagementTestCase(base.BaseTest):
expected_return_value)
self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
expected_return_value)
self.assertEqual(results['conversion_results']['RAID.SL.8-1'],
expected_return_value)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@mock.patch.object(dracclient.resources.raid.RAIDManagement,
'list_physical_disks', spec_set=True,
autospec=True)
@ -1410,13 +1467,17 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_change_physical_disk_state_not_supported(
self, mock_requests,
mock_convert_physical_disks,
mock_list_physical_disks):
mock_list_physical_disks,
mock_wait_until_idrac_is_ready):
mode = constants.RaidStatus.raid
disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
physical_disks = [disk_1_non_raid, disk_2_non_raid,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
mock_list_physical_disks.return_value = physical_disks
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
expected_return_value = {'is_commit_required': False,
'is_reboot_required':
constants.RebootRequired.false}
@ -1426,7 +1487,12 @@ class ClientRAIDManagementTestCase(base.BaseTest):
expected_return_value)
self.assertEqual(results['conversion_results']['AHCI.Integrated.1-1'],
expected_return_value)
self.assertEqual(results['conversion_results']['RAID.SL.8-1'],
expected_return_value)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@mock.patch.object(dracclient.resources.raid.RAIDManagement,
'list_physical_disks', spec_set=True,
autospec=True)
@ -1438,13 +1504,17 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_change_physical_disk_state_raise_drac_operation_other(
self, mock_requests,
mock_convert_physical_disks,
mock_list_physical_disks):
mock_list_physical_disks,
mock_wait_until_idrac_is_ready):
mode = constants.RaidStatus.raid
disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
physical_disks = [disk_1_non_raid, disk_2_non_raid,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
mock_list_physical_disks.return_value = physical_disks
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
self.assertRaisesRegexp(
exceptions.DRACOperationFailed,
"OTHER_MESSAGE",
@ -1452,6 +1522,9 @@ class ClientRAIDManagementTestCase(base.BaseTest):
mode,
self.controllers_to_physical_disk_ids)
@mock.patch.object(dracclient.client.WSManClient,
'wait_until_idrac_is_ready', spec_set=True,
autospec=True)
@mock.patch.object(dracclient.resources.raid.RAIDManagement,
'list_physical_disks', spec_set=True,
autospec=True)
@ -1462,13 +1535,17 @@ class ClientRAIDManagementTestCase(base.BaseTest):
def test_change_physical_disk_state_raise_other(
self, mock_requests,
mock_convert_physical_disks,
mock_list_physical_disks):
mock_list_physical_disks,
mock_wait_until_idrac_is_ready):
mode = constants.RaidStatus.raid
disk_1_non_raid = self.disk_1._replace(raid_status='non-RAID')
disk_2_non_raid = self.disk_2._replace(raid_status='non-RAID')
physical_disks = [disk_1_non_raid, disk_2_non_raid,
self.disk_3, self.disk_4]
self.disk_3, self.disk_4, self.disk_5]
mock_list_physical_disks.return_value = physical_disks
mock_requests.post(
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
self.assertRaisesRegexp(
Exception,
"SOMETHING_BAD_HAPPENED",
@ -1494,16 +1571,23 @@ class ClientRAIDManagementTestCase(base.BaseTest):
'https://1.2.3.4:443/wsman',
text=test_utils.RAIDEnumerations[uris.DCIM_ControllerView]['ok'])
mode = constants.RaidStatus.jbod
physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4]
physical_disks = [self.disk_1, self.disk_2, self.disk_3, self.disk_4,
self.disk_5]
mock_list_physical_disks.return_value = physical_disks
cvt_phys_disks_return_value = {'is_commit_required': True,
'is_reboot_required': constants.
RebootRequired.true}
h755_return_value = {'is_commit_required': False,
'is_reboot_required':
constants.RebootRequired.false}
mock_convert_physical_disks.return_value = cvt_phys_disks_return_value
expected_return_value = {'RAID.Integrated.1-1':
cvt_phys_disks_return_value,
'AHCI.Integrated.1-1':
cvt_phys_disks_return_value}
cvt_phys_disks_return_value,
'RAID.SL.8-1':
h755_return_value}
results = self.drac_client.change_physical_disk_state(mode)
self.assertDictEqual(results['conversion_results'],
expected_return_value)

View File

@ -93,6 +93,55 @@
<n1:SupportRAID10UnevenSpans>0</n1:SupportRAID10UnevenSpans>
<n1:T10PICapability>0</n1:T10PICapability>
</n1:DCIM_ControllerView>
<n1:DCIM_ControllerView>
<n1:AlarmState>1</n1:AlarmState>
<n1:BootVirtualDiskFQDD xsi:nil="true"/>
<n1:Bus>1</n1:Bus>
<n1:CacheSizeInMB>8192</n1:CacheSizeInMB>
<n1:CachecadeCapability>0</n1:CachecadeCapability>
<n1:ConfigLockdownCapable>0</n1:ConfigLockdownCapable>
<n1:ConfigLockdownState>0</n1:ConfigLockdownState>
<n1:ConnectorCount>4</n1:ConnectorCount>
<n1:ControllerFirmwareVersion>52.13.2-3661</n1:ControllerFirmwareVersion>
<n1:Device>0</n1:Device>
<n1:DeviceCardDataBusWidth>Unknown</n1:DeviceCardDataBusWidth>
<n1:DeviceCardManufacturer>DELL</n1:DeviceCardManufacturer>
<n1:DeviceCardSlotLength>2</n1:DeviceCardSlotLength>
<n1:DeviceCardSlotType>Unknown</n1:DeviceCardSlotType>
<n1:DeviceDescription>RAID Controller in SL 8</n1:DeviceDescription>
<n1:DriverVersion xsi:nil="true"/>
<n1:EncryptionCapability>1</n1:EncryptionCapability>
<n1:EncryptionMode>0</n1:EncryptionMode>
<n1:FQDD>RAID.SL.8-1</n1:FQDD>
<n1:Function>0</n1:Function>
<n1:InstanceID>RAID.SL.8-1</n1:InstanceID>
<n1:KeyID xsi:nil="true"/>
<n1:LastSystemInventoryTime>20210831081620.000000+000</n1:LastSystemInventoryTime>
<n1:LastUpdateTime>20210831081620.000000+000</n1:LastUpdateTime>
<n1:MaxAvailablePCILinkSpeed xsi:nil="true"/>
<n1:MaxPossiblePCILinkSpeed xsi:nil="true"/>
<n1:PCIDeviceID>10E2</n1:PCIDeviceID>
<n1:PCISlot xsi:nil="true"/>
<n1:PCISubDeviceID>1AE1</n1:PCISubDeviceID>
<n1:PCISubVendorID>1028</n1:PCISubVendorID>
<n1:PCIVendorID>1000</n1:PCIVendorID>
<n1:PatrolReadState>1</n1:PatrolReadState>
<n1:PersistentHotspare>0</n1:PersistentHotspare>
<n1:PrimaryStatus>0</n1:PrimaryStatus>
<n1:ProductName>PERC H755 Front</n1:ProductName>
<n1:RealtimeCapability>0</n1:RealtimeCapability>
<n1:RollupStatus>0</n1:RollupStatus>
<n1:SASAddress>52CEA7F06A603500</n1:SASAddress>
<n1:SecurityStatus>1</n1:SecurityStatus>
<n1:SharedSlotAssignmentAllowed>0</n1:SharedSlotAssignmentAllowed>
<n1:SlicedVDCapability>1</n1:SlicedVDCapability>
<n1:SupportControllerBootMode>0</n1:SupportControllerBootMode>
<n1:SupportEnhancedAutoForeignImport>1</n1:SupportEnhancedAutoForeignImport>
<n1:SupportRAID10UnevenSpans>1</n1:SupportRAID10UnevenSpans>
<n1:T10PICapability>0</n1:T10PICapability>
<n1:UpdateLockdownCapable>0</n1:UpdateLockdownCapable>
<n1:UpdateLockdownState>0</n1:UpdateLockdownState>
</n1:DCIM_ControllerView>
</wsman:Items>
<wsen:EnumerationContext/>
<wsman:EndOfSequence/>

View File

@ -199,6 +199,43 @@
<n1:T10PICapability>0</n1:T10PICapability>
<n1:UsedSizeInBytes>0</n1:UsedSizeInBytes>
</n1:DCIM_PhysicalDiskView>
<n1:DCIM_PhysicalDiskView>
<n1:BlockSizeInBytes>512</n1:BlockSizeInBytes>
<n1:BusProtocol>6</n1:BusProtocol>
<n1:Connector>0</n1:Connector>
<n1:DeviceDescription>Disk 0 in Backplane 1 of RAID Controller in SL 8</n1:DeviceDescription>
<n1:DriveFormFactor>2</n1:DriveFormFactor>
<n1:FQDD>Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1</n1:FQDD>
<n1:FreeSizeInBytes>599550590976</n1:FreeSizeInBytes>
<n1:HotSpareStatus>0</n1:HotSpareStatus>
<n1:InstanceID>Disk.Bay.0:Enclosure.Internal.0-1:RAID.SL.8-1</n1:InstanceID>
<n1:LastSystemInventoryTime>20150226180025.000000+000</n1:LastSystemInventoryTime>
<n1:LastUpdateTime>20150226180025.000000+000</n1:LastUpdateTime>
<n1:Manufacturer>SEAGATE </n1:Manufacturer>
<n1:ManufacturingDay>2</n1:ManufacturingDay>
<n1:ManufacturingWeek>33</n1:ManufacturingWeek>
<n1:ManufacturingYear>2014</n1:ManufacturingYear>
<n1:MaxCapableSpeed>3</n1:MaxCapableSpeed>
<n1:MediaType>0</n1:MediaType>
<n1:Model>ST600MM0006 </n1:Model>
<n1:OperationName>None</n1:OperationName>
<n1:OperationPercentComplete>0</n1:OperationPercentComplete>
<n1:PPID>CN07YX587262248G01MHA02 </n1:PPID>
<n1:PredictiveFailureState>0</n1:PredictiveFailureState>
<n1:PrimaryStatus>1</n1:PrimaryStatus>
<n1:RaidStatus>1</n1:RaidStatus>
<n1:RemainingRatedWriteEndurance>255</n1:RemainingRatedWriteEndurance>
<n1:Revision>LS0A</n1:Revision>
<n1:RollupStatus>1</n1:RollupStatus>
<n1:SASAddress>5000C5007764FF6D</n1:SASAddress>
<n1:SecurityState>0</n1:SecurityState>
<n1:SerialNumber>S0M3EVL6 </n1:SerialNumber>
<n1:SizeInBytes>599550590976</n1:SizeInBytes>
<n1:Slot>0</n1:Slot>
<n1:SupportedEncryptionTypes>None</n1:SupportedEncryptionTypes>
<n1:T10PICapability>0</n1:T10PICapability>
<n1:UsedSizeInBytes>0</n1:UsedSizeInBytes>
</n1:DCIM_PhysicalDiskView>
<n2:DCIM_PCIeSSDView>
<n2:BusProtocol>7</n2:BusProtocol>
<n2:Bus>3E</n2:Bus>