Merge "Sort values of raid_config for 'share_physical_disks'"

This commit is contained in:
Jenkins
2016-06-25 08:51:52 +00:00
committed by Gerrit Code Review
3 changed files with 267 additions and 5 deletions

View File

@@ -113,6 +113,11 @@ def create_configuration(raid_config):
key=lambda x: x['size_gb']) +
[x for x in raid_config['logical_disks'] if x['size_gb'] == "MAX"])
if any(logical_disk['share_physical_disks']
for logical_disk in logical_disks_sorted
if 'share_physical_disks' in logical_disk):
logical_disks_sorted = _sort_shared_logical_disks(logical_disks_sorted)
# We figure out the new disk created by recording the wwns
# before and after the create, and then figuring out the
# newly found wwn from it.
@@ -168,6 +173,91 @@ def create_configuration(raid_config):
return raid_config
def _sort_shared_logical_disks(logical_disks):
"""Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions.
"""
is_shared = (lambda x: True if ('share_physical_disks' in x and
x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
if 'number_of_physical_disks' in x else
constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])
# Separate logical disks based on share_physical_disks value.
# 'logical_disks_shared' when share_physical_disks is True and
# 'logical_disks_nonshared' when share_physical_disks is False
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
target = (logical_disks_shared if is_shared(x)
else logical_disks_nonshared)
target.append(x)
# Separete logical disks with raid 1 from the 'logical_disks_shared' into
# 'logical_disks_shared_raid1' and remaining as
# 'logical_disks_shared_excl_raid1'.
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
else logical_disks_shared_excl_raid1)
target.append(x)
# Sort the 'logical_disks_shared' in reverse order based on
# 'number_of_physical_disks' attribute, if provided, otherwise minimum
# disks required to create the logical volume.
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
reverse=True,
key=num_of_disks)
# Move RAID 1+0 to first in 'logical_disks_shared' when number of physical
# disks needed to create logical volume cannot be shared with odd number of
# disks and disks higher than that of RAID 1+0.
check = True
for x in logical_disks_shared:
if x['raid_level'] == "1+0":
x_num = num_of_disks(x)
for y in logical_disks_shared:
if y['raid_level'] != "1+0":
y_num = num_of_disks(y)
if x_num < y_num:
check = (True if y_num % 2 == 0 else False)
if check:
break
if not check:
logical_disks_shared.remove(x)
logical_disks_shared.insert(0, x)
check = True
# Final 'logical_disks_sorted' list should have non shared logical disks
# first, followed by shared logical disks with RAID 1, and finally by the
# shared logical disks sorted based on number of disks and RAID 1+0
# condition.
logical_disks_sorted = (logical_disks_nonshared +
logical_disks_shared_raid1 +
logical_disks_shared)
return logical_disks_sorted
def delete_configuration():
"""Delete a RAID configuration on this server.

View File

@@ -1497,7 +1497,7 @@ Available options are:
'''
HPSSA_NO_DRIVES_2_PHYSICAL_DISKS = '''
HPSSA_NO_DRIVES_3_PHYSICAL_DISKS = '''
Smart Array P822 in Slot 2
Bus Interface: PCI
Slot: 2
@@ -1575,6 +1575,28 @@ Smart Array P822 in Slot 2
Carrier Application Version: 11
Carrier Bootloader Version: 6
physicaldrive 5I:1:3
Port: 5I
Box: 1
Bay: 1
Status: OK
Drive Type: Unassigned Drive
Interface Type: SAS
Size: 700 GB
Native Block Size: 512
Rotational Speed: 15000
Firmware Revision: HPD6
Serial Number: 6SL7G55D0000N4173JLT
Model: HP EF0600FARNA
Current Temperature (C): 35
Maximum Temperature (C): 43
PHY Count: 2
PHY Transfer Rate: 6.0Gbps, Unknown
Drive Authentication Status: OK
Carrier Application Version: 11
Carrier Bootloader Version: 6
SEP (Vendor ID PMCSIERA, Model SRCv24x6G) 380
Device Number: 380
Firmware Version: RevB
@@ -1700,6 +1722,29 @@ Smart Array P822 in Slot 2
Carrier Application Version: 11
Carrier Bootloader Version: 6
unassigned
physicaldrive 5I:1:3
Port: 5I
Box: 1
Bay: 1
Status: OK
Drive Type: Unassigned Drive
Interface Type: SAS
Size: 500 GB
Native Block Size: 512
Rotational Speed: 15000
Firmware Revision: HPD6
Serial Number: 6SL7G55D0000N4173JLT
Model: HP EF0600FARNA
Current Temperature (C): 35
Maximum Temperature (C): 43
PHY Count: 2
PHY Transfer Rate: 6.0Gbps, Unknown
Drive Authentication Status: OK
Carrier Application Version: 11
Carrier Bootloader Version: 6
'''
DRIVE_2_RAID_1_OKAY_TO_SHARE = '''
@@ -1850,6 +1895,30 @@ Smart Array P822 in Slot 2
Carrier Application Version: 11
Carrier Bootloader Version: 6
unassigned
physicaldrive 5I:1:3
Port: 5I
Box: 1
Bay: 2
Status: OK
Drive Type: Data Drive
Interface Type: SAS
Size: 600 GB
Native Block Size: 512
Rotational Speed: 15000
Firmware Revision: HPD6
Serial Number: 6SL7H2DM0000B41800Y0
Model: HP EF0600FARNA
Current Temperature (C): 37
Maximum Temperature (C): 44
PHY Count: 2
PHY Transfer Rate: 6.0Gbps, Unknown
Drive Authentication Status: OK
Carrier Application Version: 11
Carrier Bootloader Version: 6
SEP (Vendor ID PMCSIERA, Model SRCv24x6G) 380
Device Number: 380
Firmware Version: RevB

View File

@@ -170,7 +170,7 @@ class ManagerTestCases(testtools.TestCase):
@mock.patch.object(objects.Controller, 'execute_cmd')
def test_create_configuration_share_physical_disks(
self, controller_exec_cmd_mock, get_all_details_mock):
no_drives = raid_constants.HPSSA_NO_DRIVES_2_PHYSICAL_DISKS
no_drives = raid_constants.HPSSA_NO_DRIVES_3_PHYSICAL_DISKS
one_drive = raid_constants.ONE_DRIVE_RAID_1
two_drives = raid_constants.TWO_DRIVES_50GB_RAID1
get_all_details_mock.side_effect = [no_drives, one_drive, two_drives]
@@ -180,7 +180,8 @@ class ManagerTestCases(testtools.TestCase):
(None, None)]
raid_info = {'logical_disks': [{'size_gb': 50,
'share_physical_disks': True,
'raid_level': '1',
'number_of_physical_disks': 2,
'raid_level': '0',
'disk_type': 'hdd'},
{'size_gb': 50,
'share_physical_disks': True,
@@ -199,10 +200,44 @@ class ManagerTestCases(testtools.TestCase):
'create', 'type=logicaldrive', 'drives=5I:1:1,5I:1:2',
'raid=1', 'size=51200', process_input='y')
controller_exec_cmd_mock.assert_any_call(
'array', 'A', 'create', 'type=logicaldrive', 'raid=1', 'size=?',
'array', 'A', 'create', 'type=logicaldrive', 'raid=0', 'size=?',
dont_transform_to_hpssa_exception=True)
controller_exec_cmd_mock.assert_any_call(
'array', 'A', 'create', 'type=logicaldrive', 'raid=1',
'array', 'A', 'create', 'type=logicaldrive', 'raid=0',
'size=51200', process_input='y')
# Mixes a non-shared RAID 1 logical disk with a shared RAID 0 one;
# per the asserted hpssa calls, RAID 1 lands on drives 5I:1:1,5I:1:2
# and RAID 0 on 5I:1:3.
# NOTE(review): get_all_details_mock presumably comes from a
# class-level @mock.patch decorator — confirm in the enclosing class.
@mock.patch.object(objects.Controller, 'execute_cmd')
def test_create_configuration_share_nonshare_physical_disks(
self, controller_exec_cmd_mock, get_all_details_mock):
# Three successive hpssa detail reads: before creation, after the
# first volume, after the second volume.
no_drives = raid_constants.HPSSA_NO_DRIVES_3_PHYSICAL_DISKS
one_drive = raid_constants.ONE_DRIVE_RAID_1
two_drives = raid_constants.TWO_DRIVES_50GB_RAID1
get_all_details_mock.side_effect = [no_drives, one_drive, two_drives]
controller_exec_cmd_mock.side_effect = [
(None, None),
(raid_constants.DRIVE_2_RAID_1_OKAY_TO_SHARE, None),
(None, None)]
# First entry has no 'share_physical_disks' key (non-shared);
# second is a shared RAID 0 volume.
raid_info = {'logical_disks': [{'size_gb': 50,
'raid_level': '1',
'disk_type': 'hdd'},
{'size_gb': 50,
'share_physical_disks': True,
'raid_level': '0',
'disk_type': 'hdd'}]}
raid_info = manager.create_configuration(raid_info)
ld1 = raid_info['logical_disks'][0]
ld2 = raid_info['logical_disks'][1]
self.assertEqual('Smart Array P822 in Slot 2', ld1['controller'])
self.assertEqual('Smart Array P822 in Slot 2', ld2['controller'])
self.assertEqual(sorted(['5I:1:1', '5I:1:2']),
sorted(ld1['physical_disks']))
self.assertEqual(sorted(['5I:1:1', '5I:1:2']),
sorted(ld2['physical_disks']))
# Expected hpssa invocations for each created logical drive.
controller_exec_cmd_mock.assert_any_call(
'create', 'type=logicaldrive', 'drives=5I:1:1,5I:1:2',
'raid=1', 'size=51200', process_input='y')
controller_exec_cmd_mock.assert_any_call(
'create', 'type=logicaldrive', 'drives=5I:1:3', 'raid=0',
'size=51200', process_input='y')
@mock.patch.object(objects.Controller, 'execute_cmd')
@@ -234,6 +269,74 @@ class ManagerTestCases(testtools.TestCase):
'create', 'type=logicaldrive', 'drives=5I:1:3,5I:1:4,6I:1:5',
'raid=5', process_input='y')
# Verifies the sort order: the non-shared disk first, then shared
# disks in descending order of disks needed (explicit
# number_of_physical_disks for the RAID 0/450GB entry, RAID-level
# minimums otherwise); the RAID 1+0 entry stays between them here.
def test__sort_shared_logical_disks(self, get_all_details_mock):
logical_disk_sorted_expected = [
{'size_gb': 500, 'disk_type': 'hdd', 'raid_level': '1'},
{'share_physical_disks': True, 'size_gb': 450, 'disk_type': 'hdd',
'number_of_physical_disks': 6, 'raid_level': '0'},
{'share_physical_disks': True, 'size_gb': 200, 'disk_type': 'hdd',
'raid_level': '1+0'},
{'share_physical_disks': True, 'size_gb': 200, 'disk_type': 'hdd',
'raid_level': '0'},
{'share_physical_disks': True, 'size_gb': 100, 'disk_type': 'hdd',
'raid_level': '0'}]
# Input is already in the expected order; the call must keep it.
logical_disks = [{'size_gb': 500,
'disk_type': 'hdd',
'raid_level': '1'},
{'share_physical_disks': True,
'size_gb': 450,
'disk_type': 'hdd',
'number_of_physical_disks': 6,
'raid_level': '0'},
{'share_physical_disks': True,
'size_gb': 200,
'disk_type': 'hdd',
'raid_level': '1+0'},
{'share_physical_disks': True,
'size_gb': 200,
'disk_type': 'hdd',
'raid_level': '0'},
{'share_physical_disks': True,
'size_gb': 100,
'disk_type': 'hdd',
'raid_level': '0'}]
logical_disks_sorted = manager._sort_shared_logical_disks(
logical_disks)
self.assertEqual(logical_disks_sorted, logical_disk_sorted_expected)
# Verifies the RAID 1+0 adjustment: among shared disks, the RAID 1
# volume comes first, and the shared RAID 1+0 entry is moved ahead of
# the 5-disk (odd) RAID 0 entry it cannot share with.  Entries with
# share_physical_disks explicitly False sort with the non-shared ones.
def test__sort_shared_logical_disks_raid10(self, get_all_details_mock):
logical_disk_sorted_expected = [
{'size_gb': 600, 'disk_type': 'hdd', 'raid_level': '1'},
{'share_physical_disks': False, 'size_gb': 400, 'disk_type': 'hdd',
'raid_level': '1+0'},
{'share_physical_disks': False, 'size_gb': 100, 'disk_type': 'hdd',
'raid_level': '5'},
{'share_physical_disks': True, 'size_gb': 550, 'disk_type': 'hdd',
'raid_level': '1'},
{'share_physical_disks': True, 'size_gb': 200, 'disk_type': 'hdd',
'raid_level': '1+0'},
{'share_physical_disks': True, 'size_gb': 450, 'disk_type': 'hdd',
'number_of_physical_disks': 5, 'raid_level': '0'},
{'share_physical_disks': True, 'size_gb': 300, 'disk_type': 'hdd',
'raid_level': '5'}]
# Input deliberately interleaves shared and non-shared entries.
logical_disks = [
{'size_gb': 600, 'disk_type': 'hdd', 'raid_level': '1'},
{'share_physical_disks': True, 'size_gb': 550, 'disk_type': 'hdd',
'raid_level': '1'},
{'share_physical_disks': True, 'size_gb': 450, 'disk_type': 'hdd',
'number_of_physical_disks': 5, 'raid_level': '0'},
{'share_physical_disks': False, 'size_gb': 400, 'disk_type': 'hdd',
'raid_level': '1+0'},
{'share_physical_disks': True, 'size_gb': 300, 'disk_type': 'hdd',
'raid_level': '5'},
{'share_physical_disks': True, 'size_gb': 200, 'disk_type': 'hdd',
'raid_level': '1+0'},
{'share_physical_disks': False, 'size_gb': 100, 'disk_type': 'hdd',
'raid_level': '5'}]
logical_disks_sorted = manager._sort_shared_logical_disks(
logical_disks)
self.assertEqual(logical_disks_sorted, logical_disk_sorted_expected)
@mock.patch.object(manager, 'get_configuration')
@mock.patch.object(objects.Controller, 'execute_cmd')
def test_delete_configuration(self, controller_exec_cmd_mock,