PowerMax Driver - Port Group & Port Load Balancing

This submission implements the new PowerMax Cinder driver
feature allowing attach paths to be determined by the load on a
given Port Group or Port.

Change-Id: Ie15f6c541c5b091bc16b42c116c1ff9c573ffe8a
Implements: blueprint powermax-port-load-balance
Michael McAleer 2020-07-16 14:17:08 +01:00
parent d592b2ad0d
commit 55042c357e
15 changed files with 1599 additions and 153 deletions
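The new behaviour is opt-in and driven entirely by backend configuration. A minimal sketch of the options this change introduces, mirroring the keyword arguments exercised by the FakeConfiguration-based unit tests below (the option names match the powermax_opts registrations in common.py; the values are illustrative only):

# Illustrative sketch only -- option names are taken from this change,
# values are examples.
load_balance_opts = {
    'load_balance': True,                     # enable Port Group/Port load balancing
    'load_balance_real_time': True,           # use real-time metrics for Port selection
    'load_data_format': 'avg',                # 'avg' or 'max' for diagnostic metrics
    'load_look_back': 60,                     # diagnostic look-back window, minutes
    'load_look_back_real_time': 10,           # real-time look-back window, minutes
    'port_group_load_metric': 'PercentBusy',  # metric for Port Group load
    'port_load_metric': 'PercentBusy',        # metric for Port load
}

In the unit tests these are passed directly as FakeConfiguration keyword arguments; in a real deployment the equivalent options are set in the backend stanza of cinder.conf.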


@ -14,6 +14,7 @@
# under the License.
from copy import deepcopy
import random
import six
@ -97,10 +98,13 @@ class PowerMaxData(object):
wwpn1 = '123456789012345'
wwpn2 = '123456789054321'
wwnn1 = '223456789012345'
initiator = 'iqn.1993-08.org.debian: 01: 222'
ip, ip2 = u'123.456.7.8', u'123.456.7.9'
iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001'
iqn2 = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000002,t,0x0001'
wwnn2 = '223456789012346'
initiator = 'iqn.1993-08.org.debian:01:222'
iscsi_dir = 'SE-4E'
iscsi_port = '1'
ip, ip2 = '123.456.7.8', '123.456.7.9'
iqn = 'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001'
iqn2 = 'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000002'
connector = {'ip': ip,
'initiator': initiator,
'wwpns': [wwpn1, wwpn2],
@ -111,6 +115,7 @@ class PowerMaxData(object):
end_point_map = {connector['wwpns'][0]: [wwnn1],
connector['wwpns'][1]: [wwnn1]}
target_wwns = [wwnn1]
target_wwns_multi = [wwnn1, wwnn2]
zoning_mappings = {
'array': u'000197800123',
'init_targ_map': end_point_map,
@ -132,16 +137,22 @@ class PowerMaxData(object):
}
device_map[fabric_name] = fabric_map
iscsi_dir_port = '%(dir)s:%(port)s' % {'dir': iscsi_dir,
'port': iscsi_port}
iscsi_dir_virtual_port = '%(dir)s:%(port)s' % {'dir': iscsi_dir,
'port': '000'}
iscsi_device_info = {'maskingview': masking_view_name_i,
'ip_and_iqn': [{'ip': ip,
'iqn': initiator}],
'iqn': initiator,
'physical_port': iscsi_dir_port}],
'is_multipath': True,
'array': array,
'controller': {'host': '10.00.00.00'},
'hostlunid': 3,
'device_id': device_id}
iscsi_device_info_metro = deepcopy(iscsi_device_info)
iscsi_device_info_metro['metro_ip_and_iqn'] = [{'ip': ip2, 'iqn': iqn2}]
iscsi_device_info_metro['metro_ip_and_iqn'] = [{
'ip': ip2, 'iqn': iqn2, 'physical_port': iscsi_dir_port}]
iscsi_device_info_metro['metro_hostlunid'] = 2
fc_device_info = {'maskingview': masking_view_name_f,
@ -149,6 +160,11 @@ class PowerMaxData(object):
'controller': {'host': '10.00.00.00'},
'hostlunid': 3}
director_port_keys_empty = {'symmetrixPortKey': []}
director_port_keys_multiple = {'symmetrixPortKey': [
{'directorId': 'SE-1E', 'portId': '1'},
{'directorId': 'SE-1E', 'portId': '2'}]}
# snapshot info
snapshot_id = '390eeb4d-0f56-4a02-ba14-167167967014'
snapshot_display_id = 'my_snap'
@ -1531,3 +1547,66 @@ class PowerMaxData(object):
'description': vol_create_desc1},
{'execution_order': 2,
'description': vol_create_desc2}]
# performance
f_date_a = 1593432600000
f_date_b = 1594136400000
l_date = 1594730100000
perf_pb_metric = 'PercentBusy'
perf_df_avg = 'Average'
perf_port_groups = ['port_group_a', 'port_group_b', 'port_group_c']
perf_ports = ['SE-1E:1', 'SE-1E:2', 'SE-1E:3']
performance_config = {
'load_balance': True, 'load_balance_rt': True,
'perf_registered': True, 'rt_registered': True,
'collection_interval': 5, 'data_format': 'Average',
'look_back': 60, 'look_back_rt': 10,
'port_group_metric': 'PercentBusy', 'port_metric': 'PercentBusy'}
array_registration = {"registrationDetailsInfo": [
{"symmetrixId": array, "realtime": True, "message": "Success",
"collectionintervalmins": 5, "diagnostic": True}]}
array_keys = {"arrayInfo": [
{"symmetrixId": array,
"firstAvailableDate": f_date_a,
"lastAvailableDate": l_date},
{"symmetrixId": array_herc,
"firstAvailableDate": f_date_a,
"lastAvailableDate": l_date},
{"symmetrixId": remote_array,
"firstAvailableDate": f_date_b,
"lastAvailableDate": l_date}]}
dummy_performance_data = {
"expirationTime": 1594731525645,
"count": 10,
"maxPageSize": 1000,
"id": "3b757302-6e4a-4dbe-887d-e42aed7f5944_0",
"resultList": {
"result": [
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593432600000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593432900000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593433200000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593433500000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593433800000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593434100000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593434400000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593434700000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593435000000},
{"PercentBusy": random.uniform(0.0, 100.0),
"timestamp": 1593435300000}],
"from": 1,
"to": 10
}
}


@ -121,13 +121,24 @@ class FakeRequestsSession(object):
elif 'system' in url:
if 'director' in url:
return_object = self._system_port(url)
url_split = url.split('/')
if 'port' in url_split[-1]:
return_object = self._system_port_list(url)
elif url_split[-2] == 'port':
return_object = self._system_port_detail(url)
else:
return_object = self._system(url)
elif 'headroom' in url:
return_object = self.data.headroom
elif 'performance' in url:
if 'Array' in url:
if 'registrationdetails' in url:
return_object = self._performance_registration(url)
if 'keys' in url:
return_object = self.data.array_keys
return status_code, return_object
def _sloprovisioning_volume(self, url, params):
@ -169,7 +180,7 @@ class FakeRequestsSession(object):
break
return return_object
def _system_port(self, url):
def _system_port_detail(self, url):
return_object = None
for port in self.data.port_list:
if port['symmetrixPort']['symmetrixPortKey']['directorId'] in url:
@ -177,6 +188,12 @@ class FakeRequestsSession(object):
break
return return_object
@staticmethod
def _system_port_list(url):
url_split = url.split('/')
return {'symmetrixPortKey': [{'directorId': url_split[-2],
'portId': '1'}]}
def _sloprovisioning_ig(self, url):
return_object = None
for ig in self.data.inititiatorgroup:
@ -241,12 +258,33 @@ class FakeRequestsSession(object):
break
return return_object
@staticmethod
def _performance_registration(url):
url_split = url.split('/')
array_id = url_split[-1]
return {"registrationDetailsInfo": [
{"symmetrixId": array_id, "realtime": True, "message": "Success",
"collectionintervalmins": 5, "diagnostic": True}]}
def _post_or_put(self, url, payload):
return_object = self.data.job_list[0]
status_code = 201
if self.data.failed_resource in url:
if 'performance' in url:
if 'PortGroup' in url:
if 'metrics' in url:
return 200, self.data.dummy_performance_data
elif 'FEPort' in url:
if 'metrics' in url:
return 200, self.data.dummy_performance_data
elif 'realtime' in url:
if 'metrics' in url:
return 200, self.data.dummy_performance_data
elif self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
elif payload:
payload = ast.literal_eval(payload)
if self.data.failed_resource in payload.values():
@ -254,6 +292,7 @@ class FakeRequestsSession(object):
return_object = self.data.job_list[2]
if payload.get('executionOption'):
status_code = 202
return status_code, return_object
def _delete(self, url):
@ -287,48 +326,90 @@ class FakeConfiguration(object):
if replication_device:
self.replication_device = replication_device
for key, value in kwargs.items():
if key == 'san_login':
self.san_login = value
elif key == 'san_password':
self.san_password = value
elif key == 'san_ip':
self.san_ip = value
elif key == 'san_api_port':
self.san_api_port = value
elif key == 'vmax_srp':
self.vmax_srp = value
elif key == 'vmax_service_level':
self.vmax_service_level = value
elif key == 'vmax_workload':
self.vmax_workload = value
elif key == 'vmax_port_groups':
self.vmax_port_groups = value
elif key == 'vmax_array':
self.vmax_array = value
elif key == 'use_chap_auth':
self.use_chap_auth = value
elif key == 'chap_username':
self.chap_username = value
elif key == 'chap_password':
self.chap_password = value
elif key == 'driver_ssl_cert_verify':
self.driver_ssl_cert_verify = value
elif key == 'driver_ssl_cert_path':
self.driver_ssl_cert_path = value
elif key == 'u4p_failover_target':
self.u4p_failover_target = value
elif key == 'u4p_failover_backoff_factor':
self.u4p_failover_backoff_factor = value
elif key == 'u4p_failover_retries':
self.u4p_failover_retries = value
elif key == 'u4p_failover_timeout':
self.u4p_failover_timeout = value
elif key == 'u4p_primary':
self.u4p_primary = value
elif key == 'powermax_short_host_name_template':
self.powermax_short_host_name_template = value
elif key == 'powermax_port_group_name_template':
self.powermax_port_group_name_template = value
if 'san_' in key:
self.set_san_config_options(key, value)
elif 'vmax_' in key:
self.set_vmax_config_options(key, value)
elif 'chap_' in key:
self.set_chap_config_options(key, value)
elif 'driver_ssl_cert' in key:
self.set_ssl_cert_config_options(key, value)
elif 'u4p_' in key:
self.set_u4p_failover_config_options(key, value)
elif '_name_template' in key:
self.set_host_name_template_config_options(key, value)
elif 'load_' in key:
self.set_performance_config_options(key, value)
def set_san_config_options(self, key, value):
if key == 'san_login':
self.san_login = value
elif key == 'san_password':
self.san_password = value
elif key == 'san_ip':
self.san_ip = value
elif key == 'san_api_port':
self.san_api_port = value
def set_vmax_config_options(self, key, value):
if key == 'vmax_srp':
self.vmax_srp = value
elif key == 'vmax_service_level':
self.vmax_service_level = value
elif key == 'vmax_workload':
self.vmax_workload = value
elif key == 'vmax_port_groups':
self.vmax_port_groups = value
elif key == 'vmax_array':
self.vmax_array = value
def set_chap_config_options(self, key, value):
if key == 'use_chap_auth':
self.use_chap_auth = value
elif key == 'chap_username':
self.chap_username = value
elif key == 'chap_password':
self.chap_password = value
def set_ssl_cert_config_options(self, key, value):
if key == 'driver_ssl_cert_verify':
self.driver_ssl_cert_verify = value
elif key == 'driver_ssl_cert_path':
self.driver_ssl_cert_path = value
def set_u4p_failover_config_options(self, key, value):
if key == 'u4p_failover_target':
self.u4p_failover_target = value
elif key == 'u4p_failover_backoff_factor':
self.u4p_failover_backoff_factor = value
elif key == 'u4p_failover_retries':
self.u4p_failover_retries = value
elif key == 'u4p_failover_timeout':
self.u4p_failover_timeout = value
elif key == 'u4p_primary':
self.u4p_primary = value
def set_host_name_template_config_options(self, key, value):
if key == 'powermax_short_host_name_template':
self.powermax_short_host_name_template = value
elif key == 'powermax_port_group_name_template':
self.powermax_port_group_name_template = value
def set_performance_config_options(self, key, value):
if key == 'load_balance':
self.load_balance = value
elif key == 'load_balance_real_time':
self.load_balance_real_time = value
elif key == 'load_data_format':
self.load_data_format = value
elif key == 'load_look_back':
self.load_look_back = value
elif key == 'load_look_back_real_time':
self.load_look_back_real_time = value
elif key == 'port_group_load_metric':
self.port_group_load_metric = value
elif key == 'port_load_metric':
self.port_load_metric = value
def safe_get(self, key):
try:


@ -584,6 +584,15 @@ class PowerMaxCommonTest(test.TestCase):
device_info_dict = self.common.initialize_connection(volume, connector)
self.assertEqual(ref_dict, device_info_dict)
def test_initialize_connection_setup_init_conn(self):
volume = self.data.test_volume
connector = self.data.connector
with mock.patch.object(
self.common, '_initial_setup',
side_effect=self.common._initial_setup) as mck_setup:
self.common.initialize_connection(volume, connector)
mck_setup.assert_called_once_with(volume, init_conn=True)
def test_initialize_connection_already_mapped_next_gen(self):
with mock.patch.object(self.rest, 'is_next_gen_array',
return_value=True):
@ -984,6 +993,18 @@ class PowerMaxCommonTest(test.TestCase):
self.assertRaises(exception.VolumeBackendAPIException,
self.common._initial_setup, volume)
def test_initial_setup_success_specs_init_conn_call(self):
volume = self.data.test_volume
array_info = self.common.get_attributes_from_cinder_config()
extra_specs, __ = self.common._set_config_file_and_get_extra_specs(
volume)
with mock.patch.object(
self.common, '_set_vmax_extra_specs',
side_effect=self.common._set_vmax_extra_specs) as mck_specs:
self.common._initial_setup(volume, init_conn=True)
mck_specs.assert_called_once_with(
extra_specs, array_info, True)
@mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume',
return_value=tpd.PowerMaxData.rdf_group_vol_details)
def test_populate_masking_dict(self, mock_tgt):
@ -1488,6 +1509,17 @@ class PowerMaxCommonTest(test.TestCase):
self.common._set_vmax_extra_specs,
self.data.vol_type_extra_specs_tags_bad, srp_record)
def test_set_vmax_extra_specs_pg_specs_init_conn(self):
pool_record = self.common.get_attributes_from_cinder_config()
with mock.patch.object(
self.common, '_select_port_group_for_extra_specs',
side_effect=(
self.common._select_port_group_for_extra_specs)) as mck_s:
self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, pool_record, init_conn=True)
mck_s.assert_called_once_with(
self.data.vol_type_extra_specs, pool_record, True)
def test_delete_volume_from_srp_success(self):
array = self.data.array
device_id = self.data.device_id
@ -1611,18 +1643,25 @@ class PowerMaxCommonTest(test.TestCase):
mock_get.assert_called_once_with(
array, portgroup_name, initiator_group_name)
def test_get_ip_and_iqn(self):
def test_get_iscsi_ip_iqn_port(self):
phys_port = '%(dir)s:%(port)s' % {'dir': self.data.iscsi_dir,
'port': self.data.iscsi_port}
ref_ip_iqn = [{'iqn': self.data.initiator,
'ip': self.data.ip}]
'ip': self.data.ip,
'physical_port': phys_port}]
director = self.data.portgroup[1]['symmetrixPortKey'][0]['directorId']
port = self.data.portgroup[1]['symmetrixPortKey'][0]['portId']
dirport = "%s:%s" % (director, port)
ip_iqn_list = self.common._get_ip_and_iqn(self.data.array, dirport)
ip_iqn_list = self.common._get_iscsi_ip_iqn_port(self.data.array,
dirport)
self.assertEqual(ref_ip_iqn, ip_iqn_list)
def test_find_ip_and_iqns(self):
ref_ip_iqn = [{'iqn': self.data.initiator,
'ip': self.data.ip}]
'ip': self.data.ip,
'physical_port': self.data.iscsi_dir_port}]
ip_iqn_list = self.common._find_ip_and_iqns(
self.data.array, self.data.port_group_name_i)
self.assertEqual(ref_ip_iqn, ip_iqn_list)
@ -2390,7 +2429,7 @@ class PowerMaxCommonTest(test.TestCase):
{'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
'PortGroup': [self.data.port_group_name_i]})
old_conf = tpfo.FakeConfiguration(None, 'CommonTests', 1, 1)
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
@ -2408,7 +2447,7 @@ class PowerMaxCommonTest(test.TestCase):
{'RestServerIp': '1.1.1.1', 'RestServerPort': 3448,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
'PortGroup': [self.data.port_group_name_i]})
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc',
@ -2422,7 +2461,7 @@ class PowerMaxCommonTest(test.TestCase):
{'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
'PortGroup': [self.data.port_group_name_i]})
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc',
@ -2715,7 +2754,7 @@ class PowerMaxCommonTest(test.TestCase):
'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False,
'SerialNumber': '000197800123', 'srpName': 'SRP_1',
'PortGroup': 'OS-fibre-PG'}
'PortGroup': ['OS-fibre-PG']}
self.common.configuration.vmax_service_level = None
self.common.configuration.vmax_workload = 'DSS'
@ -3570,3 +3609,72 @@ class PowerMaxCommonTest(test.TestCase):
self.data.test_group_1, self.common.interval,
self.common.retries)
mock_array.assert_called_once()
def test_get_performance_config(self):
ref_cinder_conf = tpfo.FakeConfiguration(
None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc',
san_api_port=8443, vmax_port_groups=[self.data.port_group_name_f],
load_balance=True, load_balance_real_time=True,
load_data_format='avg', load_look_back=60,
load_look_back_real_time=10, port_group_load_metric='PercentBusy',
port_load_metric='PercentBusy')
ref_perf_conf = self.data.performance_config
volume_utils.get_max_over_subscription_ratio = mock.Mock()
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=ref_cinder_conf)
self.assertEqual(ref_perf_conf, driver.common.performance.config)
def test_select_port_group_for_extra_specs_volume_type(self):
"""Test _select_port_group_for_extra_specs PG in volume-type."""
extra_specs = {utils.PORTGROUPNAME: self.data.port_group_name_i}
pool_record = {}
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record)
self.assertEqual(self.data.port_group_name_i, port_group)
def test_select_port_group_for_extra_specs_cinder_conf_single(self):
"""Test _select_port_group_for_extra_specs single PG in cinder conf."""
extra_specs = {}
pool_record = {utils.PORT_GROUP: [self.data.port_group_name_i]}
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record)
self.assertEqual(self.data.port_group_name_i, port_group)
def test_select_port_group_for_extra_specs_cinder_conf_multi(self):
"""Test _select_port_group_for_extra_specs multi PG in cinder conf.
Random selection is used, no performance configuration supplied.
"""
extra_specs = {}
pool_record = {utils.PORT_GROUP: self.data.perf_port_groups}
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record)
self.assertIn(port_group, self.data.perf_port_groups)
def test_select_port_group_for_extra_specs_load_balanced(self):
"""Test _select_port_group_for_extra_specs multi PG in cinder conf.
Load balanced selection is used, performance configuration supplied.
"""
extra_specs = {utils.ARRAY: self.data.array}
pool_record = {utils.PORT_GROUP: self.data.perf_port_groups}
self.common.performance.config = self.data.performance_config
with mock.patch.object(
self.common.performance, 'process_port_group_load',
side_effect=(
self.common.performance.process_port_group_load)) as (
mck_process):
port_group = self.common._select_port_group_for_extra_specs(
extra_specs, pool_record, init_conn=True)
mck_process.assert_called_once_with(
self.data.array, self.data.perf_port_groups)
self.assertIn(port_group, self.data.perf_port_groups)
def test_select_port_group_for_extra_specs_exception(self):
"""Test _select_port_group_for_extra_specs exception."""
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._select_port_group_for_extra_specs, {}, {})


@ -15,6 +15,7 @@
from unittest import mock
from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_data as tpd)
@ -112,7 +113,8 @@ class PowerMaxFCTest(test.TestCase):
self.data.connector)
self.assertEqual(ref_data, data)
mock_build.assert_called_once_with(
self.data.test_volume, self.data.connector)
self.data.test_volume, self.data.connector,
self.data.fc_device_info)
def test_terminate_connection(self):
with mock.patch.object(
@ -187,7 +189,7 @@ class PowerMaxFCTest(test.TestCase):
data = self.driver._cleanup_zones(self.data.zoning_mappings)
self.assertEqual(ref_data, data)
def test_build_initiator_target_map(self):
def test_build_initiator_target_map_default(self):
ref_target_map = {'123456789012345': ['543210987654321'],
'123456789054321': ['123450987654321']}
with mock.patch.object(fczm_utils, 'create_lookup_service',
@ -200,6 +202,39 @@ class PowerMaxFCTest(test.TestCase):
self.data.test_volume, self.data.connector)
self.assertEqual(ref_target_map, target_map)
def test_build_initiator_target_map_load_balanced(self):
init_wwns = self.data.connector.get('wwpns')
init_a, init_b = init_wwns[0], init_wwns[1]
self.driver.performance.config = self.data.performance_config
with mock.patch.object(
self.common, 'get_target_wwns_from_masking_view',
return_value=(self.data.target_wwns_multi, [])):
targets, target_map = self.driver._build_initiator_target_map(
self.data.test_volume, self.data.connector,
device_info=self.data.iscsi_device_info)
self.assertEqual(1, len(target_map.get(init_a)))
self.assertEqual(1, len(target_map.get(init_b)))
self.assertTrue(
len(target_map.get(init_a)) < len(self.data.target_wwns_multi))
self.assertTrue(
len(target_map.get(init_b)) < len(self.data.target_wwns_multi))
def test_build_initiator_target_map_load_balanced_exception(self):
ref_target_map = {'123456789012345': self.data.target_wwns_multi,
'123456789054321': self.data.target_wwns_multi}
self.driver.performance.config = self.data.performance_config
with mock.patch.object(
self.common, 'get_target_wwns_from_masking_view',
return_value=(self.data.target_wwns_multi, [])) as mck_wwns:
with mock.patch.object(
self.driver.performance, 'process_port_load',
side_effect=exception.VolumeBackendAPIException('')):
targets, target_map = self.driver._build_initiator_target_map(
self.data.test_volume, self.data.connector,
device_info=self.data.iscsi_device_info)
self.assertEqual(ref_target_map, target_map)
self.assertEqual(mck_wwns.call_count, 2)
def test_extend_volume(self):
with mock.patch.object(self.common, 'extend_volume') as mock_extend:
self.driver.extend_volume(self.data.test_volume, '3')


@ -88,11 +88,14 @@ class PowerMaxISCSITest(test.TestCase):
self.data.test_snapshot, self.data.test_snapshot.volume)
def test_initialize_connection(self):
phys_port = '%(dir)s:%(port)s' % {'dir': self.data.iscsi_dir,
'port': self.data.iscsi_port}
ref_dict = {'maskingview': self.data.masking_view_name_f,
'array': self.data.array, 'hostlunid': 3,
'device_id': self.data.device_id,
'ip_and_iqn': [{'ip': self.data.ip,
'iqn': self.data.initiator}],
'iqn': self.data.initiator,
'physical_port': phys_port}],
'is_multipath': False}
with mock.patch.object(self.driver, 'get_iscsi_dict') as mock_get:
with mock.patch.object(
@ -116,7 +119,8 @@ class PowerMaxISCSITest(test.TestCase):
data = self.driver.get_iscsi_dict(device_info, volume)
self.assertEqual(ref_data, data)
mock_get.assert_called_once_with(
volume, ip_and_iqn, True, host_lun_id, None, None)
self.data.array, volume, ip_and_iqn, True, host_lun_id, None,
None)
def test_get_iscsi_dict_exception(self):
device_info = {'ip_and_iqn': ''}
@ -136,7 +140,7 @@ class PowerMaxISCSITest(test.TestCase):
data = self.driver.get_iscsi_dict(device_info, volume)
self.assertEqual(ref_data, data)
mock_get.assert_called_once_with(
volume, ip_and_iqn, True, host_lun_id,
self.data.array, volume, ip_and_iqn, True, host_lun_id,
self.data.iscsi_device_info_metro['metro_ip_and_iqn'],
self.data.iscsi_device_info_metro['metro_hostlunid'])
@ -152,28 +156,80 @@ class PowerMaxISCSITest(test.TestCase):
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
vol, ip_and_iqn, True, host_lun_id, [], None)
self.data.array, vol, ip_and_iqn, True, host_lun_id, [], None)
self.assertEqual(type(ref_properties), type(iscsi_properties))
self.assertEqual(ref_properties, iscsi_properties)
def test_vmax_get_iscsi_properties_multiple_targets(self):
def test_vmax_get_iscsi_properties_multiple_targets_random_select(self):
ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator},
{'ip': self.data.ip, 'iqn': self.data.iqn}]
{'ip': self.data.ip2, 'iqn': self.data.iqn}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
ref_properties = {
'target_portals': (
[t['ip'] + ':3260' for t in ip_and_iqn]),
'target_iqns': (
[t['iqn'].split(',')[0] for t in ip_and_iqn]),
'target_luns': [host_lun_id] * len(ip_and_iqn),
'target_discovered': True,
'target_iqn': ip_and_iqn[0]['iqn'].split(',')[0],
'target_portal': ip_and_iqn[0]['ip'] + ':3260',
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.test_volume, ip_and_iqn, True, host_lun_id, [], None)
self.assertEqual(ref_properties, iscsi_properties)
self.data.array, self.data.test_volume, ip_and_iqn, True,
host_lun_id, [], None)
iscsi_tgt_iqn = iscsi_properties.get('target_iqn')
iscsi_tgt_portal = iscsi_properties.get('target_portal')
self.assertIn(iscsi_tgt_iqn, [self.data.initiator, self.data.iqn])
self.assertIn(iscsi_tgt_portal, [self.data.ip + ":3260",
self.data.ip2 + ":3260"])
for ip_iqn in ip_and_iqn:
if ip_iqn['ip'] + ":3260" == iscsi_tgt_portal:
self.assertEqual(iscsi_tgt_iqn, ip_iqn.get('iqn'))
def test_vmax_get_iscsi_properties_multiple_targets_load_balance(self):
ip_and_iqn = [
{'ip': self.data.ip, 'iqn': self.data.initiator,
'physical_port': self.data.perf_ports[0]},
{'ip': self.data.ip2, 'iqn': self.data.iqn,
'physical_port': self.data.perf_ports[1]}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
self.driver.performance.config = self.data.performance_config
ref_tgt_map = {}
for tgt in ip_and_iqn:
ref_tgt_map.update({
tgt['physical_port']: {'ip': tgt['ip'],
'iqn': tgt['iqn']}})
with mock.patch.object(
self.driver.performance, 'process_port_load',
side_effect=(
self.driver.performance.process_port_load)) as mck_p:
iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.array, self.data.test_volume, ip_and_iqn, False,
host_lun_id, None, None)
mck_p.assert_called_once_with(self.data.array, ref_tgt_map.keys())
iscsi_tgt_iqn = iscsi_properties.get('target_iqn')
iscsi_tgt_portal = iscsi_properties.get('target_portal')
self.assertIn(iscsi_tgt_iqn, [self.data.initiator, self.data.iqn])
self.assertIn(iscsi_tgt_portal, [self.data.ip + ":3260",
self.data.ip2 + ":3260"])
for ip_iqn in ip_and_iqn:
if ip_iqn['ip'] + ":3260" == iscsi_tgt_portal:
self.assertEqual(iscsi_tgt_iqn, ip_iqn.get('iqn'))
def test_vmax_get_iscsi_properties_multiple_targets_load_balance_exc(self):
ip_and_iqn = [
{'ip': self.data.ip, 'iqn': self.data.initiator},
{'ip': self.data.ip2, 'iqn': self.data.iqn}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
self.driver.performance.config = self.data.performance_config
with mock.patch.object(
self.driver.performance, 'process_port_load',
side_effect=(
self.driver.performance.process_port_load)) as mck_p:
iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.array, self.data.test_volume, ip_and_iqn, False,
host_lun_id, None, None)
mck_p.assert_not_called()
iscsi_tgt_iqn = iscsi_properties.get('target_iqn')
iscsi_tgt_portal = iscsi_properties.get('target_portal')
self.assertIn(iscsi_tgt_iqn, [self.data.initiator, self.data.iqn])
self.assertIn(iscsi_tgt_portal, [self.data.ip + ":3260",
self.data.ip2 + ":3260"])
for ip_iqn in ip_and_iqn:
if ip_iqn['ip'] + ":3260" == iscsi_tgt_portal:
self.assertEqual(iscsi_tgt_iqn, ip_iqn.get('iqn'))
def test_vmax_get_iscsi_properties_auth(self):
vol = deepcopy(self.data.test_volume)
@ -188,23 +244,14 @@ class PowerMaxISCSITest(test.TestCase):
ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator},
{'ip': self.data.ip, 'iqn': self.data.iqn}]
host_lun_id = self.data.iscsi_device_info['hostlunid']
ref_properties = {
'target_portals': (
[t['ip'] + ':3260' for t in ip_and_iqn]),
'target_iqns': (
[t['iqn'].split(',')[0] for t in ip_and_iqn]),
'target_luns': [host_lun_id] * len(ip_and_iqn),
'target_discovered': True,
'target_iqn': ip_and_iqn[0]['iqn'].split(',')[0],
'target_portal': ip_and_iqn[0]['ip'] + ':3260',
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id,
'auth_method': 'CHAP',
'auth_username': 'auth_username',
'auth_password': 'auth_secret'}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
vol, ip_and_iqn, True, host_lun_id, None, None)
self.assertEqual(ref_properties, iscsi_properties)
self.data.array, vol, ip_and_iqn, True, host_lun_id, None, None)
self.assertIn('auth_method', iscsi_properties.keys())
self.assertIn('auth_username', iscsi_properties.keys())
self.assertIn('auth_password', iscsi_properties.keys())
self.assertEqual('CHAP', iscsi_properties['auth_method'])
self.assertEqual('auth_username', iscsi_properties['auth_username'])
self.assertEqual('auth_secret', iscsi_properties['auth_password'])
self.driver.configuration = backup_conf
def test_vmax_get_iscsi_properties_metro(self):
@ -225,8 +272,8 @@ class PowerMaxISCSITest(test.TestCase):
'target_lun': host_lun_id,
'volume_id': self.data.test_volume.id}
iscsi_properties = self.driver.vmax_get_iscsi_properties(
self.data.test_volume, ip_and_iqn, True, host_lun_id,
self.data.iscsi_device_info_metro['metro_ip_and_iqn'],
self.data.array, self.data.test_volume, ip_and_iqn, True,
host_lun_id, self.data.iscsi_device_info_metro['metro_ip_and_iqn'],
self.data.iscsi_device_info_metro['metro_hostlunid'])
self.assertEqual(ref_properties, iscsi_properties)


@ -0,0 +1,379 @@
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from unittest import mock
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_data as tpd)
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_fake_objects as tpfo)
from cinder.volume.drivers.dell_emc.powermax import iscsi
from cinder.volume.drivers.dell_emc.powermax import performance
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
from cinder.volume import volume_utils
class PowerMaxPerformanceTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
self.reference_cinder_conf = tpfo.FakeConfiguration(
None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='smc',
san_api_port=8443, vmax_port_groups=[self.data.port_group_name_i],
load_balance=True, load_balance_real_time=True,
load_data_format='avg', load_look_back=60,
load_look_back_real_time=10, port_group_load_metric='PercentBusy',
port_load_metric='PercentBusy')
self.reference_perf_conf = {
'load_balance': True, 'load_balance_rt': True,
'perf_registered': True, 'rt_registered': True,
'collection_interval': 5, 'data_format': 'Average',
'look_back': 60, 'look_back_rt': 10,
'port_group_metric': 'PercentBusy', 'port_metric': 'PercentBusy'}
super(PowerMaxPerformanceTest, self).setUp()
volume_utils.get_max_over_subscription_ratio = mock.Mock()
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(
configuration=self.reference_cinder_conf)
self.driver = driver
self.common = self.driver.common
self.performance = self.driver.performance
self.rest = self.common.rest
def test_set_performance_configuration(self):
"""Test set_performance_configuration diagnostic & real time."""
self.assertEqual(self.reference_perf_conf, self.performance.config)
@mock.patch.object(
performance.PowerMaxPerformance, 'get_array_registration_details',
return_value=(True, False, 5))
def test_set_performance_configuration_no_rt_reg_rt_disabled(
self, mck_reg):
"""Test set_performance_configuration real-time disabled.
Test configuration settings when real-time is disabled in cinder.conf
and real-time metrics are not registered in Unisphere.
"""
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.load_balance_real_time = False
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
perf_conf = deepcopy(self.reference_perf_conf)
perf_conf['load_balance_rt'] = False
perf_conf['rt_registered'] = False
self.assertEqual(perf_conf, temp_driver.performance.config)
def test_set_performance_configuration_rt_reg_rt_disabled(self):
"""Test set_performance_configuration real-time disabled v2.
Test configuration settings when real-time is disabled in cinder.conf
and real-time metrics are registered in Unisphere.
"""
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.load_balance_real_time = False
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
perf_conf = deepcopy(self.reference_perf_conf)
perf_conf['load_balance_rt'] = False
perf_conf['rt_registered'] = True
self.assertEqual(perf_conf, temp_driver.performance.config)
@mock.patch.object(
performance.PowerMaxPerformance, 'get_array_registration_details',
return_value=(False, False, 5))
def test_set_performance_configuration_not_perf_registered(self, mck_reg):
"""Test set_performance_configuration performance metrics not enabled.
This tests config settings where the user has enabled load balancing in
cinder.conf but Unisphere is not registered for performance metrics.
"""
cinder_conf = deepcopy(self.reference_cinder_conf)
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
perf_conf = {'load_balance': False}
self.assertEqual(perf_conf, temp_driver.performance.config)
def test_set_performance_configuration_invalid_data_format(self):
"""Test set_performance_configuration invalid data format, avg set."""
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.load_data_format = 'InvalidFormat'
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
self.assertEqual(self.reference_perf_conf,
temp_driver.performance.config)
def test_set_performance_configuration_max_data_format(self):
"""Test set_performance_configuration max data format, max set."""
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.load_data_format = 'MAXIMUM'
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
perf_conf = deepcopy(self.reference_perf_conf)
perf_conf['data_format'] = 'Maximum'
self.assertEqual(perf_conf, temp_driver.performance.config)
def test_set_performance_configuration_lookback_invalid(self):
"""Test set_performance_configuration invalid lookback windows."""
# Window set to negative value
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.load_look_back = -1
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
perf_conf = deepcopy(self.reference_perf_conf)
perf_conf['look_back'] = 60
self.assertEqual(perf_conf, temp_driver.performance.config)
# Window set to value larger than upper limit of 1440
cinder_conf.load_look_back = 9999
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
self.assertEqual(perf_conf, temp_driver.performance.config)
def test_set_performance_configuration_rt_lookback_invalid(self):
"""Test set_performance_configuration invalid rt lookback windows."""
# Window set to negative value
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.load_look_back_real_time = -1
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
perf_conf = deepcopy(self.reference_perf_conf)
perf_conf['look_back_rt'] = 1
self.assertEqual(perf_conf, temp_driver.performance.config)
# Window set to value larger than upper limit of 10
cinder_conf.load_look_back_real_time = 100
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
self.assertEqual(perf_conf, temp_driver.performance.config)
def test_set_performance_configuration_invalid_pg_metric(self):
"""Test set_performance_configuration invalid pg metric."""
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.port_group_load_metric = 'InvalidMetric'
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
self.assertEqual(self.reference_perf_conf,
temp_driver.performance.config)
def test_set_performance_configuration_invalid_port_metric(self):
"""Test set_performance_configuration invalid port metric."""
cinder_conf = deepcopy(self.reference_cinder_conf)
cinder_conf.port_load_metric = 'InvalidMetric'
rest.PowerMaxRest._establish_rest_session = mock.Mock(
return_value=tpfo.FakeRequestsSession())
temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf)
self.assertEqual(self.reference_perf_conf,
temp_driver.performance.config)
def test_get_array_registration_details(self):
"""Test get_array_registration_details."""
p_reg, rt_reg, c_int = self.performance.get_array_registration_details(
self.data.array)
self.assertEqual((True, True, 5), (p_reg, rt_reg, c_int))
def test_get_array_performance_keys(self):
"""Test get_array_performance_keys."""
f_date, l_date = self.performance.get_array_performance_keys(
self.data.array)
self.assertEqual(self.data.f_date_a, f_date)
self.assertEqual(self.data.l_date, l_date)
def test_get_look_back_window_interval_timestamp(self):
"""Test _get_look_back_window_interval_timestamp."""
self.assertEqual(
self.data.l_date - (utils.ONE_MINUTE * 10),
self.performance._get_look_back_window_interval_timestamp(
self.data.l_date, 10))
def test_process_load(self):
"""Test _process_load to calculate average of all intervals."""
performance_data = self.data.dummy_performance_data
perf_metrics = performance_data['resultList']['result']
metric = self.data.perf_pb_metric
ref_total = 0
for interval in perf_metrics:
ref_total += interval.get(metric)
ref_avg = ref_total / len(perf_metrics)
avg, total, count = self.performance._process_load(
performance_data, metric)
self.assertEqual(avg, ref_avg)
self.assertEqual(total, ref_total)
self.assertEqual(count, len(perf_metrics))
def test_get_port_group_performance_stats(self):
"""Test _get_port_group_performance_stats."""
array_id = self.data.array
port_group_id = self.data.port_group_name_i
f_date = self.data.f_date_a
l_date = self.data.l_date
metric = self.data.perf_pb_metric
data_format = self.data.perf_df_avg
avg, total, count = self.performance._get_port_group_performance_stats(
array_id, port_group_id, f_date, l_date, metric, data_format)
self.assertTrue(avg > 0)
self.assertIsInstance(avg, float)
self.assertTrue(total > 0)
self.assertIsInstance(total, float)
self.assertTrue(count > 0)
self.assertIsInstance(count, int)
def test_get_port_performance_stats_diagnostic(self):
"""Test _get_port_performance_stats diagnostic."""
array_id = self.data.array
dir_id = self.data.iscsi_dir
port_id = self.data.iscsi_port
f_date = self.data.f_date_a
l_date = self.data.l_date
metric = self.data.perf_pb_metric
data_format = self.data.perf_df_avg
res_type = 'diagnostic'
ref_target_uri = '/performance/FEPort/metrics'
ref_resource = '%(res)s Port performance metrics' % {'res': res_type}
ref_request_body = {
utils.SYMM_ID: array_id, utils.DIR_ID: dir_id,
utils.PORT_ID: port_id, utils.S_DATE: f_date, utils.E_DATE: l_date,
utils.DATA_FORMAT: data_format, utils.METRICS: [metric]}
with mock.patch.object(
self.rest, 'post_request',
side_effect=self.rest.post_request) as mck_post:
avg, total, count = self.performance._get_port_performance_stats(
array_id, dir_id, port_id, f_date, l_date, metric, data_format,
real_time=False)
mck_post.assert_called_once_with(
ref_target_uri, ref_resource, ref_request_body)
self.assertTrue(avg > 0)
self.assertIsInstance(avg, float)
self.assertTrue(total > 0)
self.assertIsInstance(total, float)
self.assertTrue(count > 0)
self.assertIsInstance(count, int)
def test_get_port_performance_stats_real_time(self):
"""Test _get_port_performance_stats real-time."""
array_id = self.data.array
dir_id = self.data.iscsi_dir
port_id = self.data.iscsi_port
f_date = self.data.f_date_a
l_date = self.data.l_date
metric = self.data.perf_pb_metric
res_type = 'real-time'
ref_target_uri = '/performance/realtime/metrics'
ref_resource = '%(res)s Port performance metrics' % {'res': res_type}
ref_request_body = {
utils.SYMM_ID: array_id,
utils.INST_ID: self.data.iscsi_dir_port,
utils.S_DATE: f_date, utils.E_DATE: l_date,
utils.CAT: utils.FE_PORT_RT, utils.METRICS: [metric]}
with mock.patch.object(
self.rest, 'post_request',
side_effect=self.rest.post_request) as mck_post:
avg, total, count = self.performance._get_port_performance_stats(
array_id, dir_id, port_id, f_date, l_date, metric,
real_time=True)
mck_post.assert_called_once_with(
ref_target_uri, ref_resource, ref_request_body)
self.assertTrue(avg > 0)
self.assertIsInstance(avg, float)
self.assertTrue(total > 0)
self.assertIsInstance(total, float)
self.assertTrue(count > 0)
self.assertIsInstance(count, int)
def test_process_port_group_load_min(self):
"""Test process_port_group_load min load."""
array_id = self.data.array
port_groups = self.data.perf_port_groups
avg, metric, port_group = self.performance.process_port_group_load(
array_id, port_groups)
self.assertTrue(avg > 0)
self.assertIsInstance(avg, float)
self.assertEqual(metric,
self.performance.config.get('port_group_metric'))
self.assertIn(port_group, port_groups)
def test_process_port_group_load_max(self):
"""Test process_port_group_load max load."""
array_id = self.data.array
port_groups = self.data.perf_port_groups
avg, metric, port_group = self.performance.process_port_group_load(
array_id, port_groups, max_load=True)
self.assertTrue(abs(avg) > 0)
self.assertIsInstance(avg, float)
self.assertEqual(metric,
self.performance.config.get('port_group_metric'))
self.assertIn(port_group, port_groups)
def test_process_port_load_real_time_min(self):
"""Test process_port_load min load real-time."""
array_id = self.data.array
ports = self.data.perf_ports
avg, metric, port = self.performance.process_port_group_load(
array_id, ports)
self.assertTrue(avg > 0)
self.assertIsInstance(avg, float)
self.assertEqual(metric,
self.performance.config.get('port_group_metric'))
self.assertIn(port, ports)
def test_process_port_load_real_time_max(self):
"""Test process_port_load max load real-time."""
array_id = self.data.array
ports = self.data.perf_ports
avg, metric, port = self.performance.process_port_group_load(
array_id, ports, max_load=True)
self.assertTrue(abs(avg) > 0)
self.assertIsInstance(avg, float)
self.assertEqual(metric,
self.performance.config.get('port_group_metric'))
self.assertIn(port, ports)
def test_process_port_load_diagnostic_min(self):
"""Test process_port_load min load real-time."""
array_id = self.data.array
ports = self.data.perf_ports
self.performance.config['load_balance_rt'] = False
avg, metric, port = self.performance.process_port_group_load(
array_id, ports)
self.assertTrue(avg > 0)
self.assertIsInstance(avg, float)
self.assertEqual(metric,
self.performance.config.get('port_group_metric'))
self.assertIn(port, ports)
def test_process_port_load_diagnostic_max(self):
"""Test process_port_load min load real-time."""
array_id = self.data.array
ports = self.data.perf_ports
self.performance.config['load_balance_rt'] = False
avg, metric, port = self.performance.process_port_group_load(
array_id, ports, max_load=True)
self.assertTrue(abs(avg) > 0)
self.assertIsInstance(avg, float)
self.assertEqual(metric,
self.performance.config.get('port_group_metric'))
self.assertIn(port, ports)
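The performance module implementation is not part of this excerpt, but the min/max tests above pin down its selection rule: compute the average of the configured metric for each candidate and return the least-loaded one by default, or the busiest when max_load=True. A standalone sketch of that rule with hypothetical names (process_port_group_load and process_port_load are the real entry points exercised above):

# Hypothetical sketch only -- not the driver's implementation.
def pick_by_load(average_loads, max_load=False):
    """Select a resource name by its average load.

    :param average_loads: dict of resource name -> average metric value
    :param max_load: pick the busiest resource instead of the quietest
    :returns: (average, resource name)
    """
    select = max if max_load else min
    name = select(average_loads, key=average_loads.get)
    return average_loads[name], name

# e.g. pick_by_load({'port_group_a': 12.5, 'port_group_b': 3.1})
#      -> (3.1, 'port_group_b')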


@ -171,6 +171,9 @@ class PowerMaxReplicationTest(test.TestCase):
mock_ip):
metro_connector = deepcopy(self.data.connector)
metro_connector['multipath'] = True
phys_port = '%(dir)s:%(port)s' % {
'dir': self.data.portgroup[0]['symmetrixPortKey'][0]['directorId'],
'port': '1'}
info_dict = self.iscsi_common.initialize_connection(
self.data.test_volume, metro_connector)
ref_dict = {'array': self.data.array,
@ -178,11 +181,13 @@ class PowerMaxReplicationTest(test.TestCase):
'hostlunid': 3,
'maskingview': self.data.masking_view_name_f,
'ip_and_iqn': [{'ip': self.data.ip,
'iqn': self.data.initiator}],
'iqn': self.data.initiator,
'physical_port': phys_port}],
'metro_hostlunid': 3,
'is_multipath': True,
'metro_ip_and_iqn': [{'ip': self.data.ip,
'iqn': self.data.initiator}]}
'iqn': self.data.initiator,
'physical_port': phys_port}]}
self.assertEqual(ref_dict, info_dict)
@mock.patch.object(utils.PowerMaxUtils, 'is_metro_device',


@ -2330,3 +2330,31 @@ class PowerMaxRestTest(test.TestCase):
mck_request.assert_called_once_with(
test_uri, rest.POST, request_object=test_filters)
self.assertEqual(response_obj, {'success': True})
def test_get_ip_interface_physical_port(self):
array_id = self.data.array
virtual_port = self.data.iscsi_dir_virtual_port
ip_address = self.data.ip
response_dir_port = self.rest.get_ip_interface_physical_port(
array_id, virtual_port, ip_address)
self.assertEqual(self.data.iscsi_dir_port, response_dir_port)
@mock.patch.object(
rest.PowerMaxRest, 'get_request',
side_effect=[tpd.PowerMaxData.director_port_keys_empty,
tpd.PowerMaxData.director_port_keys_multiple])
def test_get_ip_interface_physical_port_exceptions(self, mck_get):
array_id = self.data.array
virtual_port = self.data.iscsi_dir_virtual_port
ip_address = self.data.ip
# No physical port keys returned
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.get_ip_interface_physical_port,
array_id, virtual_port, ip_address)
# Multiple physical port keys returned
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.get_ip_interface_physical_port,
array_id, virtual_port, ip_address)


@ -34,6 +34,7 @@ from cinder.utils import retry
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.powermax import masking
from cinder.volume.drivers.dell_emc.powermax import metadata as volume_metadata
from cinder.volume.drivers.dell_emc.powermax import performance
from cinder.volume.drivers.dell_emc.powermax import provision
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
@ -146,7 +147,34 @@ powermax_opts = [
help='User defined override for short host name.'),
cfg.StrOpt(utils.POWERMAX_PORT_GROUP_NAME_TEMPLATE,
default='portGroupName',
help='User defined override for port group name.')]
help='User defined override for port group name.'),
cfg.BoolOpt(utils.LOAD_BALANCE,
default=False,
help='Enable/disable load balancing for a PowerMax backend.'),
cfg.BoolOpt(utils.LOAD_BALANCE_RT,
default=False,
help='Enable/disable real-time performance metrics for Port '
'level load balancing for a PowerMax backend.'),
cfg.StrOpt(utils.PERF_DATA_FORMAT,
default='Avg',
help='Performance data format, not applicable for real-time '
'metrics. Available options are "avg" and "max".'),
cfg.IntOpt(utils.LOAD_LOOKBACK,
default=60,
help='How far in minutes to look back for diagnostic '
'performance metrics in load calculation, minimum of 0, '
'maximum of 1440 (24 hours).'),
cfg.IntOpt(utils.LOAD_LOOKBACK_RT,
default=1,
help='How far in minutes to look back for real-time '
'performance metrics in load calculation, minimum of 1, '
'maximum of 10.'),
cfg.StrOpt(utils.PORT_GROUP_LOAD_METRIC,
default='PercentBusy',
help='Metric used for port group load calculation.'),
cfg.StrOpt(utils.PORT_LOAD_METRIC,
default='PercentBusy',
help='Metric used for port load calculation.')]
CONF.register_opts(powermax_opts, group=configuration.SHARED_CONF_GROUP)
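The two look-back options above bound how much history feeds the load calculation: a window of N minutes ending at the array's last available performance timestamp (epoch milliseconds). A minimal sketch of that window arithmetic, consistent with what test_get_look_back_window_interval_timestamp asserts; ONE_MINUTE = 60000 is an assumption matching the millisecond timestamps in the test data:

# Sketch only; mirrors l_date - (utils.ONE_MINUTE * look_back) from the tests.
ONE_MINUTE = 60000  # assumed: one minute in milliseconds

def look_back_window_start(last_available_date, look_back_minutes):
    """Return the start timestamp of the look-back window."""
    return last_available_date - (ONE_MINUTE * look_back_minutes)

# e.g. look_back_window_start(1594730100000, 10) == 1594729500000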
@ -197,6 +225,7 @@ class PowerMaxCommon(object):
self._get_replication_info()
self._get_u4p_failover_info()
self._gather_info()
self._get_performance_config()
self.rest.validate_unisphere_version()
def _gather_info(self):
@ -251,6 +280,20 @@ class PowerMaxCommon(object):
"Updating volume stats on Cinder backend %(backendName)s.",
{'backendName': self.pool_info['backend_name']})
def _get_performance_config(self):
"""Gather performance configuration, if provided in cinder.conf."""
performance_config = {'load_balance': False}
self.performance = performance.PowerMaxPerformance(
self.rest, performance_config)
if self.configuration.safe_get(utils.LOAD_BALANCE):
LOG.info(
"Updating performance config for Cinder backend %(be)s.",
{'be': self.pool_info['backend_name']})
array_info = self.get_attributes_from_cinder_config()
self.performance.set_performance_configuration(
array_info['SerialNumber'], self.configuration)
def _get_u4p_failover_info(self):
"""Gather Unisphere failover target information, if provided."""
@ -844,21 +887,19 @@ class PowerMaxCommon(object):
type. These are precreated. If the portGroup does not
exist then an error will be returned to the user
maskingview_name = OS-<shortHostName>-<srpName>-<shortProtocol>-MV
e.g OS-myShortHost-SRP_1-I-MV
e.g OS-myShortHost-SRP_1-I-MV
:param volume: volume Object
:param connector: the connector Object
:returns: dict -- device_info_dict - device information dict
"""
extra_specs = self._initial_setup(volume)
LOG.info("Initialize connection: %(vol)s.", {'vol': volume.name})
extra_specs = self._initial_setup(volume, init_conn=True)
is_multipath = connector.get('multipath', False)
rep_config = extra_specs.get(utils.REP_CONFIG)
rep_extra_specs = self._get_replication_extra_specs(
extra_specs, rep_config)
remote_port_group = None
volume_name = volume.name
LOG.info("Initialize connection: %(volume)s.",
{'volume': volume_name})
if (self.utils.is_metro_device(rep_config, extra_specs)
and not is_multipath and self.protocol.lower() == 'iscsi'):
LOG.warning("Either multipathing is not correctly/currently "
@ -889,7 +930,7 @@ class PowerMaxCommon(object):
hostlunid = device_info_dict['hostlunid']
LOG.info("Volume %(volume)s is already mapped to host %(host)s. "
"The hostlunid is %(hostlunid)s.",
{'volume': volume_name, 'host': connector['host'],
{'volume': volume.name, 'host': connector['host'],
'hostlunid': hostlunid})
port_group_name = (
self.get_port_group_from_masking_view(
@ -1696,15 +1737,18 @@ class PowerMaxCommon(object):
final_masking_view_list.extend(masking_view_list)
return final_masking_view_list, storage_group_list
def _initial_setup(self, volume, volume_type_id=None):
def _initial_setup(self, volume, volume_type_id=None,
init_conn=False):
"""Necessary setup to accumulate the relevant information.
The volume object has a host in which we can parse the
config group name. The config group name is the key to our EMC
configuration file. The emc configuration file contains srp name
and array name which are mandatory fields.
:param volume: the volume object
:param volume: the volume object -- obj
:param volume_type_id: optional override of volume.volume_type_id
-- str
:param init_conn: if extra specs are for initialize connection -- bool
:returns: dict -- extra spec dict
:raises: VolumeBackendAPIException:
"""
@ -1723,7 +1767,8 @@ class PowerMaxCommon(object):
raise exception.VolumeBackendAPIException(
message=exception_message)
extra_specs = self._set_vmax_extra_specs(extra_specs, array_info)
extra_specs = self._set_vmax_extra_specs(
extra_specs, array_info, init_conn)
if qos_specs and qos_specs.get('consumer') != "front-end":
extra_specs['qos'] = qos_specs.get('specs')
except Exception:
@ -2197,7 +2242,8 @@ class PowerMaxCommon(object):
# from error status codes returned from the various REST jobs.
raise e
def _set_vmax_extra_specs(self, extra_specs, pool_record):
def _set_vmax_extra_specs(self, extra_specs, pool_record,
init_conn=False):
"""Set the PowerMax/VMAX extra specs.
The pool_name extra spec must be set, otherwise a default slo/workload
@ -2205,25 +2251,17 @@ class PowerMaxCommon(object):
on the volume type (e.g. 'storagetype:portgroupname = os-pg1-pg'), or
can be chosen from a list provided in the cinder.conf
:param extra_specs: extra specifications
:param pool_record: pool record
:returns: dict -- the extra specifications dictionary
:param extra_specs: extra specifications -- dict
:param pool_record: pool record -- dict
:param: init_conn: if extra specs are for initialize connection -- bool
:returns: the extra specifications -- dict
"""
# set extra_specs from pool_record
extra_specs[utils.SRP] = pool_record['srpName']
extra_specs[utils.ARRAY] = pool_record['SerialNumber']
try:
if not extra_specs.get(utils.PORTGROUPNAME):
extra_specs[utils.PORTGROUPNAME] = pool_record['PortGroup']
except Exception:
error_message = (_("Port group name has not been provided - "
"please configure the "
"'storagetype:portgroupname' extra spec on "
"the volume type, or enter a list of "
"portgroups in the cinder.conf associated with "
"this backend."))
LOG.error(error_message)
raise exception.VolumeBackendAPIException(message=error_message)
extra_specs[utils.PORTGROUPNAME] = (
self._select_port_group_for_extra_specs(extra_specs, pool_record,
init_conn))
self._validate_storage_group_tag_list(extra_specs)
@ -2307,6 +2345,74 @@ class PowerMaxCommon(object):
return extra_specs
def _select_port_group_for_extra_specs(self, extra_specs, pool_record,
init_conn=False):
"""Determine Port Group for operation extra specs.
:param extra_specs: existing extra specs -- dict
:param pool_record: pool record -- dict
:param init_conn: if extra specs are for initialize connection -- bool
:returns: Port Group -- str
:raises: exception.VolumeBackendAPIException
"""
port_group = None
conf_port_groups = pool_record.get(utils.PORT_GROUP, [])
vt_port_group = extra_specs.get(utils.PORTGROUPNAME, None)
# Scenario 1: Port Group is set in volume-type extra specs, overrides
# any settings in cinder.conf
if vt_port_group:
port_group = vt_port_group
LOG.info("Using Port Group '%(pg)s' from volume-type extra specs.",
{'pg': port_group})
# Scenario 2: Port Group(s) set in cinder.conf and not in volume-type
elif conf_port_groups:
# Scenario 2-1: There is only one Port Group defined, no load
# balance or random selection required
if len(conf_port_groups) == 1:
port_group = conf_port_groups[0]
LOG.info(
"Using Port Group '%(pg)s' from cinder.conf backend "
"configuration.", {'pg': port_group})
# Scenario 2-2: Else more than one Port Group in cinder.conf
else:
# Scenario 2-2-1: If load balancing is enabled and the extra
# specs are for initialize_connection() method then use load
# balance selection
if init_conn and (
self.performance.config.get('load_balance', False)):
try:
load, metric, port_group = (
self.performance.process_port_group_load(
extra_specs[utils.ARRAY], conf_port_groups))
LOG.info(
"Selecting Port Group %(pg)s with %(met)s load of "
"%(load)s", {'pg': port_group, 'met': metric,
'load': load})
except exception.VolumeBackendAPIException:
LOG.error(
"There has been a problem calculating Port Group "
"load, reverting to default random selection.")
# Scenario 2-2-2: If the call is not for initialize_connection,
# load balancing is not enabled, or there was an error while
# calculating PG load, revert to random PG selection method
if not port_group:
port_group = random.choice(conf_port_groups)
# Port group not extracted from volume-type or cinder.conf, raise
if not port_group:
error_message = (_(
"Port Group name has not been provided - please configure the "
"'storagetype:portgroupname' extra spec on the volume type, "
"or enter a list of Port Groups in the cinder.conf associated "
"with this backend."))
LOG.error(error_message)
raise exception.VolumeBackendAPIException(message=error_message)
return port_group
def _validate_storage_group_tag_list(self, extra_specs):
"""Validate the storagetype:storagegrouptags list
@ -2627,33 +2733,36 @@ class PowerMaxCommon(object):
array, portgroup_name, initiator_group_name)
return masking_view_list
def _get_ip_and_iqn(self, array, port):
"""Get ip and iqn from the director port.
def _get_iscsi_ip_iqn_port(self, array, port):
"""Get ip and iqn from a virtual director port.
:param array: the array serial number
:param port: the director port on the array
:returns: ip_and_iqn - dict
:param array: the array serial number -- str
:param port: the director & virtual port on the array -- str
:returns: ip_and_iqn -- dict
"""
ip_iqn_list = []
ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn(
array, port)
for ip in ip_addresses:
ip_iqn_list.append({'iqn': iqn, 'ip': ip})
physical_port = self.rest.get_ip_interface_physical_port(
array, port.split(':')[0], ip)
ip_iqn_list.append({'iqn': iqn, 'ip': ip,
'physical_port': physical_port})
return ip_iqn_list
def _find_ip_and_iqns(self, array, port_group_name):
"""Find the list of ips and iqns for the ports in a portgroup.
"""Find the list of ips and iqns for the ports in a port group.
:param array: the array serial number
:param port_group_name: the portgroup name
:returns: ip_and_iqn - list of dicts
:param array: the array serial number -- str
:param port_group_name: the port group name -- str
:returns: ip_and_iqn -- list of dicts
"""
ips_and_iqns = []
LOG.debug("The portgroup name for iscsiadm is %(pg)s",
{'pg': port_group_name})
ports = self.rest.get_port_ids(array, port_group_name)
for port in ports:
ip_and_iqn = self._get_ip_and_iqn(array, port)
ip_and_iqn = self._get_iscsi_ip_iqn_port(array, port)
ips_and_iqns.extend(ip_and_iqn)
return ips_and_iqns
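# Illustrative shape of the list returned above (editor's sketch with
# hypothetical values; real values come from the REST API):
# [{'ip': '192.168.0.10',
#   'iqn': 'iqn.2020-07.com.example:target-a',
#   'physical_port': 'SE-2F:8'},
#  {'ip': '192.168.0.11',
#   'iqn': 'iqn.2020-07.com.example:target-b',
#   'physical_port': 'SE-2F:9'}]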
@ -5752,9 +5861,6 @@ class PowerMaxCommon(object):
workload = self.configuration.safe_get(utils.VMAX_WORKLOAD)
port_groups = self._get_configuration_value(
utils.VMAX_PORT_GROUPS, utils.POWERMAX_PORT_GROUPS)
random_portgroup = None
if port_groups:
random_portgroup = random.choice(port_groups)
kwargs = (
{'RestServerIp': self.configuration.safe_get(
@ -5764,7 +5870,7 @@ class PowerMaxCommon(object):
'RestPassword': password,
'SerialNumber': serial_number,
'srpName': srp_name,
'PortGroup': random_portgroup})
'PortGroup': port_groups})
if self.configuration.safe_get('driver_ssl_cert_verify'):
if self.configuration.safe_get('driver_ssl_cert_path'):

View File

@ -17,6 +17,7 @@ import ast
from oslo_log import log as logging
from cinder import exception
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.powermax import common
@ -123,6 +124,8 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
- Support for multiple replication devices
- Pools bug fix allowing 'None' variants (bug #1873253)
4.3.0 - Changing from 91 to 92 REST endpoints
- Support for Port Group and Port load balancing
(bp powermax-port-load-balance)
"""
VERSION = "4.3.0"
@ -139,6 +142,8 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
self.VERSION,
configuration=self.configuration,
active_backend_id=self.active_backend_id)
self.performance = self.common.performance
self.rest = self.common.rest
self.zonemanager_lookup_service = fczm_utils.create_lookup_service()
@classmethod
@ -295,7 +300,7 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
"""
device_number = device_info['hostlunid']
target_wwns, init_targ_map = self._build_initiator_target_map(
volume, connector)
volume, connector, device_info)
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': device_number,
@ -451,11 +456,12 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
return data
def _build_initiator_target_map(self, volume, connector):
def _build_initiator_target_map(self, volume, connector,
                                device_info=None):
"""Build the target_wwns and the initiator target map.
:param volume: the cinder volume object
:param connector: the connector object
:param device_info: device info dict, used for load-balanced target
                    selection -- dict
:returns: target_wwns -- list, init_targ_map -- dict
"""
target_wwns, init_targ_map = [], {}
@ -464,6 +470,40 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
self.common.get_target_wwns_from_masking_view(
volume, connector))
# If load balancing is enabled we want to select only the FC target
# that has the lowest load of all ports in the selected port group.
# Note: device_info is checked here because this method is also called
# for terminate_connection; load is only calculated on
# initialize_connection.
if device_info and self.performance.config.get('load_balance'):
try:
array_id = device_info.get('array')
masking_view = device_info.get('maskingview')
# Get PG from MV
port_group = self.rest.get_element_from_masking_view(
array_id, masking_view, portgroup=True)
# Get port list from PG
port_list = self.rest.get_port_ids(array_id, port_group)
# Get lowest load port in PG
load, metric, port = self.performance.process_port_load(
array_id, port_list)
LOG.info("Lowest %(met)s load port is %(port)s: %(load)s",
{'met': metric, 'port': port, 'load': load})
# Get target WWN
port_details = self.rest.get_port(array_id, port)
port_info = port_details.get('symmetrixPort')
port_wwn = port_info.get('wwn_node')
LOG.info("Port %(p)s WWN: %(wwn)s",
{'p': port, 'wwn': port_wwn})
# Set lowest load port WWN as FC target for connection
fc_targets = [port_wwn]
except exception.VolumeBackendAPIException:
LOG.error("There was an error calculating port load, "
"reverting to default target selection.")
fc_targets, __ = (
self.common.get_target_wwns_from_masking_view(
volume, connector))
if self.zonemanager_lookup_service:
fc_targets.extend(metro_fc_targets)
mapping = (

View File

@ -16,6 +16,8 @@
ISCSI Drivers for Dell EMC PowerMax/VMAX arrays based on REST.
"""
import random
from oslo_log import log as logging
from oslo_utils import strutils
import six
@ -128,6 +130,8 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
- Support for multiple replication devices
- Pools bug fix allowing 'None' variants (bug #1873253)
4.3.0 - Changing from 91 to 92 REST endpoints
- Support for Port Group and Port load balancing
(bp powermax-port-load-balance)
"""
VERSION = "4.3.0"
@ -145,6 +149,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
self.VERSION,
configuration=self.configuration,
active_backend_id=self.active_backend_id))
self.performance = self.common.performance
@classmethod
def get_driver_options(cls):
@ -298,6 +303,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
"""
metro_ip_iqn, metro_host_lun = None, None
try:
array_id = device_info['array']
ip_and_iqn = device_info['ip_and_iqn']
is_multipath = device_info['is_multipath']
host_lun_id = device_info['hostlunid']
@ -314,17 +320,19 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
metro_host_lun = device_info['metro_hostlunid']
iscsi_properties = self.vmax_get_iscsi_properties(
volume, ip_and_iqn, is_multipath, host_lun_id,
array_id, volume, ip_and_iqn, is_multipath, host_lun_id,
metro_ip_iqn, metro_host_lun)
LOG.info("iSCSI properties are: %(props)s",
{'props': strutils.mask_dict_password(iscsi_properties)})
LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume})
return {'driver_volume_type': 'iscsi',
'data': iscsi_properties}
def vmax_get_iscsi_properties(self, volume, ip_and_iqn,
is_multipath, host_lun_id,
metro_ip_iqn, metro_host_lun):
def vmax_get_iscsi_properties(
self, array_id, volume, ip_and_iqn, is_multipath, host_lun_id,
metro_ip_iqn, metro_host_lun):
"""Gets iscsi configuration.
We ideally get saved information in the volume entity, but fall back
@ -340,6 +348,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
:param array_id: the array serial number
:param volume: the cinder volume object
:param ip_and_iqn: list of ip and iqn dicts
:param is_multipath: flag for multipath
@ -350,6 +359,7 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
"""
properties = {}
populate_plurals = False
tgt_iqn, tgt_portal = None, None
if len(ip_and_iqn) > 1 and is_multipath:
populate_plurals = True
elif len(ip_and_iqn) == 1 and is_multipath and metro_ip_iqn:
@ -370,16 +380,44 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
metro_ip_iqn]))
properties['target_luns'].extend(
[metro_host_lun] * len(metro_ip_iqn))
# If load balancing is enabled, select the target IQN and IP address
# of the lowest-load physical port from all ports in the selected
# port group
if self.performance.config.get('load_balance', False):
try:
# Get the dir/ports and create a new mapped dict
tgt_map = {}
for tgt in ip_and_iqn:
tgt_map.update({
tgt['physical_port']: {'ip': tgt['ip'],
'iqn': tgt['iqn']}})
# Calculate load for the ports
load, metric, port = self.performance.process_port_load(
array_id, tgt_map.keys())
# Get the lowest load port from the mapping built above
low_port_map = tgt_map.get(port)
LOG.info("Selecting port %(port)s with %(met)s load %(load)s.",
{'port': port, 'met': metric, 'load': load})
# Set the target IQN and portal
tgt_iqn = low_port_map['iqn']
tgt_portal = low_port_map['ip'] + ":3260"
except (KeyError, exception.VolumeBackendAPIException):
LOG.error("There was an error calculating port load, "
"reverting to default port selection.")
# If load balancing is not enabled or if there has been a problem
# calculating the load, revert to default random IP/IQN selection
if not tgt_iqn or not tgt_portal:
port_idx = random.randint(0, len(ip_and_iqn) - 1)
tgt_iqn = ip_and_iqn[port_idx]['iqn']
tgt_portal = ip_and_iqn[port_idx]['ip'] + ":3260"
properties['target_iqn'] = tgt_iqn
properties['target_portal'] = tgt_portal
properties['target_discovered'] = True
properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0]
properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260"
properties['target_lun'] = host_lun_id
properties['volume_id'] = volume.id
LOG.info("ISCSI properties: %(properties)s.",
{'properties': properties})
LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume})
if self.configuration.safe_get('use_chap_auth'):
LOG.info("Chap authentication enabled.")
properties['auth_method'] = 'CHAP'
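# Illustrative 'data' dict returned for an iSCSI attach when load
# balancing selects a single target (editor's sketch, hypothetical
# values; CHAP keys are added only when use_chap_auth is set):
# {'target_iqn': 'iqn.2020-07.com.example:target-a',
#  'target_portal': '192.168.0.10:3260',
#  'target_discovered': True,
#  'target_lun': 5,
#  'volume_id': '1f2e3d4c-hypothetical-volume-id'}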

View File

@ -0,0 +1,394 @@
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heapq import heappop
from heapq import heappush
import time
from oslo_log import log as logging
from cinder.volume.drivers.dell_emc.powermax import utils
LOG = logging.getLogger(__name__)
class PowerMaxPerformance(object):
"""Performance Class for Dell EMC PowerMax volume drivers.
It supports VMAX 3, All Flash and PowerMax arrays.
"""
def __init__(self, rest, performance_config):
self.rest = rest
self.config = performance_config
def set_performance_configuration(self, array_id, cinder_conf):
"""Set the performance configuration if details present in cinder.conf.
:param array_id: the array serial number -- str
:param cinder_conf: cinder configuration options -- dict
"""
# Get performance registration, real-time registration, and collection
# interval information for PowerMax array
p_reg, rt_reg, c_int = self.get_array_registration_details(array_id)
# Get load balance settings from cinder backend configuration
lb_enabled = cinder_conf.safe_get(utils.LOAD_BALANCE)
rt_enabled = cinder_conf.safe_get(utils.LOAD_BALANCE_RT)
# Real-time
if rt_enabled and not rt_reg:
LOG.warning(
"Real-time load balancing is enabled but array %(arr)s is not "
"registered for real-time performance metrics collection. "
"Diagnostic performance metrics will be used instead.",
{'arr': array_id})
rt_enabled = False
# Load balancing enabled but array not registered for perf metrics
if (lb_enabled or rt_enabled) and not p_reg:
LOG.warning(
"Load balancing is enabled but array %(arr)s is not "
"registered for performance metrics collection. Reverting to "
"default random Port and Port Group selection",
{'arr': array_id})
return {'load_balance': False}
data_format = cinder_conf.safe_get(utils.PERF_DATA_FORMAT)
if data_format.lower() not in ['average', 'avg', 'maximum', 'max']:
LOG.warning("Incorrect data format '%(df)s', reverting to "
"default value 'Average'.", {'df': data_format})
data_format = 'Average'
if data_format.lower() in ['average', 'avg']:
data_format = 'Average'
elif data_format.lower() in ['maximum', 'max']:
data_format = 'Maximum'
# Get diagnostic metrics look back window
lb_diagnostic = cinder_conf.safe_get(utils.LOAD_LOOKBACK)
if not lb_diagnostic:
LOG.warning(
"Diagnostic look back window not set in cinder.conf, "
"reverting to default value of 60 for most recent hour of "
"metrics.")
lb_diagnostic = 60
elif lb_diagnostic < 0 or lb_diagnostic > 1440:
LOG.warning(
"Diagnostic look back window '%(lb)s' is not within the "
"minimum and maximum range 0-1440, reverting to default "
"value of 60 for most recent hour of metrics.", {
'lb': lb_diagnostic})
lb_diagnostic = 60
# Get real-time metrics look back window
lb_real_time = cinder_conf.safe_get(utils.LOAD_LOOKBACK_RT)
if rt_enabled:
if not lb_real_time:
LOG.warning(
"Real-time look back window not set in cinder.conf, "
"reverting to default value of 1 for for most recent "
"minute of metrics.")
lb_real_time = 1
elif lb_real_time < 1 or lb_real_time > 60:
LOG.warning(
"Real-time look back window '%(lb)s' is not within the "
"minimum and maximum range 1-60, reverting to default "
"value of 1 for for most recent minute of metrics.", {
'lb': lb_real_time})
lb_real_time = 1
# Get Port Group metric for load calculation
pg_metric = cinder_conf.safe_get(utils.PORT_GROUP_LOAD_METRIC)
if not pg_metric:
LOG.warning(
"Port Group performance metric not set in cinder.conf, "
"reverting to default metric 'PercentBusy'.")
pg_metric = 'PercentBusy'
elif pg_metric not in utils.PG_METRICS:
LOG.warning(
"Port Group performance metric selected for load "
"balancing '%(pg_met)s' is not valid, reverting to "
"default metric 'PercentBusy'.", {
'pg_met': pg_metric})
pg_metric = 'PercentBusy'
# Get Port metric for load calculation
port_metric = cinder_conf.safe_get(utils.PORT_LOAD_METRIC)
valid_port_metrics = (
utils.PORT_RT_METRICS if rt_enabled else utils.PORT_METRICS)
if not port_metric:
LOG.warning(
"Port performance metric not set in cinder.conf, "
"reverting to default metric 'PercentBusy'.")
port_metric = 'PercentBusy'
elif port_metric not in valid_port_metrics:
LOG.warning(
"Port performance metric selected for load balancing "
"'%(port_met)s' is not valid, reverting to default metric "
"'PercentBusy'.", {'port_met': port_metric})
port_metric = 'PercentBusy'
self.config = {
'load_balance': lb_enabled, 'load_balance_rt': rt_enabled,
'perf_registered': p_reg, 'rt_registered': rt_reg,
'collection_interval': c_int, 'data_format': data_format,
'look_back': lb_diagnostic, 'look_back_rt': lb_real_time,
'port_group_metric': pg_metric, 'port_metric': port_metric}
def get_array_registration_details(self, array_id):
"""Get array performance registration details.
:param array_id: the array serial number -- str
:returns: performance registered, real-time registered,
collection interval -- bool, bool, int
"""
LOG.info("Retrieving array %(arr)s performance registration details.",
{'arr': array_id})
array_reg_uri = self.rest.build_uri(
category=utils.PERFORMANCE, resource_level=utils.ARRAY_PERF,
resource_type=utils.REG_DETAILS, resource_type_id=array_id,
no_version=True)
reg_details = self.rest.get_request(
target_uri=array_reg_uri,
resource_type='Array registration details')
array_reg_info = reg_details.get(utils.REG_DETAILS_INFO)[0]
perf_registered = array_reg_info.get(utils.DIAGNOSTIC)
real_time_registered = array_reg_info.get(utils.REAL_TIME)
collection_interval = array_reg_info.get(utils.COLLECTION_INT)
return perf_registered, real_time_registered, collection_interval
def get_array_performance_keys(self, array_id):
"""Get array performance keys (first and last available timestamps).
:param array_id: the array serial number
:returns: first date, last date -- int, int
"""
LOG.debug("Retrieving array %(arr)s performance keys.",
{'arr': array_id})
array_keys_uri = self.rest.build_uri(
category=utils.PERFORMANCE, resource_level=utils.ARRAY_PERF,
resource_type=utils.KEYS, no_version=True)
array_keys = self.rest.get_request(
target_uri=array_keys_uri, resource_type='Array performance keys')
env_symm_info = array_keys.get(utils.ARRAY_INFO)
f_date, l_date = None, None
for symm in env_symm_info:
if symm.get(utils.SYMM_ID) == array_id:
f_date, l_date = symm.get(utils.F_DATE), symm.get(utils.L_DATE)
return f_date, l_date
@staticmethod
def _get_look_back_window_interval_timestamp(l_date, lb_window):
"""Get first date value when calculated from last date and window.
:param l_date: the last (most recent) timestamp -- int
:param lb_window: the look back window in minutes -- int
:returns: the first timestamp -- int
"""
return l_date - (utils.ONE_MINUTE * lb_window)
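# Worked example of the window arithmetic above (editor's sketch,
# hypothetical timestamps): a 60 minute look back window starts exactly
# 3,600,000 ms before the last available timestamp.
ONE_MINUTE = 60000                          # mirrors utils.ONE_MINUTE
l_date, lb_window = 1600000000000, 60
f_date = l_date - (ONE_MINUTE * lb_window)
assert f_date == 1599996400000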
@staticmethod
def _process_load(performance_data, metric):
"""Process the load for a given performance response, return average.
:param performance_data: raw performance data from REST API -- dict
:param metric: performance metric in use -- str
:returns: range average, range total, interval count -- float, float, int
"""
data = performance_data.get(utils.RESULT_LIST)
result = data.get(utils.RESULT)
total = 0
for timestamp in result:
total += timestamp.get(metric)
return total / len(result), total, len(result)
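# Minimal sketch of the payload handled above, using hypothetical
# numbers; the real response comes from the Unisphere performance REST
# API and is keyed by utils.RESULT_LIST / utils.RESULT.
sample = {'resultList': {'result': [{'PercentBusy': 10.0},
                                    {'PercentBusy': 20.0}]}}
values = [i['PercentBusy'] for i in sample['resultList']['result']]
print(sum(values) / len(values), sum(values), len(values))  # 15.0 30.0 2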
def _get_port_group_performance_stats(
self, array_id, port_group_id, f_date, l_date, metric,
data_format):
"""Get performance data for a given port group and performance metric.
:param array_id: the array serial number -- str
:param port_group_id: the port group id -- str
:param f_date: first date for stats -- int
:param l_date: last date for stats -- int
:param metric: performance metric -- str
:param data_format: performance data format -- str
:returns: range average, range total, interval count -- float, float,
int
"""
request_body = {
utils.SYMM_ID: array_id, utils.PORT_GROUP_ID: port_group_id,
utils.S_DATE: f_date, utils.E_DATE: l_date,
utils.DATA_FORMAT: data_format, utils.METRICS: [metric]}
port_group_uri = self.rest.build_uri(
category=utils.PERFORMANCE, resource_level=utils.PORT_GROUP,
resource_type=utils.METRICS, no_version=True)
result = self.rest.post_request(
port_group_uri, 'Port Group performance metrics',
request_body)
return self._process_load(result, metric)
def _get_port_performance_stats(
self, array_id, director_id, port_id, f_date, l_date, metric,
data_format=None, real_time=False):
"""Get performance data for a given port and performance metric.
:param array_id: the array serial number -- str
:param director_id: the director id -- str
:param port_id: the port id -- str
:param f_date: first date for stats -- int
:param l_date: last date for stats -- int
:param metric: performance metric -- str
:param data_format: performance data format -- str
:param real_time: if metrics are real-time -- bool
:returns: range average, range total, interval count -- float, float,
int
"""
if real_time:
target_uri = self.rest.build_uri(
category=utils.PERFORMANCE, resource_level=utils.REAL_TIME,
resource_type=utils.METRICS, no_version=True)
res_type = 'real-time'
dir_port = ('%(dir)s:%(port)s' % {'dir': director_id,
'port': port_id})
request_body = {
utils.SYMM_ID: array_id, utils.INST_ID: dir_port,
utils.S_DATE: f_date, utils.E_DATE: l_date,
utils.CAT: utils.FE_PORT_RT, utils.METRICS: [metric]}
else:
target_uri = self.rest.build_uri(
category=utils.PERFORMANCE, resource_level=utils.FE_PORT_DIAG,
resource_type=utils.METRICS, no_version=True)
res_type = 'diagnostic'
request_body = {
utils.SYMM_ID: array_id,
utils.DIR_ID: director_id, utils.PORT_ID: port_id,
utils.S_DATE: f_date, utils.E_DATE: l_date,
utils.DATA_FORMAT: data_format, utils.METRICS: [metric]}
resource = '%(res)s Port performance metrics' % {'res': res_type}
result = self.rest.post_request(
target_uri, resource, request_body)
return self._process_load(result, metric)
def process_port_group_load(
self, array_id, port_groups, max_load=False):
"""Calculate the load for one or more port groups.
:param array_id: the array serial number -- str
:param port_groups: port group names -- list
:param max_load: if max load port group should be returned -- bool
:returns: lowest/highest avg, metric, port group -- tuple(float, str, str)
"""
LOG.info("Calculating array %(arr)s load for Port Groups %(pg)s.",
{'arr': array_id, 'pg': port_groups})
data_format = self.config.get('data_format')
lb_window = self.config.get('look_back')
pg_metric = self.config.get('port_group_metric')
__, l_date = self.get_array_performance_keys(array_id)
f_date = self._get_look_back_window_interval_timestamp(
l_date, lb_window)
heap_low, heap_high = [], []
start_time = time.time()
for pg in port_groups:
avg, total, cnt = self._get_port_group_performance_stats(
array_id, pg, f_date, l_date, pg_metric, data_format)
LOG.debug(
"Port Group '%(pg)s' %(df)s %(met)s load for %(interval)s min "
"interval: %(avg)s",
{'pg': pg, 'df': data_format, 'met': pg_metric,
'interval': lb_window, 'avg': avg})
# Add PG average to lowest load heap
heappush(heap_low, (avg, pg_metric, pg))
# Add inverse PG average to highest load heap
heappush(heap_high, (-avg, pg_metric, pg))
LOG.debug("Time taken to analyse Port Group performance: %(t)ss",
{'t': time.time() - start_time})
return heappop(heap_high) if max_load else heappop(heap_low)
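# Tiny illustration of the heap trick used above (editor's sketch):
# pushing (avg, ...) makes heappop return the least loaded entry, while
# pushing (-avg, ...) makes heappop return the most loaded one.
# (heappush/heappop come from the heapq imports at the top of this module.)
_low, _high = [], []
for _avg, _pg in [(12.5, 'pg_a'), (3.0, 'pg_b'), (40.0, 'pg_c')]:
    heappush(_low, (_avg, 'PercentBusy', _pg))
    heappush(_high, (-_avg, 'PercentBusy', _pg))
assert heappop(_low)[2] == 'pg_b'       # least loaded Port Group
assert heappop(_high)[2] == 'pg_c'      # most loaded Port Group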
def process_port_load(self, array_id, ports, max_load=False):
"""Calculate the load for one or more ports.
:param array_id: the array serial number -- str
:param ports: physical dir:port names -- list
:param max_load: if max load port should be returned -- bool
:returns: lowest/highest avg, metric, port -- tuple(float, str, str)
"""
LOG.info("Calculating array %(arr)s load for Ports %(port)s.",
{'arr': array_id, 'port': ports})
rt_enabled = self.config.get('load_balance_rt')
rt_registered = self.config.get('rt_registered')
if rt_enabled and rt_registered:
real_time, data_format = True, None
lb_window = self.config.get('look_back_rt')
else:
real_time, data_format = False, self.config.get('data_format')
lb_window = self.config.get('look_back')
port_metric = self.config.get('port_metric')
__, l_date = self.get_array_performance_keys(array_id)
f_date = self._get_look_back_window_interval_timestamp(
l_date, lb_window)
heap_low, heap_high = [], []
start_time = time.time()
for port in ports:
dir_id = port.split(':')[0]
port_no = port.split(':')[1]
avg, total, cnt = self._get_port_performance_stats(
array_id, dir_id, port_no, f_date, l_date, port_metric,
data_format, real_time=real_time)
LOG.debug(
"Port '%(dir)s:%(port)s' %(df)s %(met)s load for %(int)s min "
"interval: %(avg)s",
{'dir': dir_id, 'port': port_no,
'df': data_format if data_format else '',
'met': port_metric, 'int': lb_window, 'avg': avg})
# Add port average to lowest load heap
heappush(heap_low, (avg, port_metric, port))
# Add inverse port average to highest load heap
heappush(heap_high, (-avg, port_metric, port))
LOG.debug("Time taken to analyse Port Group performance: %(t)ss",
{'t': time.time() - start_time})
return heappop(heap_high) if max_load else heappop(heap_low)
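# Minimal usage sketch (editor's note; assumes an initialised
# PowerMaxPerformance instance `perf`, a hypothetical array serial
# number and hypothetical dir:port names):
# load, metric, port = perf.process_port_load(
#     '000111222333', ['FA-1D:4', 'FA-2D:4'])               # least busy
# load, metric, port = perf.process_port_load(
#     '000111222333', ['FA-1D:4', 'FA-2D:4'], max_load=True)  # busiest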

View File

@ -1722,6 +1722,50 @@ class PowerMaxRest(object):
iqn = port_details['symmetrixPort']['identifier']
return ip_addresses, iqn
def get_ip_interface_physical_port(self, array_id, virtual_port,
ip_address):
"""Get the physical port associated with a virtual port and IP address.
:param array_id: the array serial number -- str
:param virtual_port: the director & virtual port identifier -- str
:param ip_address: the ip address associated with the port -- str
:returns: physical director:port -- str
"""
director_id = virtual_port.split(':')[0]
params = {'ip_list': ip_address, 'iscsi_target': False}
target_uri = self.build_uri(
category=SYSTEM, resource_level='symmetrix',
resource_level_id=array_id, resource_type='director',
resource_type_id=director_id, resource='port')
port_info = self.get_request(
target_uri, 'port IP interface', params)
port_key = port_info.get('symmetrixPortKey', [])
if len(port_key) == 1:
port_info = port_key[0]
port_id = port_info.get('portId')
dir_port = '%(d)s:%(p)s' % {'d': director_id, 'p': port_id}
else:
if len(port_key) == 0:
msg = (_(
"Virtual port %(vp)s and IP address %(ip)s are not "
"associated a physical director:port. Please check "
"iSCSI configuration of backend array %(arr)s." % {
'vp': virtual_port, 'ip': ip_address, 'arr': array_id}
))
else:
msg = (_(
"Virtual port %(vp)s and IP address %(ip)s are associated "
"with more than one physical director:port. Please check "
"iSCSI configuration of backend array %(arr)s." % {
'vp': virtual_port, 'ip': ip_address, 'arr': array_id}
))
LOG.error(msg)
raise exception.VolumeBackendAPIException(message=msg)
return dir_port
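# Illustrative REST response handled above (editor's sketch, hypothetical
# values): exactly one 'symmetrixPortKey' entry maps the virtual port to
# 'SE-2F:8'; zero or multiple entries raise VolumeBackendAPIException.
# {'symmetrixPortKey': [{'directorId': 'SE-2F', 'portId': '8'}]}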
def get_target_wwns(self, array, portgroup):
"""Get the director ports' wwns.

View File

@ -152,6 +152,62 @@ PMAX_SLS = ['Diamond', 'Platinum', 'Gold', 'Silver', 'Bronze', 'Optimized',
'None', 'NONE']
PMAX_WLS = ['NONE', 'None']
# Performance
# Metrics
PG_METRICS = [
'AvgIOSize', 'IOs', 'MBRead', 'MBWritten', 'MBs', 'PercentBusy',
'Reads', 'Writes']
PORT_METRICS = [
'AvgIOSize', 'IOs', 'MBRead', 'MBWritten', 'MBs', 'MaxSpeedGBs',
'PercentBusy', 'ReadResponseTime', 'Reads', 'ResponseTime', 'SpeedGBs',
'WriteResponseTime', 'Writes']
PORT_RT_METRICS = [
'AvgIOSize', 'IOs', 'MBRead', 'MBWritten', 'MBs', 'PercentBusy', 'Reads',
'ResponseTime', 'Writes']
# Cinder config options
LOAD_BALANCE = 'load_balance'
LOAD_BALANCE_RT = 'load_balance_real_time'
PERF_DATA_FORMAT = 'load_data_format'
LOAD_LOOKBACK = 'load_look_back'
LOAD_LOOKBACK_RT = 'load_look_back_real_time'
PORT_GROUP_LOAD_METRIC = 'port_group_load_metric'
PORT_LOAD_METRIC = 'port_load_metric'
# One minute in milliseconds
ONE_MINUTE = 60000
# Default look back windows in minutes
DEFAULT_DIAG_WINDOW = 60
DEFAULT_RT_WINDOW = 1
# REST API keys
PERFORMANCE = 'performance'
REG_DETAILS = 'registrationdetails'
REG_DETAILS_INFO = 'registrationDetailsInfo'
COLLECTION_INT = 'collectionintervalmins'
DIAGNOSTIC = 'diagnostic'
REAL_TIME = 'realtime'
RESULT_LIST = 'resultList'
RESULT = 'result'
KEYS = 'keys'
METRICS = 'metrics'
CAT = 'category'
F_DATE = 'firstAvailableDate'
S_DATE = 'startDate'
L_DATE = 'lastAvailableDate'
E_DATE = 'endDate'
SYMM_ID = 'symmetrixId'
ARRAY_PERF = 'Array'
ARRAY_INFO = 'arrayInfo'
PORT_GROUP = 'PortGroup'
PORT_GROUP_ID = 'portGroupId'
FE_PORT_RT = 'FEPORT'
FE_PORT_DIAG = 'FEPort'
DATA_FORMAT = 'dataFormat'
INST_ID = 'instanceId'
DIR_ID = 'directorId'
PORT_ID = 'portId'
class PowerMaxUtils(object):
"""Utility class for Rest based PowerMax volume drivers.

View File

@ -0,0 +1,6 @@
---
features:
- |
The PowerMax for Cinder driver now supports Port Group and Port load
balancing when attaching Nova Compute instances to volumes on the
backend PowerMax.
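A minimal illustrative backend stanza in cinder.conf (editor's example;
the section name is hypothetical, option names are taken from the new
driver configuration constants, and the values mirror the driver
defaults where defaults exist):

  [powermax_backend_example]
  load_balance = True
  load_balance_real_time = False
  load_data_format = Average
  load_look_back = 60
  load_look_back_real_time = 1
  port_group_load_metric = PercentBusy
  port_load_metric = PercentBusy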