From f6d9fbadb23a5dcd7aea026895b38e11f1d3ec2a Mon Sep 17 00:00:00 2001 From: Helen Walsh Date: Mon, 10 Apr 2017 20:18:27 +0100 Subject: [PATCH] VMAX driver - Base functionality, replacing SMI-S with REST In VMAX driver version 3.0, SMI-S will be replaced with unisphere REST. Pike will support VMAX3 hybrid and All Flash arrays. This submission is for base functionality only. Change-Id: Ic0bdf57bd5f2b1af8e7954c70225921c2501060e Implements: blueprint vmax-rest --- cinder/opts.py | 2 +- .../unit/volume/drivers/dell_emc/test_vmax.py | 10850 ---------------- .../volume/drivers/dell_emc/vmax/__init__.py | 0 .../volume/drivers/dell_emc/vmax/test_vmax.py | 4308 ++++++ cinder/volume/drivers/dell_emc/vmax/common.py | 6520 ++-------- cinder/volume/drivers/dell_emc/vmax/fast.py | 828 -- cinder/volume/drivers/dell_emc/vmax/fc.py | 330 +- cinder/volume/drivers/dell_emc/vmax/https.py | 347 - cinder/volume/drivers/dell_emc/vmax/iscsi.py | 306 +- .../volume/drivers/dell_emc/vmax/masking.py | 3590 ++--- .../volume/drivers/dell_emc/vmax/provision.py | 1300 +- .../drivers/dell_emc/vmax/provision_v3.py | 1063 -- cinder/volume/drivers/dell_emc/vmax/rest.py | 1594 +++ cinder/volume/drivers/dell_emc/vmax/utils.py | 3107 +---- .../notes/vmax-rest-94e48bed6f9c134c.yaml | 5 + 15 files changed, 8680 insertions(+), 25470 deletions(-) delete mode 100644 cinder/tests/unit/volume/drivers/dell_emc/test_vmax.py create mode 100644 cinder/tests/unit/volume/drivers/dell_emc/vmax/__init__.py create mode 100644 cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py delete mode 100644 cinder/volume/drivers/dell_emc/vmax/fast.py delete mode 100644 cinder/volume/drivers/dell_emc/vmax/https.py delete mode 100644 cinder/volume/drivers/dell_emc/vmax/provision_v3.py create mode 100644 cinder/volume/drivers/dell_emc/vmax/rest.py create mode 100644 releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml diff --git a/cinder/opts.py b/cinder/opts.py index 3231be1dddf..6b3a20f47f1 100644 --- a/cinder/opts.py +++ 
b/cinder/opts.py @@ -290,7 +290,7 @@ def list_opts(): common_opts, cinder_volume_drivers_dell_emc_scaleio_driver.scaleio_opts, cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS, - cinder_volume_drivers_dell_emc_vmax_common.emc_opts, + cinder_volume_drivers_dell_emc_vmax_common.vmax_opts, cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS, cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS, cinder_volume_drivers_disco_disco.disco_opts, diff --git a/cinder/tests/unit/volume/drivers/dell_emc/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/test_vmax.py deleted file mode 100644 index b012f5d9faa..00000000000 --- a/cinder/tests/unit/volume/drivers/dell_emc/test_vmax.py +++ /dev/null @@ -1,10850 +0,0 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ast -import os -import shutil -import sys -import tempfile -import unittest -import uuid -from xml.dom import minidom - -import ddt -import mock -from oslo_utils import units -import six - -from cinder import exception -from cinder.i18n import _ -from cinder.objects import consistencygroup -from cinder.objects import fields -from cinder.objects import qos_specs -from cinder import test -from cinder.tests.unit import fake_constants -from cinder.tests.unit import utils as unit_utils -from cinder import utils as cinder_utils - -from cinder.volume import configuration as conf -from cinder.volume.drivers.dell_emc.vmax import common -from cinder.volume.drivers.dell_emc.vmax import fast -from cinder.volume.drivers.dell_emc.vmax import fc -from cinder.volume.drivers.dell_emc.vmax import iscsi -from cinder.volume.drivers.dell_emc.vmax import masking -from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import provision_v3 -from cinder.volume.drivers.dell_emc.vmax import utils -from cinder.volume import volume_types - -CINDER_EMC_CONFIG_DIR = '/etc/cinder/' -utils.JOB_RETRIES = 0 -utils.INTERVAL_10_SEC = 0 - - -class EMC_StorageVolume(dict): - pass - - -class CIM_StorageExtent(dict): - pass - - -class SE_InitiatorMaskingGroup(dict): - pass - - -class SE_ConcreteJob(dict): - pass - - -class SE_StorageHardwareID(dict): - pass - - -class CIM_ReplicationServiceCapabilities(dict): - pass - - -class SYMM_SrpStoragePool(dict): - pass - - -class SYMM_LunMasking(dict): - pass - - -class CIM_DeviceMaskingGroup(dict): - pass - - -class EMC_LunMaskingSCSIProtocolController(dict): - pass - - -class CIM_TargetMaskingGroup(dict): - pass - - -class EMC_StorageHardwareID(dict): - pass - - -class CIM_IPProtocolEndpoint(dict): - pass - - -class Symm_ArrayChassis(dict): - pass - - -class CIM_ConnectivityCollection(dict): - pass - - -class SE_ReplicationSettingData(dict): - def __init__(self, *args, **kwargs): - self['DefaultInstance'] = 
self.createInstance() - - def createInstance(self): - self.DesiredCopyMethodology = 0 - - -class Fake_CIMProperty(object): - - def fake_getCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = True - return cimproperty - - def fake_getBlockSizeCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = '512' - return cimproperty - - def fake_getConsumableBlocksCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = '12345' - return cimproperty - - def fake_getIsConcatenatedCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = True - return cimproperty - - def fake_getIsCompositeCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = False - return cimproperty - - def fake_getTotalManagedSpaceCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = '20000000000' - return cimproperty - - def fake_getRemainingManagedSpaceCIMProperty(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = '10000000000' - return cimproperty - - def fake_getElementNameCIMProperty(self, name): - cimproperty = Fake_CIMProperty() - cimproperty.value = name - return cimproperty - - def fake_getSupportedReplicationTypes(self): - cimproperty = Fake_CIMProperty() - cimproperty.value = [2, 10] - return cimproperty - - def fake_getipv4address(self): - cimproperty = Fake_CIMProperty() - cimproperty.key = 'IPv4Address' - cimproperty.value = '10.10.10.10' - return cimproperty - - def fake_getiqn(self): - cimproperty = Fake_CIMProperty() - cimproperty.key = 'Name' - cimproperty.value = ( - 'iqn.1992-04.com.emc:600009700bca30c01b9c012000000003,t,0x0001') - return cimproperty - - def fake_getSupportedReplicationTypesCIMProperty(self, reptypes): - cimproperty = Fake_CIMProperty() - if reptypes == 'V3': - cimproperty.value = [6, 7] - elif reptypes == 'V3_SYNC': - cimproperty.value = [6] - elif reptypes == 'V3_ASYNC': - cimproperty.value = [7] - elif reptypes == 'V2': - 
cimproperty.value = [10] - else: - cimproperty.value = [2, 3, 4, 5] - return cimproperty - - -class Fake_CIM_TierPolicyServiceCapabilities(object): - - def fake_getpolicyinstance(self): - classinstance = Fake_CIM_TierPolicyServiceCapabilities() - - classcimproperty = Fake_CIMProperty() - cimproperty = classcimproperty.fake_getCIMProperty() - - cimproperties = {u'SupportsTieringPolicies': cimproperty} - classinstance.properties = cimproperties - - return classinstance - - -class FakeCIMInstanceName(dict): - - def fake_getinstancename(self, classname, bindings): - instancename = FakeCIMInstanceName() - for key in bindings: - instancename[key] = bindings[key] - instancename.classname = classname - instancename.namespace = 'root/emc' - return instancename - - -class FakeDB(object): - - def volume_update(self, context, volume_id, model_update): - pass - - def volume_get(self, context, volume_id): - conn = FakeEcomConnection() - objectpath = {} - objectpath['CreationClassName'] = 'Symm_StorageVolume' - - if volume_id == 'vol1': - device_id = '1' - objectpath['DeviceID'] = device_id - else: - objectpath['DeviceID'] = volume_id - return conn.GetInstance(objectpath) - - def volume_get_all_by_group(self, context, group_id): - volumes = [] - volumes.append(VMAXCommonData.test_source_volume) - return volumes - - def consistencygroup_get(self, context, cg_group_id): - return VMAXCommonData.test_CG - - def snapshot_get_all_for_cgsnapshot(self, context, cgsnapshot_id): - snapshots = [] - snapshots.append(VMAXCommonData.test_snapshot) - return snapshots - - -class VMAXCommonData(object): - wwpn1 = "123456789012345" - wwpn2 = "123456789054321" - connector = {'ip': '10.0.0.2', - 'initiator': 'iqn.1993-08.org.debian: 01: 222', - 'wwpns': [wwpn1, wwpn2], - 'wwnns': ["223456789012345", "223456789054321"], - 'host': 'fakehost'} - - target_wwns = [wwn[::-1] for wwn in connector['wwpns']] - - fabric_name_prefix = "fakeFabric" - end_point_map = {connector['wwpns'][0]: [target_wwns[0]], - 
connector['wwpns'][1]: [target_wwns[1]]} - zoning_mappings = {'port_group': None, - 'initiator_group': None, - 'target_wwns': target_wwns, - 'init_targ_map': end_point_map} - device_map = {} - for wwn in connector['wwpns']: - fabric_name = ''.join([fabric_name_prefix, - wwn[-2:]]) - target_wwn = wwn[::-1] - fabric_map = {'initiator_port_wwn_list': [wwn], - 'target_port_wwn_list': [target_wwn] - } - device_map[fabric_name] = fabric_map - - default_storage_group = ( - u'//10.10.10.10/root/emc: SE_DeviceMaskingGroup.InstanceID=' - '"SYMMETRIX+000198700440+OS_default_GOLD1_SG"') - default_sg_instance_name = { - 'CreationClassName': 'CIM_DeviceMaskingGroup', - 'ElementName': 'OS_default_GOLD1_SG', - 'SystemName': 'SYMMETRIX+000195900551'} - sg_instance_name = { - 'CreationClassName': 'CIM_DeviceMaskingGroup', - 'ElementName': 'OS-fakehost-SRP_1-Bronze-DSS-I-SG', - 'SystemName': 'SYMMETRIX+000195900551'} - storage_system = 'SYMMETRIX+000195900551' - storage_system_v3 = 'SYMMETRIX-+-000197200056' - port_group = 'OS-portgroup-PG' - port_group_instance = {'ElementName': 'OS-portgroup-PG'} - lunmaskctrl_id = ( - 'SYMMETRIX+000195900551+OS-fakehost-gold-I-MV') - lunmaskctrl_name = ( - 'OS-fakehost-gold-I-MV') - mv_instance_name = { - 'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-SRP_1-Bronze-DSS-I-Mv', - 'SystemName': 'SYMMETRIX+000195900551'} - - rdf_group = 'test_rdf' - srdf_group_instance = ( - '//10.73.28.137/root/emc:Symm_RemoteReplicationCollection.' 
- 'InstanceID="SYMMETRIX-+-000197200056-+-8-+-000195900551-+-8"') - rg_instance_name = { - 'CreationClassName': 'CIM_DeviceMaskingGroup', - 'ElementName': 'OS-SRP_1-gold-DSS-RE-SG', - 'SystemName': 'SYMMETRIX+000197200056' - } - - initiatorgroup_id = ( - 'SYMMETRIX+000195900551+OS-fakehost-IG') - initiatorgroup_name = 'OS-fakehost-I-IG' - initiatorgroup_creationclass = 'SE_InitiatorMaskingGroup' - iscsi_initiator = 'iqn.1993-08.org.debian' - storageextent_creationclass = 'CIM_StorageExtent' - initiator1 = 'iqn.1993-08.org.debian: 01: 1a2b3c4d5f6g' - stconf_service_creationclass = 'Symm_StorageConfigurationService' - ctrlconf_service_creationclass = 'Symm_ControllerConfigurationService' - elementcomp_service_creationclass = 'Symm_ElementCompositionService' - storreloc_service_creationclass = 'Symm_StorageRelocationService' - replication_service_creationclass = 'EMC_ReplicationService' - vol_creationclass = 'Symm_StorageVolume' - pool_creationclass = 'Symm_VirtualProvisioningPool' - lunmask_creationclass = 'Symm_LunMaskingSCSIProtocolController' - lunmask_creationclass2 = 'Symm_LunMaskingView' - hostedservice_creationclass = 'CIM_HostedService' - policycapability_creationclass = 'CIM_TierPolicyServiceCapabilities' - policyrule_creationclass = 'Symm_TierPolicyRule' - assoctierpolicy_creationclass = 'CIM_StorageTier' - storagepool_creationclass = 'Symm_VirtualProvisioningPool' - srpstoragepool_creationclass = 'Symm_SRPStoragePool' - storagegroup_creationclass = 'CIM_DeviceMaskingGroup' - hardwareid_creationclass = 'EMC_StorageHardwareID' - replicationgroup_creationclass = 'CIM_ReplicationGroup' - storagepoolid = 'SYMMETRIX+000195900551+U+gold' - storagegroupname = 'OS-fakehost-gold-I-SG' - defaultstoragegroupname = 'OS_default_GOLD1_SG' - re_storagegroup = 'OS-SRP_1-gold-DSS-RE-SG' - storagevolume_creationclass = 'EMC_StorageVolume' - policyrule = 'gold' - poolname = 'gold' - totalmanagedspace_bits = '1000000000000' - subscribedcapacity_bits = '500000000000' - 
remainingmanagedspace_bits = '500000000000' - maxsubscriptionpercent = 150 - totalmanagedspace_gbs = 931 - subscribedcapacity_gbs = 465 - remainingmanagedspace_gbs = 465 - fake_host = 'HostX@Backend#gold+1234567891011' - fake_host_v3 = 'HostX@Backend#Bronze+SRP_1+1234567891011' - fake_host_2_v3 = 'HostY@Backend#SRP_1+1234567891011' - fake_host_3_v3 = 'HostX@Backend#Bronze+DSS+SRP_1+1234567891011' - fake_host_4_v3 = 'HostX@Backend#Silver+None+SRP_1+1234567891011' - poolInstanceName = { - 'InstanceID': 'SRP_1', - 'CreationClassName': 'Symm_StorageSystem'} - - unit_creationclass = 'CIM_ProtocolControllerForUnit' - storage_type = 'gold' - keybindings = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'1', - 'SystemCreationClassName': u'Symm_StorageSystem'} - - keybindings2 = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'99999', - 'SystemCreationClassName': u'Symm_StorageSystem'} - keybindings3 = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'10', - 'SystemCreationClassName': u'Symm_StorageSystem'} - re_keybindings = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'1', - 'SystemCreationClassName': u'Symm_StorageSystem'} - provider_location = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings, - 'version': '2.5.0'} - provider_location2 = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings2} - provider_location3 = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings3} - provider_location_multi_pool = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings, - 'version': '2.2.0'} - - keybindings_manage = {'CreationClassName': 'Symm_StorageVolume', - 'SystemName': 'SYMMETRIX+000195900551', - 'DeviceID': '10', - 'SystemCreationClassName': 'Symm_StorageSystem'} - provider_location_manage = {'classname': 
'Symm_StorageVolume', - 'keybindings': keybindings_manage} - - manage_vol = EMC_StorageVolume() - manage_vol['CreationClassName'] = 'Symm_StorageVolume' - manage_vol['ElementName'] = 'OS-Test_Manage_vol' - manage_vol['DeviceID'] = '10' - manage_vol['SystemName'] = 'SYMMETRIX+000195900551' - manage_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - manage_vol.path = manage_vol - - replication_driver_data = re_keybindings - block_size = 512 - majorVersion = 1 - minorVersion = 2 - revNumber = 3 - block_size = 512 - - metaHead_volume = {'DeviceID': 10, - 'ConsumableBlocks': 1000} - meta_volume1 = {'DeviceID': 11, - 'ConsumableBlocks': 200} - meta_volume2 = {'DeviceID': 12, - 'ConsumableBlocks': 300} - properties = {'ConsumableBlocks': '12345', - 'BlockSize': '512'} - - array = '000197800123' - array_v3 = '1234567891011' - - test_volume = {'name': 'vol1', - 'size': 1, - 'volume_name': 'vol1', - 'id': '1', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': block_size - } - - test_volume_v2 = {'name': 'vol2', - 'size': 1, - 'volume_name': 'vol2', - 'id': '2', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': block_size - } - - test_volume_v3 = {'name': 'vol3', - 'size': 1, - 'volume_name': 'vol3', - 'id': '3', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': 
fake_host_v3, - 'NumberOfBlocks': 100, - 'BlockSize': block_size - } - - test_volume_v4 = {'name': 'vol1', - 'size': 1, - 'volume_name': 'vol1', - 'id': '1', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': fake_host_3_v3, - 'NumberOfBlocks': 100, - 'BlockSize': block_size, - 'pool_name': 'Bronze+DSS+SRP_1+1234567891011' - } - - test_volume_CG = {'name': 'volInCG', - 'consistencygroup_id': 'abc', - 'size': 1, - 'volume_name': 'volInCG', - 'id': fake_constants.CONSISTENCY_GROUP2_ID, - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'volInCG', - 'display_description': - 'test volume in Consistency group', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': fake_host - } - - test_volume_CG_v3 = consistencygroup.ConsistencyGroup( - context=None, name='volInCG', consistencygroup_id='abc', size=1, - volume_name='volInCG', id=fake_constants.CONSISTENCY_GROUP2_ID, - device_id='1', status='available', - provider_auth=None, volume_type_id='abc', project_id='project', - display_name='volInCG', - display_description='test volume in Consistency group', - host=fake_host_v3, provider_location=six.text_type(provider_location)) - - test_volume_type_QOS = qos_specs.QualityOfServiceSpecs( - id=fake_constants.QOS_SPEC_ID, - name='qosName', - consumer=fields.QoSConsumerValues.BACK_END, - specs={'maxIOPS': '6000', 'maxMBPS': '6000', - 'DistributionType': 'Always'} - ) - - test_failed_volume = {'name': 'failed_vol', - 'size': 1, - 'volume_name': 'failed_vol', - 'id': '4', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'failed_vol', - 'display_description': 'test failed volume', - 'volume_type_id': 'abc', - 'host': fake_host} - - 
failed_delete_vol = {'name': 'failed_delete_vol', - 'size': '-1', - 'volume_name': 'failed_delete_vol', - 'id': '99999', - 'device_id': '99999', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'failed delete vol', - 'display_description': 'failed delete volume', - 'volume_type_id': 'abc', - 'provider_location': - six.text_type(provider_location2), - 'host': fake_host} - - test_source_volume = {'size': 1, - 'volume_type_id': 'sourceid', - 'display_name': 'sourceVolume', - 'name': 'sourceVolume', - 'device_id': '10', - 'volume_name': 'vmax-154326', - 'provider_auth': None, - 'project_id': 'project', - 'id': '2', - 'host': fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': block_size, - 'provider_location': - six.text_type(provider_location3), - 'display_description': 'snapshot source volume'} - - test_source_volume_v3 = {'size': 1, - 'volume_type_id': 'sourceid', - 'display_name': 'sourceVolume', - 'name': 'sourceVolume', - 'device_id': '10', - 'volume_name': 'vmax-154326', - 'provider_auth': None, - 'project_id': 'project', - 'id': '2', - 'host': fake_host_v3, - 'NumberOfBlocks': 100, - 'BlockSize': block_size, - 'provider_location': - six.text_type(provider_location3), - 'display_description': 'snapshot source volume'} - - test_source_volume_1_v3 = {'size': 1, - 'volume_type_id': 'sourceid', - 'display_name': 'sourceVolume', - 'name': 'sourceVolume', - 'id': 'sourceVolume', - 'device_id': '10', - 'volume_name': 'vmax-154326', - 'provider_auth': None, - 'project_id': 'project', - 'host': fake_host_4_v3, - 'NumberOfBlocks': 100, - 'BlockSize': block_size, - 'provider_location': - six.text_type(provider_location), - 'display_description': 'snapshot source volume'} - - test_volume_re = {'name': 'vol1', - 'size': 1, - 'volume_name': 'vol1', - 'id': '1', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': 
six.text_type( - provider_location), - 'status': 'available', - 'replication_status': fields.ReplicationStatus.ENABLED, - 'host': fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': block_size, - 'replication_driver_data': six.text_type( - replication_driver_data)} - - test_failed_re_volume = {'name': 'vol1', - 'size': 1, - 'volume_name': 'vol1', - 'id': '1', - 'device_id': '1', - 'display_name': 'vol1', - 'volume_type_id': 'abc', - 'provider_location': six.text_type( - {'keybindings': 'fake_keybindings'}), - 'replication_status': ( - fields.ReplicationStatus.ENABLED), - 'replication_driver_data': 'fake_data', - 'host': fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': block_size - } - - test_snapshot_re = {'name': 'mySnap', - 'id': '1', - 'status': 'available', - 'host': fake_host, - 'volume': test_source_volume, - 'provider_location': six.text_type(provider_location)} - - test_CG = consistencygroup.ConsistencyGroup( - context=None, name='myCG1', id=fake_constants.UUID1, - volume_type_id='abc', status=fields.ConsistencyGroupStatus.AVAILABLE) - source_CG = consistencygroup.ConsistencyGroup( - context=None, name='myCG1', id='12345abcde', - volume_type_id='sourceid', - status=fields.ConsistencyGroupStatus.AVAILABLE) - - deleted_volume = {'id': 'deleted_vol', - 'provider_location': six.text_type(provider_location)} - - test_snapshot = {'name': 'myCG1', - 'id': fake_constants.UUID1, - 'status': 'available', - 'host': fake_host, - 'volume': test_source_volume, - 'provider_location': six.text_type(provider_location) - } - test_snapshot_v3 = {'name': 'myCG1', - 'id': fake_constants.UUID1, - 'status': 'available', - 'host': fake_host_v3, - 'volume': test_source_volume_v3, - 'provider_location': six.text_type(provider_location) - } - test_snapshot_1_v3 = {'name': 'mySnap', - 'id': '1', - 'status': 'available', - 'host': fake_host_4_v3, - 'volume': test_source_volume_1_v3, - 'provider_location': six.text_type(provider_location) - } - test_CG_snapshot = {'name': 'testSnap', - 
'id': fake_constants.UUID1, - 'consistencygroup_id': fake_constants.UUID1, - 'status': 'available', - 'snapshots': [], - 'consistencygroup': test_CG - } - location_info = {'location_info': '000195900551#silver#None', - 'storage_protocol': 'ISCSI'} - location_info_v3 = {'location_info': '1234567891011#SRP_1#Bronze#DSS', - 'storage_protocol': 'FC'} - test_host = {'capabilities': location_info, - 'host': 'fake_host'} - test_host_v3 = {'capabilities': location_info_v3, - 'host': fake_host_2_v3} - test_host_1_v3 = {'capabilities': location_info_v3, - 'host': fake_host_4_v3} - initiatorNames = ["123456789012345", "123456789054321"] - storagegroups = [{'CreationClassName': storagegroup_creationclass, - 'ElementName': storagegroupname}, - {'CreationClassName': storagegroup_creationclass, - 'ElementName': 'OS-SRP_1-Bronze-DSS-SG'}] - iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001' - iscsi_device_info = {'maskingview': u'OS-host-SRP_1-Diamond-NONE-MV', - 'ip_and_iqn': [{'ip': u'123.456.7.8', - 'iqn': iqn}], - 'is_multipath': False, - 'storagesystem': u'SYMMETRIX-+-012345678901', - 'controller': {'host': '10.00.00.00'}, - 'hostlunid': 3} - fc_device_info = {'maskingview': u'OS-host-SRP_1-Diamond-NONE-MV', - 'storagesystem': u'SYMMETRIX-+-012345678901', - 'controller': {'host': '10.00.00.00'}, - 'hostlunid': 3} - test_ctxt = {} - new_type = {'extra_specs': {}} - diff = {} - extra_specs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': u'DSS', - 'storagetype:slo': u'Bronze', - 'storagetype:array': u'1234567891011', - 'MultiPoolSupport': False, - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG'} - extra_specs_no_slo = {'storagetype:pool': 'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': None, - 'storagetype:slo': None, - 'storagetype:array': '1234567891011', - 'isV3': True, - 'portgroupname': 'OS-portgroup-PG'} - - multi_pool_extra_specs = {'storagetype:pool': u'SRP_1', - 
'volume_backend_name': 'MULTI_POOL_BE', - 'storagetype:workload': u'DSS', - 'storagetype:slo': u'Bronze', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'pool_name': u'Bronze+DSS+SRP_1+1234567891011'} - - extra_specs_is_re = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'VMAXReplication', - 'storagetype:workload': u'DSS', - 'storagetype:slo': u'Bronze', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'replication_enabled': True, - 'MultiPoolSupport': False} - - remainingSLOCapacity = '123456789' - SYNCHRONIZED = 4 - UNSYNCHRONIZED = 3 - multiPoolSupportEnabled = True - - -class FakeLookupService(object): - def get_device_mapping_from_network(self, initiator_wwns, target_wwns): - return VMAXCommonData.device_map - - -class FakeEcomConnection(object): - - def __init__(self, *args, **kwargs): - self.data = VMAXCommonData() - - def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, - ElementType=None, Size=None, - SyncType=None, SourceElement=None, TargetElement=None, - Operation=None, Synchronization=None, - TheElements=None, TheElement=None, - LUNames=None, InitiatorPortIDs=None, DeviceAccesses=None, - ProtocolControllers=None, ConnectivityCollection=None, - MaskingGroup=None, Members=None, - HardwareId=None, ElementSource=None, EMCInPools=None, - CompositeType=None, EMCNumberOfMembers=None, - EMCBindElements=None, Mode=None, - InElements=None, TargetPool=None, RequestedState=None, - ReplicationGroup=None, ReplicationType=None, - ReplicationSettingData=None, GroupName=None, Force=None, - RemoveElements=None, RelationshipName=None, - SourceGroup=None, TargetGroup=None, Goal=None, - Type=None, EMCSRP=None, EMCSLO=None, EMCWorkload=None, - EMCCollections=None, InitiatorMaskingGroup=None, - DeviceMaskingGroup=None, TargetMaskingGroup=None, - ProtocolController=None, StorageID=None, IDType=None, - WaitForCopyState=None, Collections=None): - - 
rc = 0 - myjob = SE_ConcreteJob() - myjob.classname = 'SE_ConcreteJob' - myjob['InstanceID'] = '9999' - myjob['status'] = 'success' - myjob['type'] = ElementName - - if Size == -1073741824 and ( - MethodName == 'CreateOrModifyCompositeElement'): - rc = 0 - myjob = SE_ConcreteJob() - myjob.classname = 'SE_ConcreteJob' - myjob['InstanceID'] = '99999' - myjob['status'] = 'success' - myjob['type'] = 'failed_delete_vol' - - if ElementName == 'failed_vol' and ( - MethodName == 'CreateOrModifyElementFromStoragePool'): - rc = 10 - myjob['status'] = 'failure' - - elif TheElements and TheElements[0]['DeviceID'] == '99999' and ( - MethodName == 'ReturnElementsToStoragePool'): - rc = 10 - myjob['status'] = 'failure' - elif HardwareId: - rc = 0 - targetendpoints = {} - endpoints = [] - endpoint = {} - endpoint['Name'] = (VMAXCommonData.end_point_map[ - VMAXCommonData.connector['wwpns'][0]]) - endpoints.append(endpoint) - endpoint2 = {} - endpoint2['Name'] = (VMAXCommonData.end_point_map[ - VMAXCommonData.connector['wwpns'][1]]) - endpoints.append(endpoint2) - targetendpoints['TargetEndpoints'] = endpoints - return rc, targetendpoints - elif ReplicationType and ( - MethodName == 'GetDefaultReplicationSettingData'): - rc = 0 - rsd = SE_ReplicationSettingData() - rsd['DefaultInstance'] = SE_ReplicationSettingData() - return rc, rsd - if MethodName == 'CreateStorageHardwareID': - ret = {} - rc = 0 - ret['HardwareID'] = self.data.iscsi_initiator - return rc, ret - if MethodName == 'GetSupportedSizeRange': - ret = {} - rc = 0 - ret['EMCInformationSource'] = 3 - ret['EMCRemainingSLOCapacity'] = self.data.remainingSLOCapacity - return rc, ret - elif MethodName == 'GetCompositeElements': - ret = {} - rc = 0 - ret['OutElements'] = [self.data.metaHead_volume, - self.data.meta_volume1, - self.data.meta_volume2] - return rc, ret - if (MethodName == 'CreateGroup' and - GroupName == self.data.initiatorgroup_name): - rc = 0 - job = {} - job['MaskingGroup'] = GroupName - return rc, job - if 
MethodName == 'CreateGroup' and GroupName == 'IG_unsuccessful': - rc = 10 - job = {} - job['status'] = 'failure' - return rc, job - - job = {'Job': myjob} - return rc, job - - def EnumerateInstanceNames(self, name): - result = None - if name == 'EMC_StorageConfigurationService': - result = self._enum_stconfsvcs() - elif name == 'EMC_ControllerConfigurationService': - result = self._enum_ctrlconfsvcs() - elif name == 'Symm_ElementCompositionService': - result = self._enum_elemcompsvcs() - elif name == 'Symm_StorageRelocationService': - result = self._enum_storrelocsvcs() - elif name == 'EMC_ReplicationService': - result = self._enum_replicsvcs() - elif name == 'EMC_VirtualProvisioningPool': - result = self._enum_pools() - elif name == 'EMC_StorageVolume': - result = self._enum_storagevolumes() - elif name == 'Symm_StorageVolume': - result = self._enum_storagevolumes() - elif name == 'CIM_StorageVolume': - result = self._enum_storagevolumes() - elif name == 'CIM_ProtocolControllerForUnit': - result = self._enum_unitnames() - elif name == 'EMC_LunMaskingSCSIProtocolController': - result = self._enum_lunmaskctrls() - elif name == 'EMC_StorageProcessorSystem': - result = self._enum_processors() - elif name == 'EMC_StorageHardwareIDManagementService': - result = self._enum_hdwidmgmts() - elif name == 'SE_StorageHardwareID': - result = self._enum_storhdwids() - elif name == 'EMC_StorageSystem': - result = self._enum_storagesystems() - elif name == 'Symm_TierPolicyRule': - result = self._enum_policyrules() - elif name == 'CIM_ReplicationServiceCapabilities': - result = self._enum_repservcpbls() - elif name == 'SE_StorageSynchronized_SV_SV': - result = self._enum_storageSyncSvSv() - elif name == 'Symm_SRPStoragePool': - result = self._enum_srpstoragepool() - elif name == 'Symm_ArrayChassis': - result = self._enum_arraychassis() - else: - result = self._default_enum() - return result - - def EnumerateInstances(self, name): - result = None - if name == 
'EMC_VirtualProvisioningPool': - result = self._enum_pool_details() - elif name == 'SE_StorageHardwareID': - result = self._enum_storhdwids() - elif name == 'SE_ManagementServerSoftwareIdentity': - result = self._enum_sw_identity() - else: - result = self._default_enum() - return result - - def GetInstance(self, objectpath, LocalOnly=False): - try: - name = objectpath['CreationClassName'] - except KeyError: - name = objectpath.classname - result = None - if name == 'Symm_StorageVolume': - result = self._getinstance_storagevolume(objectpath) - elif name == 'CIM_ProtocolControllerForUnit': - result = self._getinstance_unit(objectpath) - elif name == 'SE_ConcreteJob': - result = self._getinstance_job(objectpath) - elif name == 'SE_StorageSynchronized_SV_SV': - result = self._getinstance_syncsvsv(objectpath) - elif name == 'Symm_TierPolicyServiceCapabilities': - result = self._getinstance_policycapabilities(objectpath) - elif name == 'CIM_TierPolicyServiceCapabilities': - result = self._getinstance_policycapabilities(objectpath) - elif name == 'SE_InitiatorMaskingGroup': - result = self._getinstance_initiatormaskinggroup(objectpath) - elif name == 'CIM_InitiatorMaskingGroup': - result = self._getinstance_initiatormaskinggroup(objectpath) - elif name == 'SE_StorageHardwareID': - result = self._getinstance_storagehardwareid(objectpath) - elif name == 'CIM_ReplicationGroup': - result = self._getinstance_replicationgroup(objectpath) - elif name == 'Symm_SRPStoragePool': - result = self._getinstance_srpstoragepool(objectpath) - elif name == 'CIM_TargetMaskingGroup': - result = self._getinstance_targetmaskinggroup(objectpath) - elif name == 'CIM_DeviceMaskingGroup': - result = self._getinstance_devicemaskinggroup(objectpath) - elif name == 'EMC_StorageHardwareID': - result = self._getinstance_storagehardwareid(objectpath) - elif name == 'Symm_VirtualProvisioningPool': - result = self._getinstance_pool(objectpath) - elif name == 'Symm_ReplicationServiceCapabilities': - result 
= self._getinstance_replicationServCapabilities(objectpath) - else: - result = self._default_getinstance(objectpath) - - return result - - def ModifyInstance(self, objectpath, PropertyList=None): - pass - - def DeleteInstance(self, objectpath): - pass - - def Associators(self, objectpath, ResultClass='EMC_StorageHardwareID'): - result = None - if '_StorageHardwareID' in ResultClass: - result = self._assoc_hdwid() - elif ResultClass == 'EMC_iSCSIProtocolEndpoint': - result = self._assoc_endpoint() - elif ResultClass == 'EMC_StorageVolume': - result = self._assoc_storagevolume(objectpath) - elif ResultClass == 'Symm_LunMaskingView': - result = self._assoc_maskingview() - elif ResultClass == 'CIM_DeviceMaskingGroup': - result = self._assoc_storagegroup() - elif ResultClass == 'CIM_StorageExtent': - result = self._assoc_storageextent() - elif ResultClass == 'EMC_LunMaskingSCSIProtocolController': - result = self._assoc_lunmaskctrls() - elif ResultClass == 'CIM_TargetMaskingGroup': - result = self._assoc_portgroup() - elif ResultClass == 'CIM_ConnectivityCollection': - result = self._assoc_rdfgroup() - else: - result = self._default_assoc(objectpath) - return result - - def AssociatorNames(self, objectpath, - ResultClass='default', AssocClass='default'): - result = None - if objectpath == 'point_to_storage_instance_names': - result = ['FirstStorageTierInstanceNames'] - - if ResultClass != 'default': - result = self.ResultClassHelper(ResultClass, objectpath) - - if result is None and AssocClass != 'default': - result = self.AssocClassHelper(AssocClass, objectpath) - if result is None: - result = self._default_assocnames(objectpath) - return result - - def AssocClassHelper(self, AssocClass, objectpath): - if AssocClass == 'CIM_HostedService': - result = self._assocnames_hostedservice() - elif AssocClass == 'CIM_AssociatedTierPolicy': - result = self._assocnames_assoctierpolicy() - elif AssocClass == 'CIM_OrderedMemberOfCollection': - result = self._enum_storagevolumes() - 
elif AssocClass == 'CIM_BindsTo': - result = self._assocnames_bindsto() - elif AssocClass == 'CIM_MemberOfCollection': - result = self._assocnames_memberofcollection() - else: - result = None - return result - - def ResultClassHelper(self, ResultClass, objectpath): - if ResultClass == 'EMC_LunMaskingSCSIProtocolController': - result = self._assocnames_lunmaskctrl() - elif ResultClass == 'CIM_TierPolicyServiceCapabilities': - result = self._assocnames_policyCapabilities() - elif ResultClass == 'Symm_TierPolicyRule': - result = self._assocnames_policyrule() - elif ResultClass == 'CIM_StoragePool': - result = self._assocnames_storagepool() - elif ResultClass == 'EMC_VirtualProvisioningPool': - result = self._assocnames_storagepool() - elif ResultClass == 'CIM_DeviceMaskingGroup': - result = self._assocnames_storagegroup() - elif ResultClass == 'EMC_StorageVolume': - result = self._enum_storagevolumes() - elif ResultClass == 'Symm_StorageVolume': - result = self._enum_storagevolumes() - elif ResultClass == 'SE_InitiatorMaskingGroup': - result = self._enum_initiatorMaskingGroup() - elif ResultClass == 'CIM_InitiatorMaskingGroup': - result = self._enum_initiatorMaskingGroup() - elif ResultClass == 'CIM_StorageExtent': - result = self._enum_storage_extent() - elif ResultClass == 'SE_StorageHardwareID': - result = self._enum_storhdwids() - elif ResultClass == 'CIM_ReplicationServiceCapabilities': - result = self._enum_repservcpbls() - elif ResultClass == 'CIM_ReplicationGroup': - result = self._enum_repgroups() - elif ResultClass == 'Symm_FCSCSIProtocolEndpoint': - result = self._enum_fcscsiendpoint() - elif ResultClass == 'EMC_FCSCSIProtocolEndpoint': - result = self._enum_fcscsiendpoint() - elif ResultClass == 'Symm_SRPStoragePool': - result = self._enum_srpstoragepool() - elif ResultClass == 'Symm_StoragePoolCapabilities': - result = self._enum_storagepoolcapabilities() - elif ResultClass == 'CIM_storageSetting': - result = self._enum_storagesettings() - elif 
ResultClass == 'CIM_TargetMaskingGroup': - result = self._assocnames_portgroup() - elif ResultClass == 'CIM_InitiatorMaskingGroup': - result = self._enum_initMaskingGroup() - elif ResultClass == 'Symm_LunMaskingView': - result = self._enum_maskingView() - elif ResultClass == 'EMC_Meta': - result = self._enum_metavolume() - elif ResultClass == 'EMC_FrontEndSCSIProtocolController': - result = self._enum_maskingView() - elif ResultClass == 'CIM_TierPolicyRule': - result = self._assocnames_tierpolicy(objectpath) - else: - result = None - return result - - def ReferenceNames(self, objectpath, - ResultClass='CIM_ProtocolControllerForUnit'): - result = None - if ResultClass == 'CIM_ProtocolControllerForUnit': - result = self._ref_unitnames2() - elif ResultClass == 'SE_StorageSynchronized_SV_SV': - result = self._enum_storageSyncSvSv() - else: - result = self._default_ref(objectpath) - return result - - def _ref_unitnames(self): - unitnames = [] - unitname = {} - - dependent = {} - dependent['CreationClassName'] = self.data.vol_creationclass - dependent['DeviceID'] = self.data.test_volume['id'] - dependent['ElementName'] = self.data.test_volume['name'] - dependent['SystemName'] = self.data.storage_system - - antecedent = {} - antecedent['CreationClassName'] = self.data.lunmask_creationclass - antecedent['DeviceID'] = self.data.lunmaskctrl_id - antecedent['SystemName'] = self.data.storage_system - - unitname['Dependent'] = dependent - unitname['Antecedent'] = antecedent - unitname['CreationClassName'] = self.data.unit_creationclass - unitnames.append(unitname) - - return unitnames - - def mv_entry(self, mvname): - unitname = {} - - dependent = {} - dependent['CreationClassName'] = self.data.vol_creationclass - dependent['DeviceID'] = self.data.test_volume['id'] - dependent['ElementName'] = self.data.test_volume['name'] - dependent['SystemName'] = self.data.storage_system - - antecedent = SYMM_LunMasking() - antecedent['CreationClassName'] = self.data.lunmask_creationclass2 
- antecedent['SystemName'] = self.data.storage_system - antecedent['ElementName'] = mvname - - classcimproperty = Fake_CIMProperty() - elementName = ( - classcimproperty.fake_getElementNameCIMProperty(mvname)) - properties = {u'ElementName': elementName} - antecedent.properties = properties - - unitname['Dependent'] = dependent - unitname['Antecedent'] = antecedent - unitname['CreationClassName'] = self.data.unit_creationclass - return unitname - - def _ref_unitnames2(self): - unitnames = [] - unitname = self.mv_entry('OS-myhost-MV') - unitnames.append(unitname) - - # Second masking - unitname2 = self.mv_entry('OS-fakehost-MV') - unitnames.append(unitname2) - - # third masking - amended = 'OS-rslong493156848e71b072a17c1c4625e45f75-MV' - unitname3 = self.mv_entry(amended) - unitnames.append(unitname3) - return unitnames - - def _default_ref(self, objectpath): - return objectpath - - def _assoc_hdwid(self): - assocs = [] - assoc = EMC_StorageHardwareID() - assoc['StorageID'] = self.data.connector['initiator'] - assoc['SystemName'] = self.data.storage_system - assoc['CreationClassName'] = 'EMC_StorageHardwareID' - assoc.path = assoc - assocs.append(assoc) - for wwpn in self.data.connector['wwpns']: - assoc2 = EMC_StorageHardwareID() - assoc2['StorageID'] = wwpn - assoc2['SystemName'] = self.data.storage_system - assoc2['CreationClassName'] = 'EMC_StorageHardwareID' - assoc2.path = assoc2 - assocs.append(assoc2) - assocs.append(assoc) - return assocs - - def _assoc_endpoint(self): - assocs = [] - assoc = {} - assoc['Name'] = 'iqn.1992-04.com.emc: 50000973f006dd80' - assoc['SystemName'] = self.data.storage_system - assocs.append(assoc) - return assocs - - def _assoc_storagegroup(self): - assocs = [] - assoc1 = CIM_DeviceMaskingGroup() - assoc1['ElementName'] = self.data.storagegroupname - assoc1['SystemName'] = self.data.storage_system - assoc1['CreationClassName'] = 'CIM_DeviceMaskingGroup' - assoc1.path = assoc1 - assocs.append(assoc1) - assoc2 = 
CIM_DeviceMaskingGroup() - assoc2['ElementName'] = self.data.defaultstoragegroupname - assoc2['SystemName'] = self.data.storage_system - assoc2['CreationClassName'] = 'CIM_DeviceMaskingGroup' - assoc2.path = assoc2 - assocs.append(assoc2) - return assocs - - def _assoc_portgroup(self): - assocs = [] - assoc = CIM_TargetMaskingGroup() - assoc['ElementName'] = self.data.port_group - assoc['SystemName'] = self.data.storage_system - assoc['CreationClassName'] = 'CIM_TargetMaskingGroup' - assoc.path = assoc - assocs.append(assoc) - return assocs - - def _assoc_lunmaskctrls(self): - ctrls = [] - ctrl = EMC_LunMaskingSCSIProtocolController() - ctrl['CreationClassName'] = self.data.lunmask_creationclass - ctrl['DeviceID'] = self.data.lunmaskctrl_id - ctrl['SystemName'] = self.data.storage_system - ctrl['ElementName'] = self.data.lunmaskctrl_name - ctrl.path = ctrl - ctrls.append(ctrl) - return ctrls - - def _assoc_maskingview(self): - assocs = [] - assoc = SYMM_LunMasking() - assoc['Name'] = 'myMaskingView' - assoc['SystemName'] = self.data.storage_system - assoc['CreationClassName'] = 'Symm_LunMaskingView' - assoc['DeviceID'] = '1234' - assoc['SystemCreationClassName'] = '1234' - assoc['ElementName'] = 'OS-fakehost-gold-I-MV' - assoc.classname = assoc['CreationClassName'] - assoc.path = assoc - assocs.append(assoc) - return assocs - - # Added test for EMC_StorageVolume associators - def _assoc_storagevolume(self, objectpath): - assocs = [] - if 'type' not in objectpath: - vol = self.data.test_volume - elif objectpath['type'] == 'failed_delete_vol': - vol = self.data.failed_delete_vol - elif objectpath['type'] == 'vol1': - vol = self.data.test_volume - elif objectpath['type'] == 'volInCG': - vol = self.data.test_volume_CG - elif objectpath['type'] == 'appendVolume': - vol = self.data.test_volume - elif objectpath['type'] == 'failed_vol': - vol = self.data.test_failed_volume - else: - vol = self.data.test_volume - - vol['DeviceID'] = vol['device_id'] - assoc = 
self._getinstance_storagevolume(vol) - - assocs.append(assoc) - return assocs - - def _assoc_storageextent(self): - assocs = [] - assoc = CIM_StorageExtent() - assoc['Name'] = 'myStorageExtent' - assoc['SystemName'] = self.data.storage_system - assoc['CreationClassName'] = 'CIM_StorageExtent' - assoc.classname = assoc['CreationClassName'] - assoc.path = assoc - classcimproperty = Fake_CIMProperty() - isConcatenatedcimproperty = ( - classcimproperty.fake_getIsCompositeCIMProperty()) - properties = {u'IsConcatenated': isConcatenatedcimproperty} - assoc.properties = properties - assocs.append(assoc) - return assocs - - def _assoc_rdfgroup(self): - assocs = [] - assoc = CIM_ConnectivityCollection() - assoc['ElementName'] = self.data.rdf_group - assoc.path = self.data.srdf_group_instance - assocs.append(assoc) - return assocs - - def _default_assoc(self, objectpath): - return objectpath - - def _assocnames_lunmaskctrl(self): - return self._enum_lunmaskctrls() - - def _assocnames_hostedservice(self): - return self._enum_hostedservice() - - def _assocnames_policyCapabilities(self): - return self._enum_policycapabilities() - - def _assocnames_policyrule(self): - return self._enum_policyrules() - - def _assocnames_assoctierpolicy(self): - return self._enum_assoctierpolicy() - - def _assocnames_storagepool(self): - return self._enum_storagepool() - - def _assocnames_storagegroup(self): - return self._enum_storagegroup() - - def _assocnames_storagevolume(self): - return self._enum_storagevolume() - - def _assocnames_portgroup(self): - return self._enum_portgroup() - - def _assocnames_memberofcollection(self): - return self._enum_hostedservice() - - def _assocnames_bindsto(self): - return self._enum_ipprotocolendpoint() - - def _default_assocnames(self, objectpath): - return objectpath - - def _getinstance_storagevolume(self, objectpath): - foundinstance = None - instance = EMC_StorageVolume() - vols = self._enum_storagevolumes() - - for vol in vols: - if vol['DeviceID'] == 
objectpath['DeviceID']: - instance = vol - break - if not instance: - foundinstance = None - else: - foundinstance = instance - - return foundinstance - - def _getinstance_lunmask(self): - lunmask = {} - lunmask['CreationClassName'] = self.data.lunmask_creationclass - lunmask['DeviceID'] = self.data.lunmaskctrl_id - lunmask['SystemName'] = self.data.storage_system - return lunmask - - def _getinstance_initiatormaskinggroup(self, objectpath): - - initiatorgroup = SE_InitiatorMaskingGroup() - initiatorgroup['CreationClassName'] = ( - self.data.initiatorgroup_creationclass) - initiatorgroup['DeviceID'] = self.data.initiatorgroup_id - initiatorgroup['SystemName'] = self.data.storage_system - initiatorgroup['ElementName'] = self.data.initiatorgroup_name - initiatorgroup.path = initiatorgroup - return initiatorgroup - - def _getinstance_storagehardwareid(self, objectpath): - hardwareid = SE_StorageHardwareID() - hardwareid['CreationClassName'] = self.data.hardwareid_creationclass - hardwareid['SystemName'] = self.data.storage_system - hardwareid['StorageID'] = self.data.connector['wwpns'][0] - hardwareid.path = hardwareid - return hardwareid - - def _getinstance_pool(self, objectpath): - pool = {} - pool['CreationClassName'] = 'Symm_VirtualProvisioningPool' - pool['ElementName'] = self.data.poolname - pool['SystemName'] = self.data.storage_system - pool['TotalManagedSpace'] = self.data.totalmanagedspace_bits - pool['EMCSubscribedCapacity'] = self.data.subscribedcapacity_bits - pool['RemainingManagedSpace'] = self.data.remainingmanagedspace_bits - pool['EMCMaxSubscriptionPercent'] = self.data.maxsubscriptionpercent - return pool - - def _getinstance_replicationgroup(self, objectpath): - replicationgroup = {} - replicationgroup['CreationClassName'] = ( - self.data.replicationgroup_creationclass) - replicationgroup['ElementName'] = fake_constants.UUID1 - return replicationgroup - - def _getinstance_srpstoragepool(self, objectpath): - srpstoragepool = SYMM_SrpStoragePool() - 
srpstoragepool['CreationClassName'] = ( - self.data.srpstoragepool_creationclass) - srpstoragepool['ElementName'] = 'SRP_1' - - classcimproperty = Fake_CIMProperty() - totalManagedSpace = ( - classcimproperty.fake_getTotalManagedSpaceCIMProperty()) - remainingManagedSpace = ( - classcimproperty.fake_getRemainingManagedSpaceCIMProperty()) - properties = {u'TotalManagedSpace': totalManagedSpace, - u'RemainingManagedSpace': remainingManagedSpace} - srpstoragepool.properties = properties - return srpstoragepool - - def _getinstance_targetmaskinggroup(self, objectpath): - targetmaskinggroup = CIM_TargetMaskingGroup() - targetmaskinggroup['CreationClassName'] = 'CIM_TargetMaskingGroup' - targetmaskinggroup['ElementName'] = self.data.port_group - targetmaskinggroup.path = targetmaskinggroup - return targetmaskinggroup - - def _getinstance_devicemaskinggroup(self, objectpath): - targetmaskinggroup = {} - if 'CreationClassName' in objectpath: - targetmaskinggroup['CreationClassName'] = ( - objectpath['CreationClassName']) - else: - targetmaskinggroup['CreationClassName'] = ( - 'CIM_DeviceMaskingGroup') - if 'ElementName' in objectpath: - targetmaskinggroup['ElementName'] = objectpath['ElementName'] - else: - targetmaskinggroup['ElementName'] = ( - self.data.storagegroupname) - if 'EMCMaximumIO' in objectpath: - targetmaskinggroup['EMCMaximumIO'] = objectpath['EMCMaximumIO'] - if 'EMCMaximumBandwidth' in objectpath: - targetmaskinggroup['EMCMaximumBandwidth'] = ( - objectpath['EMCMaximumBandwidth']) - if 'EMCMaxIODynamicDistributionType' in objectpath: - targetmaskinggroup['EMCMaxIODynamicDistributionType'] = ( - objectpath['EMCMaxIODynamicDistributionType']) - return targetmaskinggroup - - def _getinstance_unit(self, objectpath): - unit = {} - - dependent = {} - dependent['CreationClassName'] = self.data.vol_creationclass - dependent['DeviceID'] = self.data.test_volume['id'] - dependent['ElementName'] = self.data.test_volume['name'] - dependent['SystemName'] = 
self.data.storage_system - - antecedent = {} - antecedent['CreationClassName'] = self.data.lunmask_creationclass - antecedent['DeviceID'] = self.data.lunmaskctrl_id - antecedent['SystemName'] = self.data.storage_system - - unit['Dependent'] = dependent - unit['Antecedent'] = antecedent - unit['CreationClassName'] = self.data.unit_creationclass - unit['DeviceNumber'] = '1' - - return unit - - def _getinstance_job(self, jobpath): - jobinstance = {} - jobinstance['InstanceID'] = '9999' - if jobpath['status'] == 'failure': - jobinstance['JobState'] = 10 - jobinstance['ErrorCode'] = 99 - jobinstance['ErrorDescription'] = 'Failure' - else: - jobinstance['JobState'] = 7 - jobinstance['ErrorCode'] = 0 - jobinstance['ErrorDescription'] = None - jobinstance['OperationalStatus'] = (2, 17) - return jobinstance - - def _getinstance_policycapabilities(self, policycapabilitypath): - instance = Fake_CIM_TierPolicyServiceCapabilities() - fakeinstance = instance.fake_getpolicyinstance() - return fakeinstance - - def _getinstance_syncsvsv(self, objectpath): - svInstance = {} - svInstance['SyncedElement'] = 'SyncedElement' - svInstance['SystemElement'] = 'SystemElement' - svInstance['PercentSynced'] = 100 - if 'PercentSynced' in objectpath and objectpath['PercentSynced'] < 100: - svInstance['PercentSynced'] = 50 - svInstance['CopyState'] = self.data.SYNCHRONIZED - if 'CopyState' in objectpath and ( - objectpath['CopyState'] != self.data.SYNCHRONIZED): - svInstance['CopyState'] = self.data.UNSYNCHRONIZED - return svInstance - - def _getinstance_replicationServCapabilities(self, objectpath): - repServCpblInstance = SYMM_SrpStoragePool() - classcimproperty = Fake_CIMProperty() - repTypesCimproperty = ( - classcimproperty.fake_getSupportedReplicationTypes()) - properties = {u'SupportedReplicationTypes': repTypesCimproperty} - repServCpblInstance.properties = properties - return repServCpblInstance - - def _getinstance_ipprotocolendpoint(self, objectpath): - return 
self._enum_ipprotocolendpoint()[0] - - def _getinstance_lunmaskingview(self, objectpath): - return self._enum_maskingView()[0] - - def _default_getinstance(self, objectpath): - return objectpath - - def _enum_stconfsvcs(self): - conf_services = [] - conf_service1 = {} - conf_service1['SystemName'] = self.data.storage_system - conf_service1['CreationClassName'] = ( - self.data.stconf_service_creationclass) - conf_services.append(conf_service1) - conf_service2 = {} - conf_service2['SystemName'] = self.data.storage_system_v3 - conf_service2['CreationClassName'] = ( - self.data.stconf_service_creationclass) - conf_services.append(conf_service2) - return conf_services - - def _enum_ctrlconfsvcs(self): - conf_services = [] - conf_service = {} - conf_service['SystemName'] = self.data.storage_system - conf_service['CreationClassName'] = ( - self.data.ctrlconf_service_creationclass) - conf_services.append(conf_service) - conf_service1 = {} - conf_service1['SystemName'] = self.data.storage_system_v3 - conf_service1['CreationClassName'] = ( - self.data.ctrlconf_service_creationclass) - conf_services.append(conf_service1) - return conf_services - - def _enum_elemcompsvcs(self): - comp_services = [] - comp_service = {} - comp_service['SystemName'] = self.data.storage_system - comp_service['CreationClassName'] = ( - self.data.elementcomp_service_creationclass) - comp_services.append(comp_service) - return comp_services - - def _enum_storrelocsvcs(self): - reloc_services = [] - reloc_service = {} - reloc_service['SystemName'] = self.data.storage_system - reloc_service['CreationClassName'] = ( - self.data.storreloc_service_creationclass) - reloc_services.append(reloc_service) - return reloc_services - - def _enum_replicsvcs(self): - replic_services = [] - replic_service = {} - replic_service['SystemName'] = self.data.storage_system - replic_service['CreationClassName'] = ( - self.data.replication_service_creationclass) - replic_services.append(replic_service) - replic_service2 = 
{} - replic_service2['SystemName'] = self.data.storage_system_v3 - replic_service2['CreationClassName'] = ( - self.data.replication_service_creationclass) - replic_services.append(replic_service2) - return replic_services - - def _enum_pools(self): - pools = [] - pool = {} - pool['InstanceID'] = ( - self.data.storage_system + '+U+' + self.data.storage_type) - pool['CreationClassName'] = 'Symm_VirtualProvisioningPool' - pool['ElementName'] = 'gold' - pools.append(pool) - return pools - - def _enum_pool_details(self): - pools = [] - pool = {} - pool['InstanceID'] = ( - self.data.storage_system + '+U+' + self.data.storage_type) - pool['CreationClassName'] = 'Symm_VirtualProvisioningPool' - pool['TotalManagedSpace'] = 12345678 - pool['RemainingManagedSpace'] = 123456 - pools.append(pool) - return pools - - def _enum_storagevolumes(self): - vols = [] - - vol = EMC_StorageVolume() - vol['Name'] = self.data.test_volume['name'] - vol['CreationClassName'] = 'Symm_StorageVolume' - vol['ElementName'] = self.data.test_volume['id'] - vol['DeviceID'] = self.data.test_volume['device_id'] - vol['Id'] = self.data.test_volume['id'] - vol['SystemName'] = self.data.storage_system - vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] - vol['BlockSize'] = self.data.test_volume['BlockSize'] - - # Added vol to vol.path - vol['SystemCreationClassName'] = 'Symm_StorageSystem' - vol.path = vol - vol.path.classname = vol['CreationClassName'] - - classcimproperty = Fake_CIMProperty() - blocksizecimproperty = classcimproperty.fake_getBlockSizeCIMProperty() - consumableBlockscimproperty = ( - classcimproperty.fake_getConsumableBlocksCIMProperty()) - isCompositecimproperty = ( - classcimproperty.fake_getIsCompositeCIMProperty()) - properties = {u'ConsumableBlocks': blocksizecimproperty, - u'BlockSize': consumableBlockscimproperty, - u'IsComposite': isCompositecimproperty} - vol.properties = properties - - name = {} - name['classname'] = 'Symm_StorageVolume' - keys = {} - 
keys['CreationClassName'] = 'Symm_StorageVolume' - keys['SystemName'] = self.data.storage_system - keys['DeviceID'] = vol['DeviceID'] - keys['SystemCreationClassName'] = 'Symm_StorageSystem' - name['keybindings'] = keys - - vol['provider_location'] = str(name) - - vols.append(vol) - - failed_delete_vol = EMC_StorageVolume() - failed_delete_vol['name'] = 'failed_delete_vol' - failed_delete_vol['CreationClassName'] = 'Symm_StorageVolume' - failed_delete_vol['ElementName'] = self.data.failed_delete_vol['id'] - failed_delete_vol['DeviceID'] = '99999' - failed_delete_vol['SystemName'] = self.data.storage_system - # Added vol to vol.path - failed_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - failed_delete_vol.path = failed_delete_vol - failed_delete_vol.path.classname = ( - failed_delete_vol['CreationClassName']) - vols.append(failed_delete_vol) - - failed_vol = EMC_StorageVolume() - failed_vol['name'] = 'failed__vol' - failed_vol['CreationClassName'] = 'Symm_StorageVolume' - failed_vol['ElementName'] = 'failed_vol' - failed_vol['DeviceID'] = '4' - failed_vol['SystemName'] = self.data.storage_system - # Added vol to vol.path - failed_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - failed_vol.path = failed_vol - failed_vol.path.classname = failed_vol['CreationClassName'] - - name_failed = {} - name_failed['classname'] = 'Symm_StorageVolume' - keys_failed = {} - keys_failed['CreationClassName'] = 'Symm_StorageVolume' - keys_failed['SystemName'] = self.data.storage_system - keys_failed['DeviceID'] = failed_vol['DeviceID'] - keys_failed['SystemCreationClassName'] = 'Symm_StorageSystem' - name_failed['keybindings'] = keys_failed - failed_vol['provider_location'] = str(name_failed) - - vols.append(failed_vol) - - volumeHead = EMC_StorageVolume() - volumeHead.classname = 'Symm_StorageVolume' - blockSize = self.data.block_size - volumeHead['ConsumableBlocks'] = ( - self.data.metaHead_volume['ConsumableBlocks']) - volumeHead['BlockSize'] = blockSize - 
volumeHead['DeviceID'] = self.data.metaHead_volume['DeviceID'] - vols.append(volumeHead) - - metaMember1 = EMC_StorageVolume() - metaMember1.classname = 'Symm_StorageVolume' - metaMember1['ConsumableBlocks'] = ( - self.data.meta_volume1['ConsumableBlocks']) - metaMember1['BlockSize'] = blockSize - metaMember1['DeviceID'] = self.data.meta_volume1['DeviceID'] - vols.append(metaMember1) - - metaMember2 = EMC_StorageVolume() - metaMember2.classname = 'Symm_StorageVolume' - metaMember2['ConsumableBlocks'] = ( - self.data.meta_volume2['ConsumableBlocks']) - metaMember2['BlockSize'] = blockSize - metaMember2['DeviceID'] = self.data.meta_volume2['DeviceID'] - vols.append(metaMember2) - - source_volume = EMC_StorageVolume() - source_volume['name'] = self.data.test_source_volume['name'] - source_volume['CreationClassName'] = 'Symm_StorageVolume' - source_volume['ElementName'] = self.data.test_source_volume['id'] - source_volume['DeviceID'] = self.data.test_source_volume['device_id'] - source_volume['Id'] = self.data.test_source_volume['id'] - source_volume['SystemName'] = self.data.storage_system - source_volume['NumberOfBlocks'] = ( - self.data.test_source_volume['NumberOfBlocks']) - source_volume['BlockSize'] = self.data.test_source_volume['BlockSize'] - source_volume['SystemCreationClassName'] = 'Symm_StorageSystem' - source_volume.path = source_volume - source_volume.path.classname = source_volume['CreationClassName'] - source_volume.properties = properties - vols.append(source_volume) - - return vols - - def _enum_initiatorMaskingGroup(self): - initatorgroups = [] - initatorgroup = {} - initatorgroup['CreationClassName'] = ( - self.data.initiatorgroup_creationclass) - initatorgroup['DeviceID'] = self.data.initiatorgroup_id - initatorgroup['SystemName'] = self.data.storage_system - initatorgroup['ElementName'] = self.data.initiatorgroup_name - initatorgroups.append(initatorgroup) - return initatorgroups - - def _enum_storage_extent(self): - storageExtents = [] - 
storageExtent = CIM_StorageExtent() - storageExtent['CreationClassName'] = ( - self.data.storageextent_creationclass) - - classcimproperty = Fake_CIMProperty() - isConcatenatedcimproperty = ( - classcimproperty.fake_getIsConcatenatedCIMProperty()) - properties = {u'IsConcatenated': isConcatenatedcimproperty} - storageExtent.properties = properties - - storageExtents.append(storageExtent) - return storageExtents - - def _enum_lunmaskctrls(self): - ctrls = [] - ctrl = {} - ctrl['CreationClassName'] = self.data.lunmask_creationclass - ctrl['DeviceID'] = self.data.lunmaskctrl_id - ctrl['SystemName'] = self.data.storage_system - ctrl['ElementName'] = self.data.lunmaskctrl_name - ctrls.append(ctrl) - return ctrls - - def _enum_hostedservice(self): - hostedservices = [] - hostedservice = {} - hostedservice['CreationClassName'] = ( - self.data.hostedservice_creationclass) - hostedservice['SystemName'] = self.data.storage_system - hostedservice['Name'] = self.data.storage_system - hostedservices.append(hostedservice) - return hostedservices - - def _enum_policycapabilities(self): - policycapabilities = [] - policycapability = {} - policycapability['CreationClassName'] = ( - self.data.policycapability_creationclass) - policycapability['SystemName'] = self.data.storage_system - - propertiesList = [] - CIMProperty = {'is_array': True} - properties = {u'SupportedTierFeatures': CIMProperty} - propertiesList.append(properties) - policycapability['Properties'] = propertiesList - - policycapabilities.append(policycapability) - - return policycapabilities - - def _enum_policyrules(self): - policyrules = [] - policyrule = {} - policyrule['CreationClassName'] = self.data.policyrule_creationclass - policyrule['SystemName'] = self.data.storage_system - policyrule['PolicyRuleName'] = self.data.policyrule - policyrules.append(policyrule) - return policyrules - - def _enum_assoctierpolicy(self): - assoctierpolicies = [] - assoctierpolicy = {} - assoctierpolicy['CreationClassName'] = ( - 
self.data.assoctierpolicy_creationclass) - assoctierpolicies.append(assoctierpolicy) - return assoctierpolicies - - def _enum_storagepool(self): - storagepools = [] - storagepool = {} - storagepool['CreationClassName'] = self.data.storagepool_creationclass - storagepool['InstanceID'] = self.data.storagepoolid - storagepool['ElementName'] = 'gold' - storagepools.append(storagepool) - return storagepools - - def _enum_srpstoragepool(self): - storagepools = [] - storagepool = {} - storagepool['CreationClassName'] = ( - self.data.srpstoragepool_creationclass) - storagepool['InstanceID'] = 'SYMMETRIX-+-000197200056-+-SRP_1' - storagepool['ElementName'] = 'SRP_1' - storagepools.append(storagepool) - return storagepools - - def _enum_storagepoolcapabilities(self): - storagepoolcaps = [] - storagepoolcap = {} - storagepoolcap['CreationClassName'] = 'Symm_StoragePoolCapabilities' - storagepoolcap['InstanceID'] = 'SYMMETRIX-+-000197200056-+-SRP_1' - storagepoolcaps.append(storagepoolcap) - return storagepoolcaps - - def _enum_storagesettings(self): - storagesettings = [] - storagesetting_bronze = {} - storagesetting_bronze['CreationClassName'] = 'CIM_StoragePoolSetting' - storagesetting_bronze['InstanceID'] = ( - 'SYMMETRIX-+-000197200056-+-SBronze:' - 'DSS-+-F-+-0-+-SR-+-SRP_1') - storagesettings.append(storagesetting_bronze) - storagesetting_silver = {} - storagesetting_silver['CreationClassName'] = 'CIM_StoragePoolSetting' - storagesetting_silver['InstanceID'] = ( - 'SYMMETRIX-+-000197200056-+-SSilver:' - 'DSS-+-F-+-0-+-SR-+-SRP_1') - storagesettings.append(storagesetting_silver) - return storagesettings - - def _enum_targetMaskingGroup(self): - targetMaskingGroups = [] - targetMaskingGroup = {} - targetMaskingGroup['CreationClassName'] = 'CIM_TargetMaskingGroup' - targetMaskingGroup['ElementName'] = self.data.port_group - targetMaskingGroups.append(targetMaskingGroup) - return targetMaskingGroups - - def _enum_initMaskingGroup(self): - initMaskingGroups = [] - 
initMaskingGroup = {} - initMaskingGroup['CreationClassName'] = 'CIM_InitiatorMaskingGroup' - initMaskingGroup['ElementName'] = 'myInitGroup' - initMaskingGroups.append(initMaskingGroup) - return initMaskingGroups - - def _enum_storagegroup(self): - storagegroups = [] - storagegroup1 = {} - storagegroup1['CreationClassName'] = ( - self.data.storagegroup_creationclass) - storagegroup1['ElementName'] = self.data.storagegroupname - storagegroups.append(storagegroup1) - storagegroup2 = {} - storagegroup2['CreationClassName'] = ( - self.data.storagegroup_creationclass) - storagegroup2['ElementName'] = self.data.defaultstoragegroupname - storagegroup2['SystemName'] = self.data.storage_system - storagegroups.append(storagegroup2) - storagegroup3 = {} - storagegroup3['CreationClassName'] = ( - self.data.storagegroup_creationclass) - storagegroup3['ElementName'] = 'OS-fakehost-SRP_1-Bronze-DSS-SG' - storagegroups.append(storagegroup3) - storagegroup4 = {} - storagegroup4['CreationClassName'] = ( - self.data.storagegroup_creationclass) - storagegroup4['ElementName'] = 'OS-SRP_1-Bronze-DSS-SG' - storagegroups.append(storagegroup4) - return storagegroups - - def _enum_storagevolume(self): - storagevolumes = [] - storagevolume = {} - storagevolume['CreationClassName'] = ( - self.data.storagevolume_creationclass) - storagevolumes.append(storagevolume) - return storagevolumes - - def _enum_hdwidmgmts(self): - services = [] - srv = {} - srv['SystemName'] = self.data.storage_system - services.append(srv) - return services - - def _enum_storhdwids(self): - storhdwids = [] - hdwid = SE_StorageHardwareID() - hdwid['CreationClassName'] = self.data.hardwareid_creationclass - hdwid['StorageID'] = self.data.connector['wwpns'][0] - hdwid['InstanceID'] = "W-+-" + self.data.connector['wwpns'][0] - - hdwid.path = hdwid - storhdwids.append(hdwid) - return storhdwids - - def _enum_storagesystems(self): - storagesystems = [] - storagesystem = {} - storagesystem['SystemName'] = 
self.data.storage_system - storagesystem['Name'] = self.data.storage_system - storagesystems.append(storagesystem) - return storagesystems - - def _enum_repservcpbls(self): - repservcpbls = [] - servcpbl = CIM_ReplicationServiceCapabilities() - servcpbl['CreationClassName'] = 'Symm_ReplicationServiceCapabilities' - servcpbl['InstanceID'] = self.data.storage_system - repservcpbls.append(servcpbl) - return repservcpbls - - def _enum_repgroups(self): - repgroups = [] - repgroup = {} - repgroup['CreationClassName'] = ( - self.data.replicationgroup_creationclass) - repgroups.append(repgroup) - return repgroups - - def _enum_fcscsiendpoint(self): - wwns = [] - wwn = {} - wwn['Name'] = "5000090000000000" - wwns.append(wwn) - return wwns - - def _enum_maskingView(self): - maskingViews = [] - maskingView = SYMM_LunMasking() - maskingView['CreationClassName'] = 'Symm_LunMaskingView' - maskingView['ElementName'] = self.data.lunmaskctrl_name - - cimproperty = Fake_CIMProperty() - cimproperty.value = self.data.lunmaskctrl_name - properties = {u'ElementName': cimproperty} - maskingView.properties = properties - - maskingViews.append(maskingView) - return maskingViews - - def _enum_portgroup(self): - portgroups = [] - portgroup = {} - portgroup['CreationClassName'] = ( - 'CIM_TargetMaskingGroup') - portgroup['ElementName'] = self.data.port_group - portgroups.append(portgroup) - return portgroups - - def _enum_metavolume(self): - return [] - - def _enum_storageSyncSvSv(self): - conn = FakeEcomConnection() - sourceVolume = {} - sourceVolume['CreationClassName'] = 'Symm_StorageVolume' - sourceVolume['DeviceID'] = self.data.test_volume['device_id'] - sourceInstanceName = conn.GetInstance(sourceVolume) - targetVolume = {} - targetVolume['CreationClassName'] = 'Symm_StorageVolume' - targetVolume['DeviceID'] = self.data.test_volume['device_id'] - targetInstanceName = conn.GetInstance(sourceVolume) - svInstances = [] - svInstance = {} - svInstance['SyncedElement'] = targetInstanceName - 
svInstance['SystemElement'] = sourceInstanceName - svInstance['CreationClassName'] = 'SE_StorageSynchronized_SV_SV' - svInstance['PercentSynced'] = 100 - svInstance['CopyState'] = 7 - svInstances.append(svInstance) - return svInstances - - def _enum_sw_identity(self): - swIdentities = [] - swIdentity = {} - swIdentity['MajorVersion'] = self.data.majorVersion - swIdentity['MinorVersion'] = self.data.minorVersion - swIdentity['RevisionNumber'] = self.data.revNumber - swIdentities.append(swIdentity) - return swIdentities - - def _enum_ipprotocolendpoint(self): - ipprotocolendpoints = [] - ipprotocolendpoint = CIM_IPProtocolEndpoint() - ipprotocolendpoint['CreationClassName'] = 'CIM_IPProtocolEndpoint' - ipprotocolendpoint['SystemName'] = self.data.storage_system - classcimproperty = Fake_CIMProperty() - ipv4addresscimproperty = ( - classcimproperty.fake_getipv4address()) - properties = {u'IPv4Address': ipv4addresscimproperty} - ipprotocolendpoint.properties = properties - ipprotocolendpoint.path = ipprotocolendpoint - ipprotocolendpoints.append(ipprotocolendpoint) - iqnprotocolendpoint = CIM_IPProtocolEndpoint() - iqnprotocolendpoint['CreationClassName'] = ( - 'Symm_VirtualiSCSIProtocolEndpoint') - iqnprotocolendpoint['SystemName'] = self.data.storage_system - classcimproperty = Fake_CIMProperty() - iqncimproperty = ( - classcimproperty.fake_getiqn()) - properties = {u'Name': iqncimproperty} - iqnprotocolendpoint.properties = properties - iqnprotocolendpoint.path = iqnprotocolendpoint - ipprotocolendpoints.append(iqnprotocolendpoint) - return ipprotocolendpoints - - def _enum_arraychassis(self): - arraychassiss = [] - arraychassis = Symm_ArrayChassis() - arraychassis['CreationClassName'] = ( - 'Symm_ArrayChassis') - arraychassis['SystemName'] = self.data.storage_system_v3 - arraychassis['Tag'] = self.data.storage_system_v3 - cimproperty = Fake_CIMProperty() - cimproperty.value = 'VMAX250F' - properties = {u'Model': cimproperty} - arraychassis.properties = properties - 
arraychassiss.append(arraychassis) - return arraychassiss - - def _default_enum(self): - names = [] - name = {} - name['Name'] = 'default' - names.append(name) - return names - - -class VMAXISCSIDriverNoFastTestCase(test.TestCase): - def setUp(self): - - self.data = VMAXCommonData() - - self.tempdir = tempfile.mkdtemp() - super(VMAXISCSIDriverNoFastTestCase, self).setUp() - self.config_file_path = None - self.create_fake_config_file_no_fast() - self.addCleanup(self._cleanup) - configuration = conf.Configuration(None) - configuration.append_config_values = mock.Mock(return_value=0) - configuration.config_group = 'ISCSINoFAST' - configuration.cinder_emc_config_file = self.config_file_path - self.mock_object(configuration, 'safe_get', - self.fake_safe_get({'driver_use_ssl': - True, - 'volume_backend_name': - 'ISCSINoFAST'})) - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - self.fake_is_v3) - self.mock_object(utils.VMAXUtils, '_is_sync_complete', - return_value=True) - self.mock_object(cinder_utils, 'get_bool_param', - return_value=False) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - def fake_safe_get(self, values): - def _safe_get(key): - return values.get(key) - return _safe_get - - def create_fake_config_file_no_fast(self): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - doc = self.add_array_info(doc, emc) - filename = 'cinder_emc_config_ISCSINoFAST.xml' - self.config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - - def create_fake_config_file_no_fast_with_interval_retries(self): - - doc = minidom.Document() - emc = 
doc.createElement("EMC") - doc.appendChild(emc) - doc = self.add_array_info(doc, emc) - doc = self.add_interval_and_retries(doc, emc) - filename = 'cinder_emc_config_ISCSINoFAST_int_ret.xml' - config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - return config_file_path - - def create_fake_config_file_no_fast_with_interval(self): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - doc = self.add_array_info(doc, emc) - doc = self.add_interval_only(doc, emc) - filename = 'cinder_emc_config_ISCSINoFAST_int.xml' - config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - return config_file_path - - def create_fake_config_file_no_fast_with_retries(self): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - doc = self.add_array_info(doc, emc) - doc = self.add_retries_only(doc, emc) - filename = 'cinder_emc_config_ISCSINoFAST_ret.xml' - config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - return config_file_path - - def add_array_info(self, doc, emc): - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = 
doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("gold") - emc.appendChild(pool) - pool.appendChild(pooltext) - - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - return doc - - def add_interval_and_retries(self, doc, emc): - interval = doc.createElement("Interval") - intervaltext = doc.createTextNode("5") - emc.appendChild(interval) - interval.appendChild(intervaltext) - - retries = doc.createElement("Retries") - retriestext = doc.createTextNode("40") - emc.appendChild(retries) - retries.appendChild(retriestext) - return doc - - def add_interval_only(self, doc, emc): - interval = doc.createElement("Interval") - intervaltext = doc.createTextNode("20") - emc.appendChild(interval) - interval.appendChild(intervaltext) - return doc - - def add_retries_only(self, doc, emc): - retries = doc.createElement("Retries") - retriestext = doc.createTextNode("70") - emc.appendChild(retries) - retries.appendChild(retriestext) - return doc - - # fix for https://bugs.launchpad.net/cinder/+bug/1364232 - def create_fake_config_file_1364232(self): - filename = 'cinder_emc_config_1364232.xml' - config_file_1364232 = self.tempdir + '/' + filename - text_file = open(config_file_1364232, "w") - text_file.write("\n\n" - "10.10.10.10\n" - "5988\n" - "user\t\n" - "password\n" - "OS-PORTGROUP1-PG" - "OS-PORTGROUP2-PG" - " \n" - "OS-PORTGROUP3-PG" - "OS-PORTGROUP4-PG" - 
"\n000198700439" - " \n\nFC_SLVR1\n" - "\nSILVER1\n" - "") - text_file.close() - return config_file_1364232 - - def fake_ecom_connection(self): - conn = FakeEcomConnection() - return conn - - def fake_is_v3(self, conn, serialNumber): - return False - - def test_slo_empty_tag(self): - filename = 'cinder_emc_config_slo_empty_tag' - tempdir = tempfile.mkdtemp() - config_file = tempdir + '/' + filename - text_file = open(config_file, "w") - text_file.write("\n\n" - "10.10.10.10\n" - "5988\n" - "user\n" - "password\n" - "\n" - "OS-PORTGROUP1-PG\n" - "\n" - "SRP_1\n" - "\n" - "\n" - "") - text_file.close() - - arrayInfo = self.driver.utils.parse_file_to_get_array_map(config_file) - self.assertIsNone(arrayInfo[0]['SLO']) - self.assertIsNone(arrayInfo[0]['Workload']) - bExists = os.path.exists(config_file) - if bExists: - os.remove(config_file) - - def test_filter_list(self): - portgroupnames = ['pg3', 'pg1', 'pg4', 'pg2'] - portgroupnames = ( - self.driver.common.utils._filter_list(portgroupnames)) - self.assertEqual(4, len(portgroupnames)) - self.assertEqual(['pg1', 'pg2', 'pg3', 'pg4'], sorted(portgroupnames)) - - portgroupnames = ['pg1'] - portgroupnames = ( - self.driver.common.utils._filter_list(portgroupnames)) - self.assertEqual(1, len(portgroupnames)) - self.assertEqual(['pg1'], portgroupnames) - - portgroupnames = ['only_pg', '', '', '', '', ''] - portgroupnames = ( - self.driver.common.utils._filter_list(portgroupnames)) - self.assertEqual(1, len(portgroupnames)) - self.assertEqual(['only_pg'], portgroupnames) - - def test_get_random_pg_from_list(self): - portGroupNames = ['pg1', 'pg2', 'pg3', 'pg4'] - portGroupName = ( - self.driver.common.utils.get_random_pg_from_list(portGroupNames)) - self.assertIn('pg', portGroupName) - - portGroupNames = ['pg1'] - portGroupName = ( - self.driver.common.utils.get_random_pg_from_list(portGroupNames)) - self.assertEqual('pg1', portGroupName) - - def test_get_random_portgroup(self): - # 4 portgroups - data = ("\n\n" - "" - 
"OS-PG1\n" - "OS-PG2\n" - "OS-PG3\n" - "OS-PG4\n" - "" - "") - dom = minidom.parseString(data) - portgroup = self.driver.common.utils._get_random_portgroup(dom) - self.assertIn('OS-PG', portgroup) - - # Duplicate portgroups - data = ("\n\n" - "" - "OS-PG1\n" - "OS-PG1\n" - "OS-PG1\n" - "OS-PG2\n" - "" - "") - dom = minidom.parseString(data) - portgroup = self.driver.common.utils._get_random_portgroup(dom) - self.assertIn('OS-PG', portgroup) - - def test_get_random_portgroup_exception(self): - # Missing PortGroup values - data = ("\n\n" - "" - "\n" - "\n" - "" - "") - dom = minidom.parseString(data) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common.utils._get_random_portgroup, dom) - - # Missing portgroups - data = ("\n\n" - "" - "" - "") - dom = minidom.parseString(data) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common.utils._get_random_portgroup, dom) - - def test_get_correct_port_group(self): - self.driver.common.conn = self.fake_ecom_connection() - maskingViewInstanceName = {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV', - 'SystemName': 'SYMMETRIX+000195900551'} - deviceinfodict = {'controller': maskingViewInstanceName} - portgroupname = self.driver.common._get_correct_port_group( - deviceinfodict, self.data.storage_system) - self.assertEqual('OS-portgroup-PG', portgroupname) - - def test_generate_unique_trunc_pool(self): - pool_under_16_chars = 'pool_under_16' - pool1 = self.driver.utils.generate_unique_trunc_pool( - pool_under_16_chars) - self.assertEqual(pool_under_16_chars, pool1) - - pool_over_16_chars = ( - 'pool_over_16_pool_over_16') - # Should generate truncated string first 8 chars and - # last 7 chars - pool2 = self.driver.utils.generate_unique_trunc_pool( - pool_over_16_chars) - self.assertEqual('pool_ove_over_16', pool2) - - def test_generate_unique_trunc_host(self): - host_under_38_chars = 'host_under_38_chars' - host1 = 
self.driver.utils.generate_unique_trunc_host( - host_under_38_chars) - self.assertEqual(host_under_38_chars, host1) - - host_over_38_chars = ( - 'host_over_38_chars_host_over_38_chars_host_over_38_chars') - # Check that the same md5 value is retrieved from multiple calls - host2 = self.driver.utils.generate_unique_trunc_host( - host_over_38_chars) - host3 = self.driver.utils.generate_unique_trunc_host( - host_over_38_chars) - self.assertEqual(host2, host3) - - def test_find_ip_protocol_endpoints(self): - conn = self.fake_ecom_connection() - endpoint = self.driver.common._find_ip_protocol_endpoints( - conn, self.data.storage_system, self.data.port_group) - self.assertEqual('10.10.10.10', endpoint[0]['ip']) - - def test_find_device_number(self): - host = 'fakehost' - data, __, __ = ( - self.driver.common.find_device_number(self.data.test_volume, - host)) - self.assertEqual('OS-fakehost-MV', data['maskingview']) - - @mock.patch.object( - FakeEcomConnection, - 'ReferenceNames', - return_value=[]) - def test_find_device_number_false(self, mock_ref_name): - host = 'bogushost' - data, __, __ = ( - self.driver.common.find_device_number(self.data.test_volume, - host)) - self.assertFalse(data) - - def test_find_device_number_long_host(self): - # Long host name - host = 'myhost.mydomain.com' - data, __, __ = ( - self.driver.common.find_device_number(self.data.test_volume, - host)) - self.assertEqual('OS-myhost-MV', data['maskingview']) - - def test_find_device_number_short_name_over_38_chars(self): - # short name over 38 chars - host = 'myShortnameIsOverThirtyEightCharactersLong' - host = self.driver.common.utils.generate_unique_trunc_host(host) - amended = 'OS-' + host + '-MV' - v2_host_over_38 = self.data.test_volume.copy() - # Pool aware scheduler enabled - v2_host_over_38['host'] = host - data, __, __ = ( - self.driver.common.find_device_number(v2_host_over_38, - host)) - self.assertEqual(amended, data['maskingview']) - - def 
test_unbind_and_get_volume_from_storage_pool(self): - conn = self.fake_ecom_connection() - common = self.driver.common - common.utils.is_volume_bound_to_pool = mock.Mock( - return_value='False') - storageConfigService = ( - common.utils.find_storage_configuration_service( - conn, self.data.storage_system)) - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeName = "unbind-vol" - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': False} - volumeInstance = ( - common._unbind_and_get_volume_from_storage_pool( - conn, storageConfigService, - volumeInstanceName, volumeName, extraSpecs)) - self.assertEqual(self.data.storage_system, - volumeInstance['SystemName']) - self.assertEqual('1', volumeInstance['ElementName']) - - def test_create_hardware_ids(self): - conn = self.fake_ecom_connection() - connector = { - 'ip': '10.0.0.2', - 'initiator': self.data.iscsi_initiator, - 'host': 'fakehost'} - initiatorNames = ( - self.driver.common.masking._find_initiator_names(conn, connector)) - storageHardwareIDInstanceNames = ( - self.driver.common.masking._create_hardware_ids( - conn, initiatorNames, self.data.storage_system)) - self.assertEqual(self.data.iscsi_initiator, - storageHardwareIDInstanceNames[0]) - - def test_get_pool_instance_and_system_name(self): - conn = self.fake_ecom_connection() - # V2 - old '+' separator - storagesystem = {} - storagesystem['SystemName'] = self.data.storage_system - storagesystem['Name'] = self.data.storage_system - pools = conn.EnumerateInstanceNames("EMC_VirtualProvisioningPool") - poolname = 'gold' - poolinstancename, systemname = ( - self.driver.common.utils._get_pool_instance_and_system_name( - conn, pools, storagesystem, poolname)) - self.assertEqual(self.data.storage_system, systemname) - self.assertEqual(self.data.storagepoolid, - poolinstancename['InstanceID']) - # V3 - note: V2 can also have the '-+-' separator - storagesystem = {} - storagesystem['SystemName'] = self.data.storage_system_v3 - 
storagesystem['Name'] = self.data.storage_system_v3 - pools = conn.EnumerateInstanceNames('Symm_SRPStoragePool') - poolname = 'SRP_1' - poolinstancename, systemname = ( - self.driver.common.utils._get_pool_instance_and_system_name( - conn, pools, storagesystem, poolname)) - self.assertEqual(self.data.storage_system_v3, systemname) - self.assertEqual('SYMMETRIX-+-000197200056-+-SRP_1', - poolinstancename['InstanceID']) - # Invalid poolname - poolname = 'bogus' - poolinstancename, systemname = ( - self.driver.common.utils._get_pool_instance_and_system_name( - conn, pools, storagesystem, poolname)) - self.assertIsNone(poolinstancename) - self.assertEqual(self.data.storage_system_v3, systemname) - - def test_get_hardware_type(self): - iqn_initiator = 'iqn.1992-04.com.emc: 50000973f006dd80' - hardwaretypeid = ( - self.driver.utils._get_hardware_type(iqn_initiator)) - self.assertEqual(5, hardwaretypeid) - wwpn_initiator = '123456789012345' - hardwaretypeid = ( - self.driver.utils._get_hardware_type(wwpn_initiator)) - self.assertEqual(2, hardwaretypeid) - bogus_initiator = 'bogus' - hardwaretypeid = ( - self.driver.utils._get_hardware_type(bogus_initiator)) - self.assertEqual(0, hardwaretypeid) - - def test_check_if_rollback_action_for_masking_required(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': False, - 'storagetype:fastpolicy': 'GOLD1'} - - vol = EMC_StorageVolume() - vol['name'] = self.data.test_volume['name'] - vol['CreationClassName'] = 'Symm_StorageVolume' - vol['ElementName'] = self.data.test_volume['id'] - vol['DeviceID'] = self.data.test_volume['device_id'] - vol['Id'] = self.data.test_volume['id'] - vol['SystemName'] = self.data.storage_system - vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] - vol['BlockSize'] = self.data.test_volume['BlockSize'] - - # Added vol to 
vol.path - vol['SystemCreationClassName'] = 'Symm_StorageSystem' - vol.path = vol - vol.path.classname = vol['CreationClassName'] - - rollbackDict = {} - rollbackDict['isV3'] = False - rollbackDict['defaultStorageGroupInstanceName'] = ( - self.data.default_storage_group) - rollbackDict['sgName'] = self.data.storagegroupname - rollbackDict['sgGroupName'] = self.data.storagegroupname - rollbackDict['volumeName'] = 'vol1' - rollbackDict['fastPolicyName'] = 'GOLD1' - rollbackDict['volumeInstance'] = vol - rollbackDict['controllerConfigService'] = controllerConfigService - rollbackDict['extraSpecs'] = extraSpecs - rollbackDict['igGroupName'] = self.data.initiatorgroup_name - rollbackDict['connector'] = self.data.connector - # Path 1 - The volume is in another storage group that isn't the - # default storage group - expectedmessage = (_("Rollback - Volume in another storage " - "group besides default storage group.")) - message = ( - self.driver.common.masking. - _check_if_rollback_action_for_masking_required( - conn, rollbackDict)) - self.assertEqual(expectedmessage, message) - # Path 2 - The volume is not in any storage group - rollbackDict['sgName'] = 'sq_not_exist' - rollbackDict['sgGroupName'] = 'sq_not_exist' - expectedmessage = (_("V2 rollback, volume is not in any storage " - "group.")) - message = ( - self.driver.common.masking. 
- _check_if_rollback_action_for_masking_required( - conn, rollbackDict)) - self.assertEqual(expectedmessage, message) - - def test_migrate_cleanup(self): - conn = self.fake_ecom_connection() - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': False, - 'storagetype:fastpolicy': 'GOLD1'} - - vol = EMC_StorageVolume() - vol['name'] = self.data.test_volume['name'] - vol['CreationClassName'] = 'Symm_StorageVolume' - vol['ElementName'] = self.data.test_volume['id'] - vol['DeviceID'] = self.data.test_volume['device_id'] - vol['Id'] = self.data.test_volume['id'] - vol['SystemName'] = self.data.storage_system - vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] - vol['BlockSize'] = self.data.test_volume['BlockSize'] - - # Added vol to vol.path - vol['SystemCreationClassName'] = 'Symm_StorageSystem' - vol.path = vol - vol.path.classname = vol['CreationClassName'] - # The volume is already belong to default storage group - return_to_default = self.driver.common._migrate_cleanup( - conn, vol, self.data.storage_system, 'GOLD1', - vol['name'], extraSpecs) - self.assertFalse(return_to_default) - # The volume does not belong to default storage group - return_to_default = self.driver.common._migrate_cleanup( - conn, vol, self.data.storage_system, 'BRONZE1', - vol['name'], extraSpecs) - self.assertTrue(return_to_default) - - @unittest.skip("Skip until bug #1578986 is fixed") - def _test_wait_for_job_complete(self): - myjob = SE_ConcreteJob() - myjob.classname = 'SE_ConcreteJob' - myjob['InstanceID'] = '9999' - myjob['status'] = 'success' - myjob['type'] = 'type' - myjob['CreationClassName'] = 'SE_ConcreteJob' - myjob['Job'] = myjob - conn = self.fake_ecom_connection() - - self.driver.utils._is_job_finished = mock.Mock( - return_value=True) - rc, errordesc = self.driver.utils.wait_for_job_complete(conn, myjob) - self.assertEqual(0, rc) - self.assertIsNone(errordesc) - self.driver.utils._is_job_finished.assert_called_once_with( - conn, myjob) - 
self.assertTrue(self.driver.utils._is_job_finished.return_value) - self.driver.utils._is_job_finished.reset_mock() - - rc, errordesc = self.driver.utils.wait_for_job_complete(conn, myjob) - self.assertEqual(0, rc) - self.assertIsNone(errordesc) - - @unittest.skip("Skip until bug #1578986 is fixed") - def _test_wait_for_job_complete_bad_job_state(self): - myjob = SE_ConcreteJob() - myjob.classname = 'SE_ConcreteJob' - myjob['InstanceID'] = '9999' - myjob['status'] = 'success' - myjob['type'] = 'type' - myjob['CreationClassName'] = 'SE_ConcreteJob' - myjob['Job'] = myjob - conn = self.fake_ecom_connection() - self.driver.utils._is_job_finished = mock.Mock( - return_value=True) - self.driver.utils._verify_job_state = mock.Mock( - return_value=(-1, 'Job finished with an error')) - rc, errordesc = self.driver.utils.wait_for_job_complete(conn, myjob) - self.assertEqual(-1, rc) - self.assertEqual('Job finished with an error', errordesc) - - @unittest.skip("Skip until bug #1578986 is fixed") - def _test_wait_for_sync(self): - mysync = 'fakesync' - conn = self.fake_ecom_connection() - - self.driver.utils._is_sync_complete = mock.Mock( - return_value=True) - self.driver.utils._get_interval_in_secs = mock.Mock(return_value=0) - rc = self.driver.utils.wait_for_sync(conn, mysync) - self.assertIsNotNone(rc) - self.driver.utils._is_sync_complete.assert_called_once_with( - conn, mysync) - self.assertTrue(self.driver.utils._is_sync_complete.return_value) - self.driver.utils._is_sync_complete.reset_mock() - - rc = self.driver.utils.wait_for_sync(conn, mysync) - self.assertIsNotNone(rc) - - @unittest.skip("Skip until bug #1578986 is fixed") - def test_wait_for_sync_extra_specs(self): - mysync = 'fakesync' - conn = self.fake_ecom_connection() - file_name = ( - self.create_fake_config_file_no_fast_with_interval_retries()) - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - pool = 'gold+1234567891011' - arrayInfo = self.driver.utils.parse_file_to_get_array_map( - 
self.config_file_path) - poolRec = self.driver.utils.extract_record(arrayInfo, pool) - extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, - poolRec) - - self.driver.utils._is_sync_complete = mock.Mock( - return_value=True) - self.driver.utils._get_interval_in_secs = mock.Mock(return_value=0) - rc = self.driver.utils.wait_for_sync(conn, mysync, extraSpecs) - self.assertIsNotNone(rc) - self.driver.utils._is_sync_complete.assert_called_once_with( - conn, mysync) - self.assertTrue(self.driver.utils._is_sync_complete.return_value) - self.assertEqual(40, - self.driver.utils._get_max_job_retries(extraSpecs)) - self.assertEqual(5, - self.driver.utils._get_interval_in_secs(extraSpecs)) - self.driver.utils._is_sync_complete.reset_mock() - - rc = self.driver.utils.wait_for_sync(conn, mysync) - self.assertIsNotNone(rc) - bExists = os.path.exists(file_name) - if bExists: - os.remove(file_name) - - # Bug 1395830: _find_lun throws exception when lun is not found. - def test_find_lun(self): - keybindings = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'1', - 'SystemCreationClassName': u'Symm_StorageSystem'} - provider_location = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings} - volume = EMC_StorageVolume() - volume['name'] = 'vol1' - volume['id'] = '1' - volume['provider_location'] = six.text_type(provider_location) - - self.driver.common.conn = self.driver.common._get_ecom_connection() - findlun = self.driver.common._find_lun(volume) - getinstance = self.driver.common.conn._getinstance_storagevolume( - keybindings) - # Found lun. 
- self.assertEqual(getinstance, findlun) - - keybindings2 = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'9', - 'SystemCreationClassName': u'Symm_StorageSystem'} - provider_location2 = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings2} - volume2 = EMC_StorageVolume() - volume2['name'] = 'myVol' - volume2['id'] = 'myVol' - volume2['provider_location'] = six.text_type(provider_location2) - verify_orig = self.driver.common.conn.GetInstance - self.driver.common.conn.GetInstance = mock.Mock( - return_value=None) - findlun2 = self.driver.common._find_lun(volume2) - # Not found. - self.assertIsNone(findlun2) - self.driver.utils.get_instance_name( - provider_location2['classname'], - keybindings2) - self.driver.common.conn.GetInstance.assert_called_once_with( - keybindings2) - self.driver.common.conn.GetInstance.reset_mock() - self.driver.common.conn.GetInstance = verify_orig - - keybindings3 = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900551', - 'DeviceID': u'9999', - 'SystemCreationClassName': u'Symm_StorageSystem'} - provider_location3 = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings3} - instancename3 = self.driver.utils.get_instance_name( - provider_location3['classname'], - keybindings3) - # Error other than not found. 
- arg = 9999, "test_error" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common.utils.process_exception_args, - arg, instancename3) - - # Bug 1403160 - make sure the masking view is cleanly deleted - def test_last_volume_delete_masking_view(self): - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - maskingViewInstanceName = ( - self.driver.common.masking._find_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - - maskingViewName = conn.GetInstance( - maskingViewInstanceName)['ElementName'] - - # Deleting Masking View failed - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.common.masking._last_volume_delete_masking_view, - conn, controllerConfigService, maskingViewInstanceName, - maskingViewName, extraSpecs) - - # Deleting Masking view successful - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - self.driver.common.masking._last_volume_delete_masking_view( - conn, controllerConfigService, maskingViewInstanceName, - maskingViewName, extraSpecs) - - # Bug 1403160 - make sure the storage group is cleanly deleted - def test_remove_last_vol_and_delete_sg(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - storageGroupName = self.data.storagegroupname - storageGroupInstanceName = ( - self.driver.utils.find_storage_masking_group( - conn, controllerConfigService, storageGroupName)) - - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeName = "1403160-Vol" - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': False} - - # Deleting Storage Group failed - self.assertRaises( - exception.VolumeBackendAPIException, - 
self.driver.common.masking._remove_last_vol_and_delete_sg, - conn, controllerConfigService, storageGroupInstanceName, - storageGroupName, volumeInstanceName, volumeName, extraSpecs) - - # Deleting Storage group successful - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - self.driver.common.masking._remove_last_vol_and_delete_sg( - conn, controllerConfigService, storageGroupInstanceName, - storageGroupName, volumeInstanceName, volumeName, extraSpecs) - - # Bug 1504192 - if the last volume is being unmapped and the masking view - # goes away, cleanup the initiators and associated initiator group. - def test_delete_initiators_from_initiator_group(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - initiatorGroupName = self.data.initiatorgroup_name - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - conn.InvokeMethod = mock.Mock(return_value=1) - # Deletion of initiators failed. - self.driver.common.masking._delete_initiators_from_initiator_group( - conn, controllerConfigService, initiatorGroupInstanceName, - initiatorGroupName) - conn.InvokeMethod = mock.Mock(return_value=0) - # Deletion of initiators successful. - self.driver.common.masking._delete_initiators_from_initiator_group( - conn, controllerConfigService, initiatorGroupInstanceName, - initiatorGroupName) - - # Bug 1504192 - if the last volume is being unmapped and the masking view - # goes away, cleanup the initiators and associated initiator group. 
- def test_last_volume_delete_initiator_group_exception(self): - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - conn = self.fake_ecom_connection() - host = self.data.lunmaskctrl_name.split("-")[1] - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - job = { - 'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}} - conn.InvokeMethod = mock.Mock(return_value=(4096, job)) - self.driver.common.masking.get_masking_views_by_initiator_group = ( - mock.Mock(return_value=[])) - self.driver.common.masking._delete_initiators_from_initiator_group = ( - mock.Mock(return_value=True)) - self.driver.common.masking.utils.wait_for_job_complete = ( - mock.Mock(return_value=(2, 'failure'))) - # Exception occurrs while deleting the initiator group. - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.common.masking._last_volume_delete_initiator_group, - conn, controllerConfigService, initiatorGroupInstanceName, - extraSpecs, host) - - # Bug 1504192 - if the last volume is being unmapped and the masking view - # goes away, cleanup the initiators and associated initiator group. 
- def test_last_volume_delete_initiator_group(self): - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - conn = self.fake_ecom_connection() - host = self.data.lunmaskctrl_name.split("-")[1] - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - initiatorGroupName = self.data.initiatorgroup_name - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - self.assertEqual(initiatorGroupName, - conn.GetInstance( - initiatorGroupInstanceName)['ElementName']) - # Path 1: masking view is associated with the initiator group and - # initiator group will not be deleted. - self.driver.common.masking._last_volume_delete_initiator_group( - conn, controllerConfigService, initiatorGroupInstanceName, - extraSpecs, host) - # Path 2: initiator group name is not the default name so the - # initiator group will not be deleted. - initGroup2 = initiatorGroupInstanceName - initGroup2['ElementName'] = "different-name-ig" - self.driver.common.masking._last_volume_delete_initiator_group( - conn, controllerConfigService, initGroup2, - extraSpecs, host) - # Path 3: No Masking view and IG is the default IG, so initiators - # associated with the Initiator group and the initiator group will - # be deleted. 
- self.driver.common.masking.get_masking_views_by_initiator_group = ( - mock.Mock(return_value=[])) - self.driver.common.masking._delete_initiators_from_initiator_group = ( - mock.Mock(return_value=True)) - self.driver.common.masking._last_volume_delete_initiator_group( - conn, controllerConfigService, initiatorGroupInstanceName, - extraSpecs, host) - job = { - 'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}} - conn.InvokeMethod = mock.Mock(return_value=(4096, job)) - self.driver.common.masking.utils.wait_for_job_complete = ( - mock.Mock(return_value=(0, 'success'))) - # Deletion of initiator group is successful after waiting for job - # to complete. - self.driver.common.masking._last_volume_delete_initiator_group( - conn, controllerConfigService, initiatorGroupInstanceName, - extraSpecs, host) - - # Tests removal of last volume in a storage group V2 - def test_remove_and_reset_members(self): - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': False} - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - volumeName = "Last-Vol" - self.driver.common.masking.get_devices_from_storage_group = mock.Mock( - return_value=['one_value']) - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - - self.driver.common.masking.remove_and_reset_members( - conn, controllerConfigService, volumeInstance, - volumeName, extraSpecs) - - @mock.patch.object( - masking.VMAXMasking, - 'get_associated_masking_groups_from_device', - return_value=VMAXCommonData.storagegroups) - @mock.patch.object( - utils.VMAXUtils, - 'get_existing_instance', - return_value=None) - def test_remove_and_reset_members_v3(self, mock_inst, mock_sg): - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 
'pool': 'SRP_1', - 'workload': 'DSS', - 'slo': 'Bronze'} - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - volumeName = "1416035-Vol" - - self.driver.common.masking.remove_and_reset_members( - conn, controllerConfigService, volumeInstance, - volumeName, extraSpecs, reset=False) - - # Bug 1393555 - masking view has been deleted by another process. - def test_find_maskingview(self): - conn = self.fake_ecom_connection() - foundMaskingViewInstanceName = ( - self.driver.common.masking._find_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The masking view has been found. - self.assertEqual( - self.data.lunmaskctrl_name, - conn.GetInstance(foundMaskingViewInstanceName)['ElementName']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundMaskingViewInstanceName2 = ( - self.driver.common.masking._find_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The masking view has not been found. - self.assertIsNone(foundMaskingViewInstanceName2) - - # Bug 1393555 - port group has been deleted by another process. - def test_find_portgroup(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - foundPortGroupInstanceName = ( - self.driver.common.masking.find_port_group( - conn, controllerConfigService, self.data.port_group)) - # The port group has been found. 
- self.assertEqual( - self.data.port_group, - conn.GetInstance(foundPortGroupInstanceName)['ElementName']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundPortGroupInstanceName2 = ( - self.driver.common.masking.find_port_group( - conn, controllerConfigService, self.data.port_group)) - # The port group has not been found as it has been deleted - # externally or by another thread. - self.assertIsNone(foundPortGroupInstanceName2) - - # Bug 1393555 - storage group has been deleted by another process. - def test_get_storage_group_from_masking_view(self): - conn = self.fake_ecom_connection() - foundStorageGroupInstanceName = ( - self.driver.common.masking._get_storage_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The storage group has been found. - self.assertEqual( - self.data.storagegroupname, - conn.GetInstance(foundStorageGroupInstanceName)['ElementName']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundStorageGroupInstanceName2 = ( - self.driver.common.masking._get_storage_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The storage group has not been found as it has been deleted - # externally or by another thread. - self.assertIsNone(foundStorageGroupInstanceName2) - - # Bug 1393555 - initiator group has been deleted by another process. - def test_get_initiator_group_from_masking_view(self): - conn = self.fake_ecom_connection() - foundInitiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The initiator group has been found. 
- self.assertEqual( - self.data.initiatorgroup_name, - conn.GetInstance(foundInitiatorGroupInstanceName)['ElementName']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundInitiatorGroupInstanceName2 = ( - self.driver.common.masking._get_storage_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The initiator group has not been found as it has been deleted - # externally or by another thread. - self.assertIsNone(foundInitiatorGroupInstanceName2) - - # Bug 1393555 - port group has been deleted by another process. - def test_get_port_group_from_masking_view(self): - conn = self.fake_ecom_connection() - foundPortGroupInstanceName = ( - self.driver.common.masking._get_port_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The port group has been found. - self.assertEqual( - self.data.port_group, - conn.GetInstance(foundPortGroupInstanceName)['ElementName']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundPortGroupInstanceName2 = ( - self.driver.common.masking._get_port_group_from_masking_view( - conn, self.data.lunmaskctrl_name, self.data.storage_system)) - # The port group has not been found as it has been deleted - # externally or by another thread. - self.assertIsNone(foundPortGroupInstanceName2) - - # Bug 1393555 - initiator group has been deleted by another process. - def test_find_initiator_group(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - foundInitiatorGroupInstanceName = ( - self.driver.common.masking._find_initiator_masking_group( - conn, controllerConfigService, self.data.initiatorNames)) - # The initiator group has been found. 
- self.assertEqual( - self.data.initiatorgroup_name, - conn.GetInstance(foundInitiatorGroupInstanceName)['ElementName']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundInitiatorGroupInstanceName2 = ( - self.driver.common.masking._find_initiator_masking_group( - conn, controllerConfigService, self.data.initiatorNames)) - # The initiator group has not been found as it has been deleted - # externally or by another thread. - self.assertIsNone(foundInitiatorGroupInstanceName2) - - # Bug 1393555 - hardware id has been deleted by another process. - def test_get_storage_hardware_id_instance_names(self): - conn = self.fake_ecom_connection() - foundHardwareIdInstanceNames = ( - self.driver.common.masking._get_storage_hardware_id_instance_names( - conn, self.data.initiatorNames, self.data.storage_system)) - # The hardware id list has been found. - self.assertEqual( - '123456789012345', - conn.GetInstance( - foundHardwareIdInstanceNames[0])['StorageID']) - - self.driver.common.masking.utils.get_existing_instance = mock.Mock( - return_value=None) - foundHardwareIdInstanceNames2 = ( - self.driver.common.masking._get_storage_hardware_id_instance_names( - conn, self.data.initiatorNames, self.data.storage_system)) - # The hardware id list has not been found as it has been removed - # externally. - self.assertEqual(0, len(foundHardwareIdInstanceNames2)) - - # Bug 1393555 - controller has been deleted by another process. - def test_find_lunmasking_scsi_protocol_controller(self): - self.driver.common.conn = self.fake_ecom_connection() - foundControllerInstanceName = ( - self.driver.common._find_lunmasking_scsi_protocol_controller( - self.data.storage_system, self.data.connector)) - # The controller has been found. 
- self.assertEqual( - 'OS-fakehost-gold-I-MV', - self.driver.common.conn.GetInstance( - foundControllerInstanceName)['ElementName']) - - self.driver.common.utils.get_existing_instance = mock.Mock( - return_value=None) - foundControllerInstanceName2 = ( - self.driver.common._find_lunmasking_scsi_protocol_controller( - self.data.storage_system, self.data.connector)) - # The controller has not been found as it has been removed - # externally. - self.assertIsNone(foundControllerInstanceName2) - - # Bug 1393555 - storage group has been deleted by another process. - def test_get_policy_default_storage_group(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - foundStorageMaskingGroupInstanceName = ( - self.driver.common.fast.get_policy_default_storage_group( - conn, controllerConfigService, 'OS_default')) - # The storage group has been found. - self.assertEqual( - 'OS_default_GOLD1_SG', - conn.GetInstance( - foundStorageMaskingGroupInstanceName)['ElementName']) - - self.driver.common.fast.utils.get_existing_instance = mock.Mock( - return_value=None) - foundStorageMaskingGroupInstanceName2 = ( - self.driver.common.fast.get_policy_default_storage_group( - conn, controllerConfigService, 'OS_default')) - # The storage group has not been found as it has been removed - # externally. - self.assertIsNone(foundStorageMaskingGroupInstanceName2) - - # Bug 1393555 - policy has been deleted by another process. - def test_get_capacities_associated_to_policy(self): - conn = self.fake_ecom_connection() - (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, - array_max_over_subscription) = ( - self.driver.common.fast.get_capacities_associated_to_policy( - conn, self.data.storage_system, self.data.policyrule)) - # The capacities associated to the policy have been found. 
- self.assertEqual(self.data.totalmanagedspace_gbs, total_capacity_gb) - self.assertEqual(self.data.remainingmanagedspace_gbs, free_capacity_gb) - - self.driver.common.fast.utils.get_existing_instance = mock.Mock( - return_value=None) - (total_capacity_gb_2, free_capacity_gb_2, provisioned_capacity_gb_2, - array_max_over_subscription_2) = ( - self.driver.common.fast.get_capacities_associated_to_policy( - conn, self.data.storage_system, self.data.policyrule)) - # The capacities have not been found as the policy has been - # removed externally. - self.assertEqual(0, total_capacity_gb_2) - self.assertEqual(0, free_capacity_gb_2) - self.assertEqual(0, provisioned_capacity_gb_2) - - # Bug 1393555 - storage group has been deleted by another process. - def test_find_storage_masking_group(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - foundStorageMaskingGroupInstanceName = ( - self.driver.common.utils.find_storage_masking_group( - conn, controllerConfigService, self.data.storagegroupname)) - # The storage group has been found. - self.assertEqual( - self.data.storagegroupname, - conn.GetInstance( - foundStorageMaskingGroupInstanceName)['ElementName']) - - self.driver.common.utils.get_existing_instance = mock.Mock( - return_value=None) - foundStorageMaskingGroupInstanceName2 = ( - self.driver.common.utils.find_storage_masking_group( - conn, controllerConfigService, self.data.storagegroupname)) - # The storage group has not been found as it has been removed - # externally. - self.assertIsNone(foundStorageMaskingGroupInstanceName2) - - # Bug 1393555 - pool has been deleted by another process. - def test_get_pool_by_name(self): - conn = self.fake_ecom_connection() - - foundPoolInstanceName = self.driver.common.utils.get_pool_by_name( - conn, self.data.poolname, self.data.storage_system) - # The pool has been found. 
- self.assertEqual( - self.data.poolname, - conn.GetInstance(foundPoolInstanceName)['ElementName']) - - self.driver.common.utils.get_existing_instance = mock.Mock( - return_value=None) - foundPoolInstanceName2 = self.driver.common.utils.get_pool_by_name( - conn, self.data.poolname, self.data.storage_system) - # The pool has not been found as it has been removed externally. - self.assertIsNone(foundPoolInstanceName2) - - def test_get_volume_stats_1364232(self): - file_name = self.create_fake_config_file_1364232() - - arrayInfo = self.driver.utils.parse_file_to_get_array_map(file_name) - self.assertEqual( - '000198700439', arrayInfo[0]['SerialNumber']) - self.assertEqual( - 'FC_SLVR1', arrayInfo[0]['PoolName']) - self.assertEqual( - 'SILVER1', arrayInfo[0]['FastPolicy']) - self.assertIn('OS-PORTGROUP', arrayInfo[0]['PortGroup']) - bExists = os.path.exists(file_name) - if bExists: - os.remove(file_name) - - def test_intervals_and_retries_override( - self): - file_name = ( - self.create_fake_config_file_no_fast_with_interval_retries()) - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - pool = 'gold+1234567891011' - arrayInfo = self.driver.utils.parse_file_to_get_array_map( - self.config_file_path) - poolRec = self.driver.utils.extract_record(arrayInfo, pool) - extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, - poolRec) - self.assertEqual(40, - self.driver.utils._get_max_job_retries(extraSpecs)) - self.assertEqual(5, - self.driver.utils._get_interval_in_secs(extraSpecs)) - - bExists = os.path.exists(file_name) - if bExists: - os.remove(file_name) - - def test_intervals_and_retries_default(self): - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - pool = 'gold+1234567891011' - arrayInfo = self.driver.utils.parse_file_to_get_array_map( - self.config_file_path) - poolRec = self.driver.utils.extract_record(arrayInfo, pool) - extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, - poolRec) - # Set JOB_RETRIES and INTERVAL_10_SEC to 0 to 
avoid timeout - self.assertEqual(0, - self.driver.utils._get_max_job_retries(extraSpecs)) - self.assertEqual(0, - self.driver.utils._get_interval_in_secs(extraSpecs)) - - def test_interval_only(self): - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - file_name = self.create_fake_config_file_no_fast_with_interval() - pool = 'gold+1234567891011' - arrayInfo = self.driver.utils.parse_file_to_get_array_map( - self.config_file_path) - poolRec = self.driver.utils.extract_record(arrayInfo, pool) - extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, - poolRec) - # Set JOB_RETRIES 0 to avoid timeout - self.assertEqual(0, - self.driver.utils._get_max_job_retries(extraSpecs)) - self.assertEqual(20, - self.driver.utils._get_interval_in_secs(extraSpecs)) - - bExists = os.path.exists(file_name) - if bExists: - os.remove(file_name) - - def test_retries_only(self): - extraSpecs = {'volume_backend_name': 'ISCSINoFAST'} - file_name = self.create_fake_config_file_no_fast_with_retries() - pool = 'gold+1234567891011' - arrayInfo = self.driver.utils.parse_file_to_get_array_map( - self.config_file_path) - poolRec = self.driver.utils.extract_record(arrayInfo, pool) - extraSpecs = self.driver.common._set_v2_extra_specs(extraSpecs, - poolRec) - self.assertEqual(70, - self.driver.utils._get_max_job_retries(extraSpecs)) - # Set INTERVAL_10_SEC to 0 to avoid timeout - self.assertEqual(0, - self.driver.utils._get_interval_in_secs(extraSpecs)) - - bExists = os.path.exists(file_name) - if bExists: - os.remove(file_name) - - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=2.0) - @mock.patch.object( - utils.VMAXUtils, - 'isArrayV3', - return_value=False) - @mock.patch.object( - utils.VMAXUtils, - 'get_pool_capacities', - return_value=(1234, 1200, 1200, 1)) - @mock.patch.object( - fast.VMAXFast, - 'is_tiering_policy_enabled', - return_value=False) - @mock.patch.object( - utils.VMAXUtils, - 'find_storageSystem', - return_value=None) - def 
test_get_volume_stats_no_fast(self, - mock_storage_system, - mock_is_fast_enabled, - mock_capacity, - mock_is_v3, - mock_or): - self.driver.common.pool_info['arrays_info'] = ( - [{'EcomServerIp': '1.1.1.1', - 'EcomServerPort': '5989', - 'EcomUserName': 'name', - 'EcomPassword': 'password', - 'SerialNumber': '1234567890', - 'PoolName': 'v2_pool', - 'FastPolicy': 'gold'}]) - self.driver.get_volume_stats(True) - self.driver.common.pool_info['arrays_info'] = [] - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_volume_no_fast_success( - self, _mock_volume_type, mock_storage_system): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype: stripedmetacount': '4', - 'volume_backend_name': 'ISCSINoFAST'}) - def test_create_volume_no_fast_striped_success( - self, _mock_volume_type, mock_storage_system): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_volume_in_CG_no_fast_success( - self, _mock_volume_type, mock_storage_system): - self.driver.create_volume(self.data.test_volume_CG) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def 
test_delete_volume_no_fast_success( - self, _mock_volume_type, mock_storage_system): - self.driver.delete_volume(self.data.test_volume) - - def test_create_volume_no_fast_failed(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - self.data.test_failed_volume) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_delete_volume_no_fast_notfound(self, _mock_volume_type): - notfound_delete_vol = {} - notfound_delete_vol['name'] = 'notfound_delete_vol' - notfound_delete_vol['id'] = '10' - notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' - notfound_delete_vol['SystemName'] = self.data.storage_system - notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] - notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - notfound_delete_vol['volume_type_id'] = 'abc' - notfound_delete_vol['provider_location'] = None - notfound_delete_vol['host'] = self.data.fake_host - name = {} - name['classname'] = 'Symm_StorageVolume' - keys = {} - keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] - keys['SystemName'] = notfound_delete_vol['SystemName'] - keys['DeviceID'] = notfound_delete_vol['DeviceID'] - keys['SystemCreationClassName'] = ( - notfound_delete_vol['SystemCreationClassName']) - name['keybindings'] = keys - - self.driver.delete_volume(notfound_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'wait_for_job_complete', - return_value=(-1, 'error')) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_delete_volume_failed( - self, _mock_volume_type, mock_storage_system, mock_wait): - self.driver.create_volume(self.data.failed_delete_vol) - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, - self.data.failed_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - @mock.patch.object( - common.VMAXCommon, - '_is_same_host', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'hostlunid': 1, - 'storagesystem': VMAXCommonData.storage_system}, - False, {})) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_already_mapped_no_fast_success( - self, _mock_volume_type, mock_wrap_group, mock_wrap_device, - mock_is_same_host, mock_rec): - self.driver.common._get_correct_port_group = mock.Mock( - return_value=self.data.port_group) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - - @mock.patch.object( - masking.VMAXMasking, - '_check_adding_volume_to_storage_group', - return_value=None) - @mock.patch.object( - utils.VMAXUtils, - 'find_storage_masking_group', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_map_new_masking_view_no_fast_success( - self, _mock_volume_type, mock_wrap_group, - mock_storage_group, mock_add_volume): - self.driver.common._wrap_find_device_number = mock.Mock( - return_value=({}, False, {})) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - - @mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - @mock.patch.object( - common.VMAXCommon, - '_get_port_group_from_source', - return_value={'CreationClassName': 'CIM_TargetMaskingGroup', - 'ElementName': 
'OS-portgroup-PG'}) - @mock.patch.object( - common.VMAXCommon, - '_get_storage_group_from_source', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - common.VMAXCommon, - '_is_same_host', - return_value=False) - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'hostlunid': 1, - 'storagesystem': VMAXCommonData.storage_system}, - True, - {'hostlunid': 1, - 'storagesystem': VMAXCommonData.storage_system})) - @mock.patch.object( - common.VMAXCommon, - '_wrap_find_device_number', - return_value=({}, True, - {'hostlunid': 1, - 'storagesystem': VMAXCommonData.storage_system})) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_map_live_migration_no_fast_success(self, - _mock_volume_type, - mock_wrap_device, - mock_device, - mock_same_host, - mock_sg_from_mv, - mock_pg_from_mv, - mock_rec): - extraSpecs = self.data.extra_specs - rollback_dict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - with mock.patch.object(self.driver.common.masking, - 'setup_masking_view', - return_value=rollback_dict): - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - - @mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - @mock.patch.object( - masking.VMAXMasking, - '_get_initiator_group_from_masking_view', - return_value='value') - @mock.patch.object( - masking.VMAXMasking, - '_find_initiator_masking_group', - return_value='value') - @mock.patch.object( - masking.VMAXMasking, - '_find_masking_view', - return_value='value') - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_map_existing_masking_view_no_fast_success( - self, _mock_volume_type, 
mock_wrap_group, mock_storage_group, - mock_initiator_group, mock_ig_from_mv, mock_rec): - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'storagesystem': VMAXCommonData.storage_system}, - False, {})) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - def test_map_no_fast_failed(self, mock_wrap_group, mock_wrap_device): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.data.test_volume, - self.data.connector) - - @mock.patch.object( - masking.VMAXMasking, - 'get_initiator_group_from_masking_view', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - '_find_initiator_masking_group', - return_value='myInitGroup') - @mock.patch.object( - utils.VMAXUtils, - 'find_storage_masking_group', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_detach_no_fast_success( - self, mock_volume_type, mock_storage_group, - mock_ig, mock_igc): - self.driver.terminate_connection( - self.data.test_volume, self.data.connector) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_size', - return_value='2147483648') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_extend_volume_no_fast_success( - self, _mock_volume_type, mock_volume_size): - newSize = '2' - self.driver.extend_volume(self.data.test_volume, newSize) - - @mock.patch.object( - utils.VMAXUtils, - 'check_if_volume_is_extendable', - return_value='False') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype: stripedmetacount': '4', - 'volume_backend_name': 'ISCSINoFAST'}) - def 
test_extend_volume_striped_no_fast_failed( - self, _mock_volume_type, _mock_is_extendable): - newSize = '2' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.data.test_volume, - newSize) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_snapshot_different_sizes_meta_no_fast_success( - self, mock_volume_type, - mock_meta, mock_size, mock_pool): - common = self.driver.common - volumeDict = {'classname': u'Symm_StorageVolume', - 'keybindings': VMAXCommonData.keybindings} - common.provision.create_volume_from_pool = ( - mock.Mock(return_value=(volumeDict, 0))) - common.provision.get_volume_dict_from_job = ( - mock.Mock(return_value=volumeDict)) - self.driver.create_snapshot(self.data.test_snapshot) - - @mock.patch.object( - utils.VMAXUtils, - 'parse_file_to_get_array_map', - return_value=None) - def test_create_snapshot_no_fast_failed(self, mock_pool): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, - self.data.test_snapshot) - - @unittest.skip("Skip until bug #1578986 is fixed") - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - return_value=(None, None)) - @mock.patch.object( 
- volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_volume_from_same_size_meta_snapshot( - self, mock_volume_type, mock_sync_sv, mock_meta, mock_size, - mock_compare): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.driver.create_volume_from_snapshot( - self.data.test_volume, self.data.test_volume) - - def test_create_volume_from_snapshot_no_fast_failed(self): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.data.test_volume, - self.data.test_volume) - - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_find_storage_sync_sv_sv', - return_value=(None, None)) - @mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_clone_simple_volume_no_fast_success( - self, mock_volume_type, mock_volume, mock_sync_sv, - mock_simple_volume, mock_compare): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.driver.create_cloned_volume(self.data.test_volume, - VMAXCommonData.test_source_volume) - - # Bug https://bugs.launchpad.net/cinder/+bug/1440154 - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume) - @mock.patch.object( - volume_types, - 
'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - @mock.patch.object( - provision.VMAXProvision, - 'create_element_replica') - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - return_value=(None, None)) - def test_create_clone_assert_clean_up_target_volume( - self, mock_sync, mock_create_replica, mock_volume_type, - mock_volume, mock_capacities, mock_pool, mock_meta_volume): - self.data.test_volume['volume_name'] = "vmax-1234567" - e = exception.VolumeBackendAPIException('CreateElementReplica Ex') - common = self.driver.common - common._delete_from_pool = mock.Mock(return_value=0) - conn = self.fake_ecom_connection() - storageConfigService = ( - common.utils.find_storage_configuration_service( - conn, self.data.storage_system)) - mock_create_replica.side_effect = e - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.data.test_volume, - VMAXCommonData.test_source_volume) - extraSpecs = common._initial_setup(self.data.test_volume) - fastPolicy = extraSpecs['storagetype:fastpolicy'] - targetInstance = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - common._delete_from_pool.assert_called_with(storageConfigService, - targetInstance, - targetInstance['Name'], - targetInstance['DeviceID'], - fastPolicy, - extraSpecs) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_migrate_volume_no_fast_success(self, _mock_volume_type): - self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, - self.data.test_host) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_CG_no_fast_success( - self, _mock_volume_type, _mock_storage_system): - 
self.driver.create_consistencygroup( - self.data.test_ctxt, self.data.test_CG) - - @mock.patch.object( - FakeDB, - 'volume_get_all_by_group', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_delete_CG_no_volumes_no_fast_success( - self, _mock_volume_type, _mock_storage_system, - _mock_db_volumes): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_delete_CG_with_volumes_no_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - utils.VMAXUtils, - 'find_group_sync_rg_by_target', - return_value="") - @mock.patch.object( - common.VMAXCommon, - '_get_members_of_replication_group', - return_value=()) - @mock.patch.object( - common.VMAXCommon, - '_find_consistency_group', - return_value=( - VMAXCommonData.test_CG, - VMAXCommonData.test_CG['name'] + "_" + ( - VMAXCommonData.test_CG['id']))) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, - _mock_rg): - self.driver.create_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - 
'_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_delete_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage): - self.driver.delete_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_update_CG_add_volume_no_fast_success( - self, _mock_volume_type, _mock_storage_system): - add_volumes = [] - add_volumes.append(self.data.test_source_volume) - remove_volumes = None - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Multiple volumes - add_volumes.append(self.data.test_source_volume) - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Can't find CG - self.driver.common._find_consistency_group = mock.Mock( - return_value=(None, 'cg_name')) - self.assertRaises(exception.ConsistencyGroupNotFound, - self.driver.update_consistencygroup, - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_update_CG_remove_volume_no_fast_success( - self, _mock_volume_type, _mock_storage_system): - remove_volumes = [] - remove_volumes.append(self.data.test_source_volume) - add_volumes = None - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # 
Multiple volumes - remove_volumes.append(self.data.test_source_volume) - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - - # Bug https://bugs.launchpad.net/cinder/+bug/1442376 - @unittest.skip("Skip until bug #1578986 is fixed") - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def _test_create_clone_with_different_meta_sizes( - self, mock_volume_type, mock_volume, - mock_meta, mock_size, mock_pool, mock_compare): - self.data.test_volume['volume_name'] = "vmax-1234567" - common = self.driver.common - volumeDict = {'classname': u'Symm_StorageVolume', - 'keybindings': VMAXCommonData.keybindings} - volume = {'size': 0} - common.provision.create_volume_from_pool = ( - mock.Mock(return_value=(volumeDict, volume['size']))) - common.provision.get_volume_dict_from_job = ( - mock.Mock(return_value=volumeDict)) - - common._create_composite_volume = ( - mock.Mock(return_value=(0, - volumeDict, - VMAXCommonData.storage_system))) - self.driver.create_cloned_volume(self.data.test_volume, - VMAXCommonData.test_source_volume) - extraSpecs = self.driver.common._initial_setup(self.data.test_volume) - common._create_composite_volume.assert_called_with( - volume, "TargetBaseVol", 1234567, extraSpecs, 1) - - def test_get_volume_element_name(self): - volumeId = 'ea95aa39-080b-4f11-9856-a03acf9112ad' - util = 
self.driver.common.utils - volumeElementName = util.get_volume_element_name(volumeId) - expectVolumeElementName = ( - utils.VOLUME_ELEMENT_NAME_PREFIX + volumeId) - self.assertEqual(expectVolumeElementName, volumeElementName) - - def test_get_associated_replication_from_source_volume(self): - conn = self.fake_ecom_connection() - utils = self.driver.common.utils - repInstanceName = ( - utils.get_associated_replication_from_source_volume( - conn, self.data.storage_system, - self.data.test_volume['device_id'])) - expectInstanceName = ( - conn.EnumerateInstanceNames('SE_StorageSynchronized_SV_SV')[0]) - self.assertEqual(expectInstanceName, repInstanceName) - - def test_get_array_and_device_id_success(self): - deviceId = '0123' - arrayId = '1234567891011' - external_ref = {u'source-name': deviceId} - volume = {'volume_metadata': [{'key': 'array', 'value': arrayId}] - } - volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011' - utils = self.driver.common.utils - (arrId, devId) = utils.get_array_and_device_id(volume, external_ref) - self.assertEqual(arrayId, arrId) - self.assertEqual(deviceId, devId) - - def test_get_array_and_device_id_failed(self): - deviceId = '0123' - arrayId = '1234567891011' - external_ref = {u'no-source-name': deviceId} - volume = {'volume_metadata': [{'key': 'array', 'value': arrayId}] - } - volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011' - utils = self.driver.common.utils - self.assertRaises(exception.VolumeBackendAPIException, - utils.get_array_and_device_id, - volume, - external_ref) - - def test_rename_volume(self): - conn = self.fake_ecom_connection() - util = self.driver.common.utils - newName = 'new_name' - volume = {} - volume['CreationClassName'] = 'Symm_StorageVolume' - volume['DeviceID'] = '1' - volume['ElementName'] = 'original_name' - pywbem = mock.Mock() - pywbem.cim_obj = mock.Mock() - pywbem.cim_obj.CIMInstance = mock.Mock() - utils.pywbem = pywbem - volumeInstance = conn.GetInstance(volume) - originalName = 
volumeInstance['ElementName'] - volumeInstance = util.rename_volume(conn, volumeInstance, newName) - self.assertEqual(newName, volumeInstance['ElementName']) - volumeInstance = util.rename_volume( - conn, volumeInstance, originalName) - self.assertEqual(originalName, volumeInstance['ElementName']) - - def test_get_smi_version(self): - conn = self.fake_ecom_connection() - utils = self.driver.common.utils - version = utils.get_smi_version(conn) - expected = int(str(self.data.majorVersion) - + str(self.data.minorVersion) - + str(self.data.revNumber)) - self.assertEqual(version, expected) - - def test_get_pool_name(self): - conn = self.fake_ecom_connection() - utils = self.driver.common.utils - poolInstanceName = {} - poolInstanceName['InstanceID'] = "SATA_GOLD1" - poolInstanceName['CreationClassName'] = 'Symm_VirtualProvisioningPool' - poolName = utils.get_pool_name(conn, poolInstanceName) - self.assertEqual(poolName, self.data.poolname) - - def test_get_meta_members_capacity_in_byte(self): - conn = self.fake_ecom_connection() - utils = self.driver.common.utils - memberVolumeInstanceNames = [] - volumeHead = EMC_StorageVolume() - volumeHead.classname = 'Symm_StorageVolume' - blockSize = self.data.block_size - volumeHead['ConsumableBlocks'] = ( - self.data.metaHead_volume['ConsumableBlocks']) - volumeHead['BlockSize'] = blockSize - volumeHead['DeviceID'] = self.data.metaHead_volume['DeviceID'] - memberVolumeInstanceNames.append(volumeHead) - metaMember1 = EMC_StorageVolume() - metaMember1.classname = 'Symm_StorageVolume' - metaMember1['ConsumableBlocks'] = ( - self.data.meta_volume1['ConsumableBlocks']) - metaMember1['BlockSize'] = blockSize - metaMember1['DeviceID'] = self.data.meta_volume1['DeviceID'] - memberVolumeInstanceNames.append(metaMember1) - metaMember2 = EMC_StorageVolume() - metaMember2.classname = 'Symm_StorageVolume' - metaMember2['ConsumableBlocks'] = ( - self.data.meta_volume2['ConsumableBlocks']) - metaMember2['BlockSize'] = blockSize - 
metaMember2['DeviceID'] = self.data.meta_volume2['DeviceID'] - memberVolumeInstanceNames.append(metaMember2) - capacities = utils.get_meta_members_capacity_in_byte( - conn, memberVolumeInstanceNames) - headSize = ( - volumeHead['ConsumableBlocks'] - - metaMember1['ConsumableBlocks'] - - metaMember2['ConsumableBlocks']) - expected = [headSize * blockSize, - metaMember1['ConsumableBlocks'] * blockSize, - metaMember2['ConsumableBlocks'] * blockSize] - self.assertEqual(capacities, expected) - - def test_get_composite_elements(self): - conn = self.fake_ecom_connection() - utils = self.driver.common.utils - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - memberVolumeInstanceNames = utils.get_composite_elements( - conn, volumeInstance) - expected = [self.data.metaHead_volume, - self.data.meta_volume1, - self.data.meta_volume2] - self.assertEqual(memberVolumeInstanceNames, expected) - - def test_get_volume_model_updates(self): - utils = self.driver.common.utils - status = 'status-string' - volumes = utils.get_volume_model_updates( - self.driver.db.volume_get_all_by_group("", 5), - self.data.test_CG['id'], - status) - self.assertEqual(status, volumes[0]['status']) - - @mock.patch.object( - utils.VMAXUtils, - 'find_group_sync_rg_by_target', - return_value="") - @mock.patch.object( - common.VMAXCommon, - '_find_consistency_group', - return_value=( - VMAXCommonData.source_CG, - VMAXCommonData.source_CG['name'] + "_" + ( - VMAXCommonData.source_CG['id']))) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - def test_create_consistencygroup_from_src( - self, _mock_volume_type, _mock_storage, _mock_cg, _mock_rg): - volumes = [] - volumes.append(self.data.test_source_volume) - snapshots = [] 
- self.data.test_snapshot['volume_size'] = "10" - snapshots.append(self.data.test_snapshot) - model_update, volumes_model_update = ( - self.driver.create_consistencygroup_from_src( - self.data.test_ctxt, self.data.source_CG, volumes, - self.data.test_CG_snapshot, snapshots)) - self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, - model_update) - for volume_model_update in volumes_model_update: - if 'status' in volume_model_update: - self.assertEqual(volume_model_update['status'], 'available') - if 'id' in volume_model_update: - self.assertEqual(volume_model_update['id'], '2') - self.assertTrue('provider_location' in volume_model_update) - self.assertTrue('admin_metadata' in volume_model_update) - - @mock.patch.object( - common.VMAXCommon, - '_update_pool_stats', - return_value={1, 2, 3, 4, 5}) - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=1.0) - def test_ssl_support(self, mock_ratio, pool_stats): - self.driver.common.pool_info['arrays_info'] = ( - [{'EcomServerIp': '1.1.1.1', - 'EcomServerPort': '5989', - 'EcomUserName': 'name', - 'EcomPassword': 'password', - 'SerialNumber': '1234567890', - 'PoolName': 'v2_pool'}]) - self.driver.common.update_volume_stats() - self.assertTrue(self.driver.common.ecomUseSSL) - - def _cleanup(self): - if self.config_file_path: - bExists = os.path.exists(self.config_file_path) - if bExists: - os.remove(self.config_file_path) - shutil.rmtree(self.tempdir) - - -class VMAXISCSIDriverFastTestCase(test.TestCase): - - def setUp(self): - - self.data = VMAXCommonData() - - self.tempdir = tempfile.mkdtemp() - super(VMAXISCSIDriverFastTestCase, self).setUp() - self.config_file_path = None - self.create_fake_config_file_fast() - self.addCleanup(self._cleanup) - - configuration = mock.Mock() - configuration.cinder_emc_config_file = self.config_file_path - configuration.safe_get.return_value = 'ISCSIFAST' - configuration.config_group = 'ISCSIFAST' - self.mock_object(common.VMAXCommon, 
'_get_ecom_connection', - self.fake_ecom_connection) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - self.fake_is_v3) - self.mock_object(cinder_utils, 'get_bool_param', - return_value=False) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.patcher = mock.patch( - 'oslo_service.loopingcall.FixedIntervalLoopingCall', - new=unit_utils.ZeroIntervalLoopingCall) - self.patcher.start() - - def create_fake_config_file_fast(self): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - fastPolicy = doc.createElement("FastPolicy") - fastPolicyText = doc.createTextNode("GOLD1") - emc.appendChild(fastPolicy) - fastPolicy.appendChild(fastPolicyText) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = 
doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("gold") - emc.appendChild(pool) - pool.appendChild(pooltext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - filename = 'cinder_emc_config_ISCSIFAST.xml' - - self.config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - - def fake_ecom_connection(self): - conn = FakeEcomConnection() - return conn - - def fake_is_v3(self, conn, serialNumber): - return False - - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=2.0) - @mock.patch.object( - fast.VMAXFast, - 'get_capacities_associated_to_policy', - return_value=(1234, 1200, 1200, 1)) - @mock.patch.object( - utils.VMAXUtils, - 'get_pool_capacities', - return_value=(1234, 1200, 1200, 1)) - @mock.patch.object( - fast.VMAXFast, - 'get_tier_policy_by_name', - return_value=None) - @mock.patch.object( - fast.VMAXFast, - 'is_tiering_policy_enabled', - return_value=True) - @mock.patch.object( - utils.VMAXUtils, - 'find_storageSystem', - return_value=None) - def test_get_volume_stats_fast(self, - mock_storage_system, - mock_is_fast_enabled, - mock_get_policy, - mock_pool_capacities, - mock_capacities_associated_to_policy, - mock_or): - self.driver.common.pool_info['arrays_info'] = ( - [{'EcomServerIp': '1.1.1.1', - 'EcomServerPort': '5989', - 'EcomUserName': 'name', - 'EcomPassword': 'password', - 'SerialNumber': '1234567890', - 'PoolName': 'v2_pool', - 'FastPolicy': 'gold'}]) - self.driver.get_volume_stats(True) - self.driver.common.pool_info['arrays_info'] = [] - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 
'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_volume_fast_success( - self, _mock_volume_type, mock_storage_system, mock_pool_policy): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype: stripedmetacount': '4', - 'volume_backend_name': 'ISCSIFAST'}) - def test_create_volume_fast_striped_success( - self, _mock_volume_type, mock_storage_system, mock_pool_policy): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_volume_in_CG_fast_success( - self, _mock_volume_type, mock_storage_system, mock_pool_policy): - self.driver.create_volume(self.data.test_volume_CG) - - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_delete_volume_fast_success( - self, _mock_volume_type, mock_storage_group): - self.driver.delete_volume(self.data.test_volume) - - def test_create_volume_fast_failed(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - self.data.test_failed_volume) - - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 
'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_delete_volume_fast_notfound( - self, _mock_volume_type, mock_wrapper): - notfound_delete_vol = {} - notfound_delete_vol['name'] = 'notfound_delete_vol' - notfound_delete_vol['id'] = '10' - notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' - notfound_delete_vol['SystemName'] = self.data.storage_system - notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] - notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - notfound_delete_vol['host'] = self.data.fake_host - name = {} - name['classname'] = 'Symm_StorageVolume' - keys = {} - keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] - keys['SystemName'] = notfound_delete_vol['SystemName'] - keys['DeviceID'] = notfound_delete_vol['DeviceID'] - keys['SystemCreationClassName'] = ( - notfound_delete_vol['SystemCreationClassName']) - name['keybindings'] = keys - notfound_delete_vol['volume_type_id'] = 'abc' - notfound_delete_vol['provider_location'] = None - self.driver.delete_volume(notfound_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'wait_for_job_complete', - return_value=(-1, 'error')) - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_delete_volume_fast_failed( - self, _mock_volume_type, _mock_storage_group, - mock_storage_system, mock_policy_pool, mock_wait): - self.driver.create_volume(self.data.failed_delete_vol) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, - self.data.failed_delete_vol) - - 
@mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - @mock.patch.object( - common.VMAXCommon, - '_is_same_host', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'hostlunid': 1, - 'storagesystem': VMAXCommonData.storage_system}, - False, {})) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_already_mapped_fast_success( - self, _mock_volume_type, mock_wrap_group, mock_wrap_device, - mock_is_same_host, mock_rec): - self.driver.common._get_correct_port_group = mock.Mock( - return_value=self.data.port_group) - self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'storagesystem': VMAXCommonData.storage_system}, - False, {})) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - def test_map_fast_failed(self, mock_wrap_group, mock_wrap_device): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.data.test_volume, - self.data.connector) - - @mock.patch.object( - common.VMAXCommon, - 'get_target_wwns_from_masking_view', - return_value=[{'Name': '5000090000000000'}]) - @mock.patch.object( - masking.VMAXMasking, - 'get_initiator_group_from_masking_view', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - '_find_initiator_masking_group', - return_value='myInitGroup') - @mock.patch.object( - utils.VMAXUtils, - 'find_storage_masking_group', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_detach_fast_success( - self, mock_volume_type, 
mock_storage_group, - mock_ig, mock_igc, mock_tw): - self.driver.terminate_connection( - self.data.test_volume, self.data.connector) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_size', - return_value='2147483648') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_extend_volume_fast_success( - self, _mock_volume_type, mock_volume_size): - newSize = '2' - self.driver.extend_volume(self.data.test_volume, newSize) - - @mock.patch.object( - utils.VMAXUtils, - 'check_if_volume_is_extendable', - return_value='False') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_extend_volume_striped_fast_failed( - self, _mock_volume_type, _mock_is_extendable): - newSize = '2' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.data.test_volume, - newSize) - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_snapshot_different_sizes_meta_fast_success( - self, mock_volume_type, - mock_meta, mock_size, mock_pool, mock_policy): - self.data.test_volume['volume_name'] = "vmax-1234567" - common = self.driver.common - - volumeDict = {'classname': u'Symm_StorageVolume', - 'keybindings': VMAXCommonData.keybindings} - common.provision.create_volume_from_pool = ( - mock.Mock(return_value=(volumeDict, 0))) - 
common.provision.get_volume_dict_from_job = ( - mock.Mock(return_value=volumeDict)) - common.fast.is_volume_in_default_SG = ( - mock.Mock(return_value=True)) - self.driver.create_snapshot(self.data.test_snapshot) - - @mock.patch.object( - utils.VMAXUtils, - 'parse_file_to_get_array_map', - return_value=None) - def test_create_snapshot_fast_failed(self, mock_pool): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, - self.data.test_snapshot) - - @unittest.skip("Skip until bug #1578986 is fixed") - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - utils.VMAXUtils, - 'wait_for_job_complete', - return_value=(0, 'success')) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - return_value=(None, None)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_volume_from_same_size_meta_snapshot( - self, mock_volume_type, mock_sync_sv, mock_meta, mock_size, - mock_wait, mock_compare): - self.data.test_volume['volume_name'] = "vmax-1234567" - common = self.driver.common - common.fast.is_volume_in_default_SG = mock.Mock(return_value=True) - self.driver.create_volume_from_snapshot( - self.data.test_volume, self.data.test_volume) - - @mock.patch.object( - utils.VMAXUtils, - 'is_clone_licensed', - return_value=False) - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - return_value=(None, None)) - @mock.patch.object( - utils.VMAXUtils, - 'find_replication_service', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 
'ISCSIFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def test_create_volume_from_snapshot_fast_failed( - self, mock_volume_type, - mock_rep_service, mock_sync_sv, mock_license): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.data.test_volume, - VMAXCommonData.test_source_volume) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_clone_fast_failed( - self, mock_volume_type, mock_vol, - mock_policy, mock_meta, mock_size, mock_pool): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.driver.common._modify_and_get_composite_volume_instance = ( - mock.Mock(return_value=(1, None))) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.data.test_volume, - VMAXCommonData.test_source_volume) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_migrate_volume_fast_success(self, _mock_volume_type): - self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, - self.data.test_host) - - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - utils.VMAXUtils, - 'parse_pool_instance_id', - return_value=('silver', 
'SYMMETRIX+000195900551')) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_retype_volume_fast_success( - self, _mock_volume_type, mock_values, mock_wrap): - self.driver.retype( - self.data.test_ctxt, self.data.test_volume, self.data.new_type, - self.data.diff, self.data.test_host) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_CG_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.create_consistencygroup( - self.data.test_ctxt, self.data.test_CG) - - @mock.patch.object( - FakeDB, - 'volume_get_all_by_group', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_delete_CG_no_volumes_fast_success( - self, _mock_volume_type, _mock_storage_system, - _mock_db_volumes): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_delete_CG_with_volumes_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - utils.VMAXUtils, - 'find_group_sync_rg_by_target', - return_value="") - @mock.patch.object( - common.VMAXCommon, - '_get_members_of_replication_group', - return_value=()) - @mock.patch.object( - 
common.VMAXCommon, - '_find_consistency_group', - return_value=( - VMAXCommonData.test_CG, - VMAXCommonData.test_CG['name'] + "_" + ( - VMAXCommonData.test_CG['id']))) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_create_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, - _mock_rg): - self.driver.create_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_delete_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage): - self.driver.delete_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_update_CG_add_volume_fast_success( - self, _mock_volume_type, _mock_storage_system): - add_volumes = [] - add_volumes.append(self.data.test_source_volume) - remove_volumes = None - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Multiple volumes - add_volumes.append(self.data.test_source_volume) - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 
'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSIFAST'}) - def test_update_CG_remove_volume_fast_success( - self, _mock_volume_type, _mock_storage_system): - remove_volumes = [] - remove_volumes.append(self.data.test_source_volume) - add_volumes = None - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Multiple volumes - remove_volumes.append(self.data.test_source_volume) - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - - def _cleanup(self): - bExists = os.path.exists(self.config_file_path) - if bExists: - os.remove(self.config_file_path) - shutil.rmtree(self.tempdir) - - -@ddt.ddt -class VMAXFCDriverNoFastTestCase(test.TestCase): - def setUp(self): - - self.data = VMAXCommonData() - - self.tempdir = tempfile.mkdtemp() - super(VMAXFCDriverNoFastTestCase, self).setUp() - self.config_file_path = None - self.create_fake_config_file_no_fast() - self.addCleanup(self._cleanup) - - configuration = mock.Mock() - configuration.cinder_emc_config_file = self.config_file_path - configuration.safe_get.return_value = 'FCNoFAST' - configuration.config_group = 'FCNoFAST' - - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - self.fake_is_v3) - self.mock_object(utils.VMAXUtils, '_is_sync_complete', - return_value=True) - self.mock_object(cinder_utils, 'get_bool_param', - return_value=False) - driver = fc.VMAXFCDriver(configuration=configuration) - driver.db = FakeDB() - driver.common.conn = FakeEcomConnection() - driver.zonemanager_lookup_service = FakeLookupService() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - def create_fake_config_file_no_fast(self): - - doc = minidom.Document() 
- emc = doc.createElement("EMC") - doc.appendChild(emc) - - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("gold") - emc.appendChild(pool) - pool.appendChild(pooltext) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - filename = 'cinder_emc_config_FCNoFAST.xml' - - self.config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - - def fake_ecom_connection(self): - conn = FakeEcomConnection() - return conn - - def fake_is_v3(self, conn, serialNumber): - return False - - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=2.0) - @mock.patch.object( - utils.VMAXUtils, - 'get_pool_capacities', - return_value=(1234, 1200, 1200, 1)) - @mock.patch.object( - 
fast.VMAXFast, - 'is_tiering_policy_enabled', - return_value=False) - @mock.patch.object( - utils.VMAXUtils, - 'find_storageSystem', - return_value=None) - def test_get_volume_stats_no_fast(self, - mock_storage_system, - mock_is_fast_enabled, - mock_capacity, - mock_or): - self.driver.get_volume_stats(True) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_create_volume_no_fast_success( - self, _mock_volume_type, mock_storage_system): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype: stripedmetacount': '4', - 'volume_backend_name': 'FCNoFAST'}) - def test_create_volume_no_fast_striped_success( - self, _mock_volume_type, mock_storage_system): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_create_volume_in_CG_no_fast_success( - self, _mock_volume_type, mock_storage_system): - self.driver.create_volume(self.data.test_volume_CG) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_delete_volume_no_fast_success( - self, _mock_volume_type, mock_storage_system): - self.driver.delete_volume(self.data.test_volume) - - def 
test_create_volume_no_fast_failed(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - self.data.test_failed_volume) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_delete_volume_no_fast_notfound(self, _mock_volume_type): - notfound_delete_vol = {} - notfound_delete_vol['name'] = 'notfound_delete_vol' - notfound_delete_vol['id'] = '10' - notfound_delete_vol['CreationClassName'] = 'Symmm_StorageVolume' - notfound_delete_vol['SystemName'] = self.data.storage_system - notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] - notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - notfound_delete_vol['host'] = self.data.fake_host - name = {} - name['classname'] = 'Symm_StorageVolume' - keys = {} - keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] - keys['SystemName'] = notfound_delete_vol['SystemName'] - keys['DeviceID'] = notfound_delete_vol['DeviceID'] - keys['SystemCreationClassName'] = ( - notfound_delete_vol['SystemCreationClassName']) - name['keybindings'] = keys - notfound_delete_vol['volume_type_id'] = 'abc' - notfound_delete_vol['provider_location'] = None - self.driver.delete_volume(notfound_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'wait_for_job_complete', - return_value=(-1, 'error')) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_delete_volume_failed( - self, _mock_volume_type, mock_storage_system, mock_wait): - self.driver.create_volume(self.data.failed_delete_vol) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, - self.data.failed_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - 
@mock.patch.object( - common.VMAXCommon, - '_is_same_host', - return_value=True) - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_from_storage_group', - return_value=VMAXCommonData.lunmaskctrl_name) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def test_map_lookup_service_no_fast_success( - self, _mock_volume_type, mock_maskingview, mock_is_same_host, - mock_rec): - self.data.test_volume['volume_name'] = "vmax-1234567" - common = self.driver.common - common.get_target_wwns_from_masking_view = mock.Mock( - return_value=VMAXCommonData.target_wwns) - common._get_correct_port_group = mock.Mock( - return_value=self.data.port_group) - lookup_service = self.driver.zonemanager_lookup_service - lookup_service.get_device_mapping_from_network = mock.Mock( - return_value=VMAXCommonData.device_map) - data = self.driver.initialize_connection(self.data.test_volume, - self.data.connector) - common.get_target_wwns_from_masking_view.assert_called_once_with( - VMAXCommonData.storage_system, self.data.test_volume, - VMAXCommonData.connector) - lookup_service.get_device_mapping_from_network.assert_called_once_with( - VMAXCommonData.connector['wwpns'], - VMAXCommonData.target_wwns) - - # Test the lookup service code path. 
- for init, target in data['data']['initiator_target_map'].items(): - self.assertEqual(init, target[0][::-1]) - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'Name': "0001"}, False, {})) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def test_map_no_fast_failed(self, _mock_volume_type, mock_wrap_device): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.data.test_volume, - self.data.connector) - - @mock.patch.object( - common.VMAXCommon, - 'check_ig_instance_name', - return_value=None) - @mock.patch.object( - masking.VMAXMasking, - '_find_initiator_masking_group', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_by_volume', - return_value=VMAXCommonData.lunmaskctrl_name) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_detach_no_fast_last_volume_success( - self, mock_volume_type, mock_mv, mock_ig, mock_check_ig): - # last volume so initiatorGroup will be deleted by terminate connection - self.driver.terminate_connection(self.data.test_source_volume, - self.data.connector) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_size', - return_value='2147483648') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_extend_volume_no_fast_success(self, _mock_volume_type, - _mock_volume_size): - newSize = '2' - self.driver.extend_volume(self.data.test_volume, newSize) - - @mock.patch.object( - utils.VMAXUtils, - 'check_if_volume_is_extendable', - return_value='False') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_extend_volume_striped_no_fast_failed( - self, _mock_volume_type, 
_mock_is_extendable): - newSize = '2' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.data.test_volume, - newSize) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_migrate_volume_no_fast_success(self, _mock_volume_type): - self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, - self.data.test_host) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_create_CG_no_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.create_consistencygroup( - self.data.test_ctxt, self.data.test_CG) - - @mock.patch.object( - FakeDB, - 'volume_get_all_by_group', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_delete_CG_no_volumes_no_fast_success( - self, _mock_volume_type, _mock_storage_system, - _mock_db_volumes): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_delete_CG_with_volumes_no_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - utils.VMAXUtils, - 'find_group_sync_rg_by_target', - return_value="") - @mock.patch.object( - common.VMAXCommon, - 
'_get_members_of_replication_group', - return_value=()) - @mock.patch.object( - common.VMAXCommon, - '_find_consistency_group', - return_value=( - VMAXCommonData.test_CG, - VMAXCommonData.test_CG['name'] + "_" + ( - VMAXCommonData.test_CG['id']))) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_create_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, - _mock_rg): - self.driver.create_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCNoFAST'}) - def test_delete_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage): - self.driver.delete_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - def test_unmanage_no_fast_success(self): - keybindings = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900000', - 'DeviceID': u'1', - 'SystemCreationClassName': u'Symm_StorageSystem'} - provider_location = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings} - - volume = {'name': 'vol1', - 'size': 1, - 'id': '1', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': self.data.fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': self.data.block_size - } - common = self.driver.common - common._initial_setup = mock.Mock( - return_value={'volume_backend_name': 'FCNoFAST', - 
'storagetype:fastpolicy': None}) - utils = self.driver.common.utils - utils.rename_volume = mock.Mock(return_value=None) - self.driver.unmanage(volume) - utils.rename_volume.assert_called_once_with( - common.conn, common._find_lun(volume), '1') - - def test_unmanage_no_fast_failed(self): - keybindings = {'CreationClassName': u'Symm_StorageVolume', - 'SystemName': u'SYMMETRIX+000195900000', - 'DeviceID': u'999', - 'SystemCreationClassName': u'Symm_StorageSystem'} - provider_location = {'classname': 'Symm_StorageVolume', - 'keybindings': keybindings} - - volume = {'name': 'NO_SUCH_VOLUME', - 'size': 1, - 'id': '999', - 'device_id': '999', - 'provider_auth': None, - 'project_id': 'project', - 'display_name': 'No such volume', - 'display_description': 'volume not on the array', - 'volume_type_id': 'abc', - 'provider_location': six.text_type(provider_location), - 'status': 'available', - 'host': self.data.fake_host, - 'NumberOfBlocks': 100, - 'BlockSize': self.data.block_size - } - common = self.driver.common - common._initial_setup = mock.Mock( - return_value={'volume_backend_name': 'FCNoFAST', - 'fastpolicy': None}) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.unmanage, - volume) - - def _cleanup(self): - bExists = os.path.exists(self.config_file_path) - if bExists: - os.remove(self.config_file_path) - shutil.rmtree(self.tempdir) - - -class VMAXFCDriverFastTestCase(test.TestCase): - - def setUp(self): - - self.data = VMAXCommonData() - - self.tempdir = tempfile.mkdtemp() - super(VMAXFCDriverFastTestCase, self).setUp() - self.config_file_path = None - self.create_fake_config_file_fast() - self.addCleanup(self._cleanup) - - self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') - configuration = mock.Mock() - configuration.cinder_emc_config_file = self.config_file_path - configuration.safe_get.return_value = 'FCFAST' - configuration.config_group = 'FCFAST' - - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - 
self.fake_ecom_connection) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - self.fake_is_v3) - self.mock_object(utils.VMAXUtils, '_is_sync_complete', - return_value=True) - self.mock_object(cinder_utils, 'get_bool_param', - return_value=False) - driver = fc.VMAXFCDriver(configuration=configuration) - driver.db = FakeDB() - driver.common.conn = FakeEcomConnection() - driver.zonemanager_lookup_service = None - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - self.driver.masking = masking.VMAXMasking('FC') - - def create_fake_config_file_fast(self): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - - fastPolicy = doc.createElement("FastPolicy") - fastPolicyText = doc.createTextNode("GOLD1") - emc.appendChild(fastPolicy) - fastPolicy.appendChild(fastPolicyText) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("gold") - emc.appendChild(pool) - pool.appendChild(pooltext) - - array = doc.createElement("Array") 
- arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - filename = 'cinder_emc_config_FCFAST.xml' - - self.config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - - def fake_ecom_connection(self): - conn = FakeEcomConnection() - return conn - - def fake_is_v3(self, conn, serialNumber): - return False - - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=2.0) - @mock.patch.object( - fast.VMAXFast, - 'get_capacities_associated_to_policy', - return_value=(1234, 1200, 1200, 1)) - @mock.patch.object( - utils.VMAXUtils, - 'get_pool_capacities', - return_value=(1234, 1200, 1200, 1)) - @mock.patch.object( - fast.VMAXFast, - 'get_tier_policy_by_name', - return_value=None) - @mock.patch.object( - fast.VMAXFast, - 'is_tiering_policy_enabled', - return_value=True) - @mock.patch.object( - utils.VMAXUtils, - 'find_storageSystem', - return_value=None) - def test_get_volume_stats_fast(self, - mock_storage_system, - mock_is_fast_enabled, - mock_get_policy, - mock_pool_capacities, - mock_capacities_associated_to_policy, - mock_or): - self.driver.get_volume_stats(True) - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_create_volume_fast_success( - self, _mock_volume_type, mock_storage_system, mock_pool_policy): - self.driver.create_volume(self.data.test_volume_v2) - - 
@mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'storagetype: stripedmetacount': '4', - 'volume_backend_name': 'FCFAST'}) - def test_create_volume_fast_striped_success( - self, _mock_volume_type, mock_storage_system, mock_pool_policy): - self.driver.create_volume(self.data.test_volume_v2) - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_create_volume_in_CG_fast_success( - self, _mock_volume_type, mock_storage_system, mock_pool_policy): - self.driver.create_volume(self.data.test_volume_CG) - - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_delete_volume_fast_success(self, _mock_volume_type, - mock_storage_group): - self.driver.delete_volume(self.data.test_volume) - - def test_create_volume_fast_failed(self): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - self.data.test_failed_volume) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_delete_volume_fast_notfound(self, _mock_volume_type): - """"Test delete volume with volume not found.""" - notfound_delete_vol = {} - notfound_delete_vol['name'] = 'notfound_delete_vol' - notfound_delete_vol['id'] = '10' - notfound_delete_vol['CreationClassName'] = 
'Symmm_StorageVolume' - notfound_delete_vol['SystemName'] = self.data.storage_system - notfound_delete_vol['DeviceID'] = notfound_delete_vol['id'] - notfound_delete_vol['SystemCreationClassName'] = 'Symm_StorageSystem' - notfound_delete_vol['host'] = self.data.fake_host - name = {} - name['classname'] = 'Symm_StorageVolume' - keys = {} - keys['CreationClassName'] = notfound_delete_vol['CreationClassName'] - keys['SystemName'] = notfound_delete_vol['SystemName'] - keys['DeviceID'] = notfound_delete_vol['DeviceID'] - keys['SystemCreationClassName'] = ( - notfound_delete_vol['SystemCreationClassName']) - name['keybindings'] = keys - notfound_delete_vol['volume_type_id'] = 'abc' - notfound_delete_vol['provider_location'] = None - - self.driver.delete_volume(notfound_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'wait_for_job_complete', - return_value=(-1, 'error')) - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - masking.VMAXMasking, - '_wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_delete_volume_fast_failed( - self, _mock_volume_type, mock_wrapper, - mock_storage_system, mock_pool_policy, mock_wait): - self.driver.create_volume(self.data.failed_delete_vol) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.delete_volume, - self.data.failed_delete_vol) - - @mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - @mock.patch.object( - common.VMAXCommon, - '_is_same_host', - return_value=True) - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_from_storage_group', - return_value=VMAXCommonData.lunmaskctrl_name) - @mock.patch.object( - volume_types, - 
'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def test_map_fast_success(self, _mock_volume_type, mock_maskingview, - mock_is_same_host, mock_rec): - common = self.driver.common - common.get_target_wwns_list = mock.Mock( - return_value=VMAXCommonData.target_wwns) - self.driver.common._get_correct_port_group = mock.Mock( - return_value=self.data.port_group) - data = self.driver.initialize_connection( - self.data.test_volume, self.data.connector) - # Test the no lookup service, pre-zoned case. - common.get_target_wwns_list.assert_called_once_with( - VMAXCommonData.storage_system, self.data.test_volume, - VMAXCommonData.connector) - for init, target in data['data']['initiator_target_map'].items(): - self.assertIn(init[::-1], target) - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'Name': "0001"}, False, {})) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def test_map_fast_failed(self, _mock_volume_type, mock_wrap_device): - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.data.test_volume, - self.data.connector) - - @mock.patch.object( - common.VMAXCommon, - 'check_ig_instance_name', - return_value='myInitGroup') - @mock.patch.object( - common.VMAXCommon, - 'get_masking_views_by_port_group', - return_value=[]) - @mock.patch.object( - masking.VMAXMasking, - 'get_initiator_group_from_masking_view', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - '_find_initiator_masking_group', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_by_volume', - return_value=VMAXCommonData.lunmaskctrl_name) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def 
test_detach_fast_success(self, mock_volume_type, mock_maskingview, - mock_ig, mock_igc, mock_mv, mock_check_ig): - common = self.driver.common - common.get_target_wwns_list = mock.Mock( - return_value=VMAXCommonData.target_wwns) - data = self.driver.terminate_connection(self.data.test_volume, - self.data.connector) - common.get_target_wwns_list.assert_called_once_with( - VMAXCommonData.storage_system, self.data.test_volume, - VMAXCommonData.connector) - numTargetWwns = len(VMAXCommonData.target_wwns) - self.assertEqual(numTargetWwns, len(data['data'])) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_size', - return_value='2147483648') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_extend_volume_fast_success(self, _mock_volume_type, - _mock_volume_size): - newSize = '2' - self.driver.extend_volume(self.data.test_volume, newSize) - - @mock.patch.object( - utils.VMAXUtils, - 'check_if_volume_is_extendable', - return_value='False') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_extend_volume_striped_fast_failed(self, - _mock_volume_type, - _mock_is_extendable): - newSize = '2' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, - self.data.test_volume, - newSize) - - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def 
test_create_snapshot_different_sizes_meta_fast_success( - self, mock_volume_type, - mock_meta, mock_size, mock_pool, mock_policy): - common = self.driver.common - - volumeDict = {'classname': u'Symm_StorageVolume', - 'keybindings': VMAXCommonData.keybindings} - common.provision.create_volume_from_pool = ( - mock.Mock(return_value=(volumeDict, 0))) - common.provision.get_volume_dict_from_job = ( - mock.Mock(return_value=volumeDict)) - common.fast.is_volume_in_default_SG = ( - mock.Mock(return_value=True)) - self.driver.create_snapshot(self.data.test_snapshot) - - @mock.patch.object( - utils.VMAXUtils, - 'parse_file_to_get_array_map', - return_value=None) - def test_create_snapshot_fast_failed(self, mock_pool): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_snapshot, - self.data.test_snapshot) - - @unittest.skip("Skip until bug #1578986 is fixed") - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - return_value=(None, None)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_create_volume_from_same_size_meta_snapshot( - self, mock_volume_type, mock_sync_sv, mock_meta, mock_size, - mock_compare): - self.data.test_volume['volume_name'] = "vmax-1234567" - common = self.driver.common - common.fast.is_volume_in_default_SG = mock.Mock(return_value=True) - self.driver.create_volume_from_snapshot( - self.data.test_volume, self.data.test_volume) - - @mock.patch.object( - utils.VMAXUtils, - 'is_clone_licensed', - return_value=False) - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - 
return_value=(None, None)) - @mock.patch.object( - utils.VMAXUtils, - 'find_replication_service', - return_value=None) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST', - 'FASTPOLICY': 'FC_GOLD1'}) - def test_create_volume_from_snapshot_fast_failed( - self, mock_volume_type, mock_rep_service, mock_sync_sv, - mock_license): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume_from_snapshot, - self.data.test_volume, - VMAXCommonData.test_source_volume) - - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - def test_create_clone_simple_volume_fast_success(self, mock_compare): - extraSpecs = {'storagetype:fastpolicy': 'FC_GOLD1', - 'volume_backend_name': 'FCFAST', - 'isV3': False} - self.driver.common._initial_setup = ( - mock.Mock(return_value=extraSpecs)) - self.driver.common.extraSpecs = extraSpecs - self.driver.utils.is_clone_licensed = ( - mock.Mock(return_value=True)) - FakeDB.volume_get = ( - mock.Mock(return_value=VMAXCommonData.test_source_volume)) - self.data.test_volume['volume_name'] = "vmax-1234567" - self.driver.common.fast.is_volume_in_default_SG = ( - mock.Mock(return_value=True)) - self.driver.utils.isArrayV3 = mock.Mock(return_value=False) - self.driver.common._find_storage_sync_sv_sv = ( - mock.Mock(return_value=(None, None))) - self.driver.create_cloned_volume(self.data.test_volume, - VMAXCommonData.test_source_volume) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_meta_members_capacity_in_byte', - return_value=[1234567, 7654321]) - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_meta_head', - return_value=[VMAXCommonData.test_volume]) - @mock.patch.object( - fast.VMAXFast, - 'get_pool_associated_to_policy', - return_value=1) - 
@mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_create_clone_fast_failed( - self, mock_volume_type, mock_vol, mock_policy, - mock_meta, mock_size, mock_pool): - self.data.test_volume['volume_name'] = "vmax-1234567" - self.driver.common._modify_and_get_composite_volume_instance = ( - mock.Mock(return_value=(1, None))) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.data.test_volume, - VMAXCommonData.test_source_volume) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_migrate_volume_fast_success(self, _mock_volume_type): - self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, - self.data.test_host) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_create_CG_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.create_consistencygroup( - self.data.test_ctxt, self.data.test_CG) - - @mock.patch.object( - FakeDB, - 'volume_get_all_by_group', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_delete_CG_no_volumes_fast_success( - self, _mock_volume_type, _mock_storage_system, - _mock_db_volumes): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, 
VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_delete_CG_with_volumes_fast_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - utils.VMAXUtils, - 'find_group_sync_rg_by_target', - return_value="") - @mock.patch.object( - common.VMAXCommon, - '_get_members_of_replication_group', - return_value=()) - @mock.patch.object( - common.VMAXCommon, - '_find_consistency_group', - return_value=( - VMAXCommonData.test_CG, - VMAXCommonData.test_CG['name'] + "_" + ( - VMAXCommonData.test_CG['id']))) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_create_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage, _mock_cg, _mock_members, - _mock_rg): - self.driver.create_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'FCFAST'}) - def test_delete_snapshot_for_CG_no_fast_success( - self, _mock_volume_type, _mock_storage): - self.driver.delete_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - # Bug 1385450 - @mock.patch.object( - utils.VMAXUtils, - 'is_clone_licensed', - return_value=False) - @mock.patch.object( - utils.VMAXUtils, - 'find_replication_service_capabilities', - return_value={'InstanceID': 'SYMMETRIX+1385450'}) - def test_create_clone_without_license(self, mock_service, mock_license): - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.data.test_volume, - VMAXCommonData.test_source_volume) - - def test_manage_existing_fast_failed(self): - volume = {} - metadata = {'key': 'array', - 'value': '12345'} - poolInstanceName = {} - storageSystem = {} - poolInstanceName['InstanceID'] = "SATA_GOLD1" - storageSystem['InstanceID'] = "SYMMETRIX+00019870000" - volume['volume_metadata'] = [metadata] - volume['name'] = "test-volume" - volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011' - external_ref = {'source-name': '0123'} - common = self.driver.common - common._initial_setup = mock.Mock( - return_value={'volume_backend_name': 'FCFAST', - 'storagetype:fastpolicy': 'GOLD'}) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.manage_existing, - volume, - external_ref) - - def _cleanup(self): - bExists = os.path.exists(self.config_file_path) - if bExists: - os.remove(self.config_file_path) - shutil.rmtree(self.tempdir) - - -class EMCV3DriverTestCase(test.TestCase): - - def setUp(self): - - self.data = VMAXCommonData() - - self.data.storage_system = 'SYMMETRIX-+-000197200056' - - self.tempdir = tempfile.mkdtemp() - super(EMCV3DriverTestCase, self).setUp() - self.config_file_path = None - self.create_fake_config_file_v3() - self.addCleanup(self._cleanup) - self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') - self.set_configuration() - - def set_configuration(self): - configuration = mock.MagicMock() - configuration.cinder_emc_config_file = self.config_file_path - configuration.config_group = 'V3' - - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) - self.mock_object(cinder_utils, 'get_bool_param', - return_value=False) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - self.fake_is_v3) - self.patcher = 
mock.patch( - 'oslo_service.loopingcall.FixedIntervalLoopingCall', - new=unit_utils.ZeroIntervalLoopingCall) - self.patcher.start() - - driver = fc.VMAXFCDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - - def create_fake_config_file_v3(self): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("SRP_1") - emc.appendChild(pool) - pool.appendChild(pooltext) - - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - slo = doc.createElement("ServiceLevel") - slotext = doc.createTextNode("Bronze") - emc.appendChild(slo) - slo.appendChild(slotext) - - workload = doc.createElement("Workload") - workloadtext = doc.createTextNode("DSS") - emc.appendChild(workload) - workload.appendChild(workloadtext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - 
emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - filename = 'cinder_emc_config_V3.xml' - - self.config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - - def fake_ecom_connection(self): - self.conn = FakeEcomConnection() - return self.conn - - def fake_is_v3(self, conn, serialNumber): - return True - - def fake_gather_info(self): - return - - def default_extraspec(self): - return {'storagetype:pool': 'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze', - 'storagetype:array': '1234567891011', - 'isV3': True, - 'portgroupname': 'OS-portgroup-PG'} - - def default_vol(self): - vol = EMC_StorageVolume() - vol['name'] = self.data.test_volume['name'] - vol['CreationClassName'] = 'Symm_StorageVolume' - vol['ElementName'] = self.data.test_volume['id'] - vol['DeviceID'] = self.data.test_volume['device_id'] - vol['Id'] = self.data.test_volume['id'] - vol['SystemName'] = self.data.storage_system - vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] - vol['BlockSize'] = self.data.test_volume['BlockSize'] - # Added vol to vol.path - vol['SystemCreationClassName'] = 'Symm_StorageSystem' - vol.path = vol - vol.path.classname = vol['CreationClassName'] - return vol - - def default_storage_group(self): - storagegroup = {} - storagegroup['CreationClassName'] = ( - self.data.storagegroup_creationclass) - storagegroup['ElementName'] = 'no_masking_view' - return storagegroup - - @mock.patch.object( - masking.VMAXMasking, - '_delete_mv_ig_and_sg') - def test_last_vol_in_SG_with_MV(self, mock_delete): - conn = self.fake_ecom_connection() - common = self.driver.common - controllerConfigService = ( - common.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - extraSpecs = self.default_extraspec() - - storageGroupName = self.data.storagegroupname - storageGroupInstanceName = ( - 
common.utils.find_storage_masking_group( - conn, controllerConfigService, storageGroupName)) - vol = self.default_vol() - self.assertTrue(common.masking._last_vol_in_SG( - conn, controllerConfigService, storageGroupInstanceName, - storageGroupName, vol, vol['name'], extraSpecs)) - - def test_last_vol_in_SG_no_MV(self): - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.common.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - extraSpecs = self.default_extraspec() - self.driver.common.masking.get_masking_view_from_storage_group = ( - mock.Mock(return_value=None)) - self.driver.common.masking.utils.get_existing_instance = ( - mock.Mock(return_value=None)) - storagegroup = self.default_storage_group() - - vol = self.default_vol() - self.assertTrue(self.driver.common.masking._last_vol_in_SG( - conn, controllerConfigService, storagegroup, - storagegroup['ElementName'], vol, vol['name'], extraSpecs)) - - def test_last_vol_in_SG_no_MV_fail(self): - self.driver.common.masking.utils.get_existing_instance = ( - mock.Mock(return_value='value')) - conn = self.fake_ecom_connection() - controllerConfigService = ( - self.driver.common.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - extraSpecs = self.default_extraspec() - vol = self.default_vol() - storagegroup = self.default_storage_group() - storagegroup['ElementName'] = 'no_masking_view' - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common.masking._last_vol_in_SG, - conn, controllerConfigService, - storagegroup, storagegroup['ElementName'], vol, - vol['name'], extraSpecs) - - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=2.0) - @mock.patch.object( - utils.VMAXUtils, - 'find_storageSystem', - return_value={'Name': VMAXCommonData.storage_system_v3}) - def test_get_volume_stats_v3( - self, mock_storage_system, mock_or): - self.driver.common.pool_info['reserved_percentage'] = 5 - 
self.driver.get_volume_stats(True) - self.driver.common.pool_info['reserved_percentage'] = 0 - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_volume_v3_success( - self, _mock_volume_type, mock_storage_system): - self.data.test_volume_v3['host'] = self.data.fake_host_v3 - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.driver.common._get_or_create_storage_group_v3 = mock.Mock( - return_value = self.data.default_sg_instance_name) - self.driver.create_volume(self.data.test_volume_v3) - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=(VMAXCommonData.extra_specs_no_slo)) - @mock.patch.object( - common.VMAXCommon, - '_get_or_create_storage_group_v3', - return_value=(VMAXCommonData.default_sg_instance_name)) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_volume_v3_no_slo_success( - self, _mock_volume_type, mock_storage_system, mock_sg, - mock_initial_setup): - # This the no fast scenario - v3_vol = self.data.test_volume_v3 - v3_vol['host'] = 'HostX@Backend#NONE+SRP_1+1234567891011' - self.driver.create_volume(v3_vol) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_volume_v3_slo_NONE_success( - self, _mock_volume_type, mock_storage_system): - # NONE is a valid SLO - v3_vol = self.data.test_volume_v3 - v3_vol['host'] = 
'HostX@Backend#NONE+SRP_1+1234567891011' - instid = 'SYMMETRIX-+-000197200056-+-NONE:DSS-+-F-+-0-+-SR-+-SRP_1' - storagepoolsetting = ( - {'InstanceID': instid, - 'CreationClassName': 'CIM_StoragePoolSetting'}) - self.driver.common.provisionv3.get_storage_pool_setting = mock.Mock( - return_value=storagepoolsetting) - extraSpecs = {'storagetype:pool': 'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'NONE', - 'storagetype:array': '1234567891011', - 'isV3': True, - 'portgroupname': 'OS-portgroup-PG'} - self.driver.common._initial_setup = mock.Mock( - return_value=extraSpecs) - self.driver.common._get_or_create_storage_group_v3 = mock.Mock( - return_value = self.data.default_sg_instance_name) - - self.driver.create_volume(v3_vol) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_volume_v3_invalid_slo_failed( - self, _mock_volume_type, mock_storage_system): - extraSpecs = {'storagetype:pool': 'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bogus', - 'storagetype:array': '1234567891011', - 'isV3': True, - 'portgroupname': 'OS-portgroup-PG'} - self.driver.common._initial_setup = mock.Mock( - return_value=extraSpecs) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, - self.data.test_volume) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_volume_in_CG_v3_success( - self, _mock_volume_type, mock_storage_system): - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - 
self.driver.common._get_or_create_storage_group_v3 = mock.Mock( - return_value = self.data.default_sg_instance_name) - self.driver.create_volume(self.data.test_volume_CG_v3) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_delete_volume_v3_success(self, _mock_volume_type): - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.driver.delete_volume(self.data.test_volume_v3) - - @mock.patch.object( - utils.VMAXUtils, - 'get_v3_default_sg_instance_name', - return_value=(None, None, VMAXCommonData.default_sg_instance_name)) - @mock.patch.object( - utils.VMAXUtils, - 'is_clone_licensed', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_snapshot_v3_success( - self, mock_type, mock_pool, mock_licence, mock_sg): - common = self.driver.common - with mock.patch.object(common, '_initial_setup', - return_value=self.default_extraspec()): - self.driver.create_snapshot(self.data.test_snapshot_v3) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_delete_snapshot_v3_success(self, mock_volume_type): - self.data.test_volume_v3['volume_name'] = "vmax-1234567" - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.driver.delete_snapshot(self.data.test_snapshot_v3) - - @mock.patch.object( - utils.VMAXUtils, - 'get_v3_default_sg_instance_name', - return_value=(None, None, VMAXCommonData.default_sg_instance_name)) - @mock.patch.object( - common.VMAXCommon, - '_get_or_create_storage_group_v3', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - utils.VMAXUtils, - 
'is_clone_licensed', - return_value=True) - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - @mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume) - def test_create_cloned_volume_v3_success( - self, mock_volume_db, mock_type, mock_pool, mock_compare, - mock_licence, mock_sg, mock_sg_name): - sourceVol = self.data.test_volume_v3.copy() - sourceVol['volume_name'] = "vmax-1234567" - sourceVol['size'] = 100 - cloneVol = {} - cloneVol['name'] = 'vol1' - cloneVol['id'] = '1' - cloneVol['CreationClassName'] = 'Symmm_StorageVolume' - cloneVol['SystemName'] = self.data.storage_system - cloneVol['DeviceID'] = cloneVol['id'] - cloneVol['SystemCreationClassName'] = 'Symm_StorageSystem' - cloneVol['volume_type_id'] = 'abc' - cloneVol['provider_location'] = None - cloneVol['NumberOfBlocks'] = 100 - cloneVol['BlockSize'] = self.data.block_size - cloneVol['host'] = self.data.fake_host_v3 - cloneVol['size'] = 100 - common = self.driver.common - conn = FakeEcomConnection() - sourceInstance = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - with mock.patch.object(common, '_initial_setup', - return_value=self.default_extraspec()): - with mock.patch.object(common, '_find_lun', - return_value=sourceInstance): - self.driver.create_cloned_volume(cloneVol, sourceVol) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_create_CG_v3_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.create_consistencygroup( - self.data.test_ctxt, 
self.data.test_volume_CG_v3) - - @mock.patch.object( - FakeDB, - 'volume_get_all_by_group', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_delete_CG_no_volumes_v3_success( - self, _mock_volume_type, _mock_storage_system, - _mock_db_volumes): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_delete_CG_with_volumes_v3_success( - self, _mock_volume_type, _mock_storage_system): - self.driver.delete_consistencygroup( - self.data.test_ctxt, self.data.test_CG, []) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_migrate_volume_v3_success(self, _mock_volume_type): - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.driver.migrate_volume(self.data.test_ctxt, self.data.test_volume, - self.data.test_host) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_element_name', - return_value='1') - @mock.patch.object( - provision_v3.VMAXProvisionV3, - '_find_new_storage_group', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - utils.VMAXUtils, - 'wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - utils.VMAXUtils, - '_get_fast_settings_from_storage_group', - return_value='Gold+DSS_REP') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_retype_volume_v3_success( - self, _mock_volume_type, 
mock_fast_settings, - mock_storage_group, mock_found_SG, mock_element_name): - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.assertTrue(self.driver.retype( - self.data.test_ctxt, self.data.test_volume_v3, self.data.new_type, - self.data.diff, self.data.test_host_v3)) - - @mock.patch.object( - utils.VMAXUtils, - '_get_fast_settings_from_storage_group', - return_value='Bronze+DSS') - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_retype_volume_same_host_failure( - self, _mock_volume_type, mock_fast_settings): - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.assertFalse(self.driver.retype( - self.data.test_ctxt, self.data.test_volume_v3, self.data.new_type, - self.data.diff, self.data.test_host_v3)) - - @mock.patch.object( - utils.VMAXUtils, - 'find_volume_instance', - return_value=( - FakeEcomConnection().EnumerateInstanceNames( - "EMC_StorageVolume")[0])) - @mock.patch.object( - common.VMAXCommon, - '_create_v3_volume', - return_value=(0, {}, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'find_group_sync_rg_by_target', - return_value=1) - @mock.patch.object( - common.VMAXCommon, - '_get_members_of_replication_group', - return_value=()) - @mock.patch.object( - common.VMAXCommon, - '_find_consistency_group', - return_value=( - VMAXCommonData.test_CG, - VMAXCommonData.test_CG['name'] + "_" + ( - VMAXCommonData.test_CG['id']))) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_volumetype_extraspecs', - return_value={'pool_name': u'Bronze+DSS+SRP_1+1234567891011'}) - def test_create_cgsnapshot_v3_success( - self, _mock_volume_type, _mock_storage, _mock_cg, - _mock_members, mock_rg, mock_create_vol, mock_find): - volume = {} - snapshot = {} 
- snapshots = [] - volume['volume_type_id'] = 'abc' - volume['size'] = '123' - volume['id'] = '123' - snapshot['volume'] = volume - snapshot['id'] = '456' - snapshots.append(snapshot) - provisionv3 = self.driver.common.provisionv3 - provisionv3.create_group_replica = mock.Mock(return_value=(0, None)) - self.driver.create_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, snapshots) - repServ = self.conn.EnumerateInstanceNames("EMC_ReplicationService")[0] - intervals_retries_dict = ( - {'storagetype:interval': 0, 'storagetype:retries': 0}) - provisionv3.create_group_replica.assert_called_once_with( - self.conn, repServ, - VMAXCommonData.test_CG, - VMAXCommonData.test_CG, '84ab', - intervals_retries_dict) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_delete_cgsnapshot_v3_success( - self, _mock_volume_type, _mock_storage): - self.driver.delete_cgsnapshot( - self.data.test_ctxt, self.data.test_CG_snapshot, []) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system_v3)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_update_CG_add_volume_v3_success( - self, _mock_volume_type, _mock_storage_system): - add_volumes = [] - add_volumes.append(self.data.test_source_volume) - remove_volumes = None - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Multiple volumes - add_volumes.append(self.data.test_source_volume) - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Can't find CG - self.driver.common._find_consistency_group = mock.Mock( - return_value=(None, 'cg_name')) 
- self.assertRaises(exception.ConsistencyGroupNotFound, - self.driver.update_consistencygroup, - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system_v3)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_update_CG_remove_volume_v3_success( - self, _mock_volume_type, _mock_storage_system): - remove_volumes = [] - remove_volumes.append(self.data.test_source_volume) - add_volumes = None - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - # Multiple volumes - remove_volumes.append(self.data.test_source_volume) - self.driver.update_consistencygroup( - self.data.test_ctxt, self.data.test_CG, - add_volumes, remove_volumes) - - @mock.patch.object( - utils.VMAXUtils, - 'insert_live_migration_record') - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_element_name', - return_value='1') - @mock.patch.object( - common.VMAXCommon, - '_is_same_host', - return_value=True) - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_from_storage_group', - return_value=VMAXCommonData.lunmaskctrl_name) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_map_v3_success( - self, _mock_volume_type, mock_maskingview, mock_is_same_host, - mock_element_name, mock_rec): - common = self.driver.common - common.get_target_wwns_list = mock.Mock( - return_value=VMAXCommonData.target_wwns) - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.driver.common._get_correct_port_group = mock.Mock( - return_value=self.data.port_group) - data = self.driver.initialize_connection( - self.data.test_volume_v3, self.data.connector) - # Test the no lookup service, pre-zoned case. 
- common.get_target_wwns_list.assert_called_once_with( - VMAXCommonData.storage_system, self.data.test_volume_v3, - VMAXCommonData.connector) - for init, target in data['data']['initiator_target_map'].items(): - self.assertIn(init[::-1], target) - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'Name': "0001"}, False, {})) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_map_v3_failed(self, _mock_volume_type, mock_wrap_device): - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.initialize_connection, - self.data.test_volume, - self.data.connector) - - @mock.patch.object( - masking.VMAXMasking, - 'get_port_group_from_masking_view', - return_value='myPortGroup') - @mock.patch.object( - masking.VMAXMasking, - 'remove_and_reset_members') - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_element_name', - return_value='1') - @mock.patch.object( - common.VMAXCommon, - 'check_ig_instance_name', - return_value='myInitGroup') - @mock.patch.object( - common.VMAXCommon, - 'get_masking_views_by_port_group', - return_value=[]) - @mock.patch.object( - masking.VMAXMasking, - 'get_initiator_group_from_masking_view', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - '_find_initiator_masking_group', - return_value='myInitGroup') - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_from_storage_group', - return_value=[VMAXCommonData.mv_instance_name]) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - def test_detach_v3_success(self, mock_volume_type, mock_maskingview, - mock_ig, mock_igc, mock_mv, mock_check_ig, - mock_element_name, mock_remove, mock_pg): - common = self.driver.common - with mock.patch.object(common, 'get_target_wwns_list', - 
return_value=VMAXCommonData.target_wwns): - with mock.patch.object(common, '_initial_setup', - return_value=self.default_extraspec()): - data = self.driver.terminate_connection( - self.data.test_volume_v3, self.data.connector) - common.get_target_wwns_list.assert_called_once_with( - VMAXCommonData.storage_system, - self.data.test_volume_v3, - VMAXCommonData.connector) - numTargetWwns = len(VMAXCommonData.target_wwns) - self.assertEqual(numTargetWwns, len(data['data'])) - - # Bug https://bugs.launchpad.net/cinder/+bug/1440154 - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - @mock.patch.object( - FakeDB, - 'volume_get', - return_value=VMAXCommonData.test_source_volume_v3) - @mock.patch.object( - provision_v3.VMAXProvisionV3, - 'create_element_replica') - @mock.patch.object( - utils.VMAXUtils, - 'find_sync_sv_by_volume', - return_value=(None, None)) - def test_create_clone_v3_assert_clean_up_target_volume( - self, mock_sync, mock_create_replica, mock_volume_db, - mock_type, moke_pool): - self.data.test_volume['volume_name'] = "vmax-1234567" - e = exception.VolumeBackendAPIException('CreateElementReplica Ex') - common = self.driver.common - common.utils.is_clone_licensed = ( - mock.Mock(return_value=True)) - volumeDict = {'classname': u'Symm_StorageVolume', - 'keybindings': VMAXCommonData.keybindings} - common._create_v3_volume = ( - mock.Mock(return_value=(0, volumeDict, self.data.storage_system))) - conn = self.fake_ecom_connection() - storageConfigService = [] - storageConfigService = {} - storageConfigService['SystemName'] = VMAXCommonData.storage_system - storageConfigService['CreationClassName'] = ( - self.data.stconf_service_creationclass) - common._delete_from_pool_v3 = mock.Mock(return_value=0) - mock_create_replica.side_effect = e - 
self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_cloned_volume, - self.data.test_volume_v3, - VMAXCommonData.test_source_volume_v3) - extraSpecs = common._initial_setup(self.data.test_volume_v3) - targetInstance = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - deviceID = targetInstance['DeviceID'] - common._delete_from_pool_v3(storageConfigService, targetInstance, - targetInstance['Name'], deviceID, - extraSpecs) - common._delete_from_pool_v3.assert_called_with(storageConfigService, - targetInstance, - targetInstance['Name'], - deviceID, - extraSpecs) - - def test_get_remaining_slo_capacity_wlp(self): - conn = self.fake_ecom_connection() - array_info = {'Workload': u'DSS', 'SLO': u'Bronze'} - storagesystem = self.data.storage_system_v3 - srpPoolInstanceName = {} - srpPoolInstanceName['InstanceID'] = ( - self.data.storage_system_v3 + '+U+' + 'SRP_1') - srpPoolInstanceName['CreationClassName'] = ( - 'Symm_VirtualProvisioningPool') - srpPoolInstanceName['ElementName'] = 'SRP_1' - - remainingCapacityGb = ( - self.driver.common.provisionv3._get_remaining_slo_capacity_wlp( - conn, srpPoolInstanceName, array_info, storagesystem)) - remainingSLOCapacityGb = self.driver.common.utils.convert_bits_to_gbs( - self.data.remainingSLOCapacity) - self.assertEqual(remainingSLOCapacityGb, remainingCapacityGb) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_element_name', - return_value='1') - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_size', - return_value='2147483648') - def test_extend_volume(self, mock_volume_size, mock_element_name): - newSize = '2' - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.driver.extend_volume(self.data.test_volume_v3, newSize) - - def test_extend_volume_smaller_size_exception(self): - test_local_volume = {'name': 'vol1', - 'size': 4, - 'volume_name': 'vol1', - 'id': 'vol1', - 'device_id': '1', - 'provider_auth': None, - 'project_id': 'project', - 
'display_name': 'vol1', - 'display_description': 'test volume', - 'volume_type_id': 'abc', - 'provider_location': six.text_type( - self.data.provider_location), - 'status': 'available', - 'host': self.data.fake_host_v3, - 'NumberOfBlocks': 100, - 'BlockSize': self.data.block_size - } - newSize = '2' - self.driver.common._initial_setup = mock.Mock( - return_value=self.default_extraspec()) - self.assertRaises( - exception.VolumeBackendAPIException, - self.driver.extend_volume, - test_local_volume, newSize) - - def test_extend_volume_exception(self): - common = self.driver.common - newsize = '2' - common._initial_setup = mock.Mock(return_value=None) - common._find_lun = mock.Mock(return_value=None) - self.assertRaises( - exception.VolumeBackendAPIException, - common.extend_volume, - self.data.test_volume, newsize) - - def test_extend_volume_size_tally_exception(self): - common = self.driver.common - newsize = '2' - self.driver.common._initial_setup = mock.Mock( - return_value=self.data.extra_specs) - vol = {'SystemName': self.data.storage_system} - common._find_lun = mock.Mock(return_value=vol) - common._extend_v3_volume = mock.Mock(return_value=(0, vol)) - common.utils.find_volume_instance = mock.Mock( - return_value='2147483648') - common.utils.get_volume_size = mock.Mock(return_value='2147483646') - self.assertRaises( - exception.VolumeBackendAPIException, - common.extend_volume, - self.data.test_volume, newsize) - - def _cleanup(self): - bExists = os.path.exists(self.config_file_path) - if bExists: - os.remove(self.config_file_path) - shutil.rmtree(self.tempdir) - - -class EMCV3MultiPoolDriverTestCase(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - self.vol_v3 = self.data.test_volume_v4 - self.vol_v3['provider_location'] = ( - six.text_type(self.data.provider_location_multi_pool)) - - super(EMCV3MultiPoolDriverTestCase, self).setUp() - self.set_configuration() - - def set_configuration(self): - configuration = mock.Mock() - 
configuration.safe_get.return_value = 'MULTI_POOL_V3' - configuration.config_group = 'MULTI_POOL_V3' - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) - self.mock_object(common.VMAXCommon, '_gather_info', - self.fake_gather_info) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - return_value=True) - self.mock_object(utils.VMAXUtils, '_is_sync_complete', - return_value=True) - self.mock_object(common.VMAXCommon, - '_get_multi_pool_support_enabled_flag', - return_value=True) - driver = fc.VMAXFCDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - def create_fake_config_file_multi_pool_v3(self, tempdir): - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("SRP_1") - emc.appendChild(pool) - pool.appendChild(pooltext) - - array = doc.createElement("Array") - arraytext = 
doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - filename = 'cinder_emc_config_V3.xml' - - config_file_path = tempdir + '/' + filename - - f = open(config_file_path, 'w') - doc.writexml(f) - f.close() - return config_file_path - - def create_fake_config_file_legacy_v3(self, tempdir): - - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - pool = doc.createElement("Pool") - pooltext = doc.createTextNode("SRP_1") - emc.appendChild(pool) - pool.appendChild(pooltext) - - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - slo = doc.createElement("ServiceLevel") - slotext = doc.createTextNode("Silver") - emc.appendChild(slo) - slo.appendChild(slotext) - - workload = 
doc.createElement("Workload") - workloadtext = doc.createTextNode("OLTP") - emc.appendChild(workload) - workload.appendChild(workloadtext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - filename = 'cinder_emc_config_V3.xml' - - config_file_path = tempdir + '/' + filename - - f = open(config_file_path, 'w') - doc.writexml(f) - f.close() - return config_file_path - - def fake_ecom_connection(self): - self.conn = FakeEcomConnection() - return self.conn - - def fake_gather_info(self): - return - - def default_array_info_list(self): - return [{'EcomServerIp': u'1.1.1.1', - 'EcomServerPort': 10, - 'EcomUserName': u'user', - 'EcomPassword': u'pass', - 'PoolName': u'SRP_1', - 'PortGroup': u'OS-portgroup-PG', - 'SerialNumber': 1234567891011, - 'SLO': u'Bronze', - 'Workload': u'DSS'}] - - def array_info_list_without_slo(self): - return [{'EcomServerIp': u'1.1.1.1', - 'EcomServerPort': 10, - 'EcomUserName': u'user', - 'EcomPassword': u'pass', - 'PoolName': u'SRP_1', - 'PortGroup': u'OS-portgroup-PG', - 'SerialNumber': 1234567891011}] - - def multiple_array_info_list(self): - return [{'EcomServerIp': u'1.1.1.1', - 'EcomServerPort': 10, - 'EcomUserName': u'user', - 'EcomPassword': u'pass', - 'PoolName': u'SRP_1', - 'PortGroup': u'OS-portgroup-PG', - 'SerialNumber': 1234567891011, - 'SLO': u'Bronze', - 'Workload': u'DSS'}, - {'EcomServerIp': u'1.1.1.1', - 'EcomServerPort': 10, - 'EcomUserName': u'user', - 'EcomPassword': u'pass', - 'PoolName': u'SRP_1', - 'PortGroup': u'OS-portgroup-PG', - 'SerialNumber': 1234567891011, - 'SLO': u'Silver', - 'Workload': u'OLTP'}] - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'MULTI_POOL_BE', - 'pool_name': 'Bronze+DSS+SRP_1+1234567891011'}) - def 
test_initial_setup(self, mock_vol_types): - tempdir = tempfile.mkdtemp() - config_file_path = self.create_fake_config_file_multi_pool_v3(tempdir) - with mock.patch.object( - self.driver.common, '_register_config_file_from_config_group', - return_value=config_file_path): - extraSpecs = self.driver.common._initial_setup(self.vol_v3) - self.assertEqual('SRP_1', extraSpecs['storagetype:pool']) - self.assertEqual('DSS', extraSpecs['storagetype:workload']) - self.assertEqual('Bronze', extraSpecs['storagetype:slo']) - self.assertEqual('1234567891011', extraSpecs['storagetype:array']) - self.assertEqual('OS-portgroup-PG', extraSpecs['portgroupname']) - self.assertTrue(extraSpecs['isV3']) - self.assertTrue(extraSpecs['MultiPoolSupport']) - self.assertEqual('Bronze+DSS+SRP_1+1234567891011', - extraSpecs['pool_name']) - self._cleanup(tempdir, config_file_path) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'MULTI_POOL_BE', - 'pool_name': 'Bronze+DSS+SRP_1+1234567891011'}) - def test_initial_setup_with_legacy_file(self, mock_vol_types): - # Test with legacy config file and verify - # if the values for SLO and workload are used from - # the pool_name and not the config file - tempdir = tempfile.mkdtemp() - config_file_path = self.create_fake_config_file_legacy_v3(tempdir) - with mock.patch.object( - self.driver.common, '_register_config_file_from_config_group', - return_value=config_file_path): - extraSpecs = self.driver.common._initial_setup(self.vol_v3) - self.assertEqual('DSS', extraSpecs['storagetype:workload']) - self.assertEqual('Bronze', extraSpecs['storagetype:slo']) - self._cleanup(tempdir, config_file_path) - - def test_initial_setup_invalid_volume(self): - # Test with volume which don't have pool_name - tempdir = tempfile.mkdtemp() - config_file_path = self.create_fake_config_file_multi_pool_v3(tempdir) - with mock.patch.object( - self.driver.common, '_register_config_file_from_config_group', - 
return_value=config_file_path): - invalid_vol_v3 = self.data.test_volume_v4.copy() - invalid_vol_v3.pop('host', None) - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common._initial_setup, - invalid_vol_v3) - self._cleanup(tempdir, config_file_path) - - def test_validate_pool(self): - v3_valid_pool = self.data.test_volume_v4.copy() - # Pool aware scheduler enabled - v3_valid_pool['host'] = self.data.fake_host_3_v3 - # Validate pool uses extraSpecs as a new argument - # Use default extraSpecs as the argument - pool = self.driver.common._validate_pool( - v3_valid_pool, self.data.multi_pool_extra_specs) - self.assertEqual('Bronze+DSS+SRP_1+1234567891011', pool) - - def test_validate_pool_invalid_pool_name(self): - # Validate using older volume dictionary - # and check if a exception is raised if multi_pool_support - # is enabled and pool_name is not specified - extraSpecs = self.data.multi_pool_extra_specs - invalid_pool_name = extraSpecs.copy() - invalid_pool_name['pool_name'] = 'not_valid' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common._validate_pool, - self.data.test_volume_v4, invalid_pool_name) - - def test_validate_pool_invalid_host(self): - # Cannot get the pool from the host - v3_valid_pool = self.data.test_volume_v4.copy() - v3_valid_pool['host'] = 'HostX@Backend' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common._validate_pool, - v3_valid_pool) - - def test_validate_pool_legacy(self): - # Legacy test. 
Provider Location does not have the version - v3_valid_pool = self.data.test_volume_v4.copy() - v3_valid_pool['host'] = self.data.fake_host_3_v3 - v3_valid_pool['provider_location'] = self.data.provider_location - pool = self.driver.common._validate_pool(v3_valid_pool) - self.assertIsNone(pool) - - @mock.patch.object( - utils.VMAXUtils, - 'override_ratio', - return_value=2.0) - @mock.patch.object( - utils.VMAXUtils, - 'find_storageSystem', - return_value={'Name': VMAXCommonData.storage_system_v3}) - def test_get_volume_stats_v3( - self, mock_storage_system, mock_or): - self.driver.common.pool_info['reserved_percentage'] = 5 - self.driver.get_volume_stats(True) - self.driver.common.pool_info['reserved_percentage'] = 0 - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_or_create_storage_group_v3', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_create_volume_multi_slo_success( - self, mock_storage_system, mock_sg, mock_is): - self.vol_v3['host'] = self.data.fake_host_3_v3 - self.vol_v3['provider_location'] = None - model_update = self.driver.create_volume(self.vol_v3) - # Verify if the device id is provided in the output - provider_location = model_update['provider_location'] - provider_location = ast.literal_eval(provider_location) - keybindings = provider_location['keybindings'] - device_id = keybindings['DeviceID'] - self.assertEqual('1', device_id) - - @mock.patch.object( - masking.VMAXMasking, - 'get_associated_masking_groups_from_device', - return_value=VMAXCommonData.storagegroups) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, 
VMAXCommonData.storage_system)) - def test_delete_volume_multi_slo_success( - self, mock_storage_system, mock_is, mock_mv): - provider_location = ( - {'classname': 'Symm_StorageVolume', - 'keybindings': - {'CreationClassName': 'Symm_StorageVolume', - 'SystemName': 'SYMMETRIX+000195900551', - 'DeviceID': '1', - 'SystemCreationClassName': 'Symm_StorageSystem' - } - }) - volumeInstanceName = ( - {'NumberOfBlocks': 100, - 'ElementName': '1', - 'Name': 'vol1', - 'BlockSize': 512, - 'provider_location': six.text_type(provider_location), - 'SystemName': 'SYMMETRIX+000195900551', - 'DeviceID': '1', - 'CreationClassName': 'Symm_StorageVolume', - 'Id': '1', - 'SystemCreationClassName': 'Symm_StorageSystem'}) - self.driver.delete_volume(self.vol_v3) - masking = self.driver.common.masking - get_groups_from_device = ( - masking.get_associated_masking_groups_from_device) - get_groups_from_device.assert_called_once_with( - self.conn, volumeInstanceName) - - @mock.patch.object( - common.VMAXCommon, - '_get_or_create_storage_group_v3', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_create_volume_in_CG_multi_slo_success( - self, mock_storage_system, mock_is, mock_sg): - self.data.test_volume_CG_v3['provider_location'] = None - model_update = self.driver.create_volume(self.data.test_volume_CG_v3) - # Verify if the device id is provided in the output - provider_location = model_update['provider_location'] - provider_location = ast.literal_eval(provider_location) - keybindings = provider_location['keybindings'] - device_id = keybindings['DeviceID'] - self.assertEqual('1', device_id) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_element_name', - return_value='1') - @mock.patch.object( - common.VMAXCommon, - 
'_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - provision_v3.VMAXProvisionV3, - '_find_new_storage_group', - return_value=VMAXCommonData.default_sg_instance_name) - @mock.patch.object( - utils.VMAXUtils, - 'wrap_get_storage_group_from_volume', - return_value=None) - @mock.patch.object( - utils.VMAXUtils, - '_get_fast_settings_from_storage_group', - return_value='Gold+DSS_REP') - def test_retype_volume_multi_slo_success( - self, mock_fast_settings, - mock_storage_group, mock_found_SG, mock_is, mock_element_name): - self.assertTrue(self.driver.retype( - self.data.test_ctxt, self.data.test_volume_v4, self.data.new_type, - self.data.diff, self.data.test_host_1_v3)) - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - # There is only one unique array in the conf file - def test_create_CG_multi_slo_success( - self, _mock_storage_system, mock_is): - self.driver.create_consistencygroup( - self.data.test_ctxt, self.data.test_CG) - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_members_of_replication_group', - return_value=None) - @mock.patch.object( - FakeDB, - 'volume_get_all_by_group', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_delete_CG_no_volumes_multi_slo_success( - self, _mock_storage_system, - _mock_db_volumes, _mock_members, mock_is): - # This is a CG delete with no volumes - # there won't be a deleted status - model_update = {} - ret_model_update, ret_volumes_model_update = ( - self.driver.delete_consistencygroup(self.data.test_ctxt, - self.data.test_CG, [])) - 
self.assertEqual(model_update, ret_model_update) - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_delete_CG_with_volumes_multi_slo_success( - self, _mock_storage_system, mock_is): - # Check for the status deleted after a successful delete CG - model_update = {'status': 'deleted'} - ret_model_update, ret_volumes_model_update = ( - self.driver.delete_consistencygroup(self.data.test_ctxt, - self.data.test_CG, [])) - self.assertEqual(model_update, ret_model_update) - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - def test_migrate_volume_v3_success(self, mock_is): - retVal, retList = self.driver.migrate_volume( - self.data.test_ctxt, self.data.test_volume_v4, - self.data.test_host_1_v3) - self.assertTrue(retVal) - - @mock.patch.object( - utils.VMAXUtils, - 'get_volume_element_name', - return_value='1') - @mock.patch.object( - utils.VMAXUtils, - 'get_v3_default_sg_instance_name', - return_value=(None, None, VMAXCommonData.default_sg_instance_name)) - @mock.patch.object( - utils.VMAXUtils, - 'is_clone_licensed', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_create_snapshot_v3_success( - self, mock_pool, mock_is, mock_license, mock_sg, mock_element): - self.data.test_volume_v4['volume_name'] = "vmax-1234567" - self.driver.create_snapshot(self.data.test_snapshot_1_v3) - utils = self.driver.common.provisionv3.utils - utils.get_v3_default_sg_instance_name.assert_called_once_with( - self.conn, u'SRP_1', u'Bronze', u'DSS', u'SYMMETRIX+000195900551', - False) - - 
@mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.multi_pool_extra_specs) - def test_delete_snapshot_v3_success(self, mock_is): - masking = self.driver.common.masking - with mock.patch.object( - masking, 'get_associated_masking_groups_from_device', - return_value=self.data.storagegroups): - self.driver.delete_snapshot(self.data.test_snapshot_1_v3) - - @mock.patch.object( - provision_v3.VMAXProvisionV3, - 'get_srp_pool_stats', - return_value=(100, 10, 1, 20, False)) - def test_update_volume_stats_single_array_info(self, mock_stats): - self.driver.common.pool_info['reserved_percentage'] = 5 - self.driver.common.pool_info['arrays_info'] = ( - self.default_array_info_list()) - self.driver.common.multiPoolSupportEnabled = True - data = self.driver.common.update_volume_stats() - pools = data['pools'] - self.assertEqual("Bronze+DSS+SRP_1+1234567891011", - pools[0]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Bronze#DSS", - pools[0]['location_info']) - self._cleanup_pool_info() - - @mock.patch.object( - provision_v3.VMAXProvisionV3, - 'get_srp_pool_stats', - return_value=(100, 10, 1, 20, False)) - def test_update_volume_stats_multiple_array_info_wlp_disabled( - self, mock_stats): - self.driver.common.pool_info['reserved_percentage'] = 5 - self.driver.common.pool_info['arrays_info'] = ( - self.multiple_array_info_list()) - self.driver.common.multiPoolSupportEnabled = True - data = self.driver.common.update_volume_stats() - pools = data['pools'] - self.assertEqual("Bronze+DSS+SRP_1+1234567891011", - pools[0]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Bronze#DSS", - pools[0]['location_info']) - self.assertEqual("Silver+OLTP+SRP_1+1234567891011", - pools[1]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Silver#OLTP", - pools[1]['location_info']) - self._cleanup_pool_info() - - @mock.patch.object( - provision_v3.VMAXProvisionV3, - 'get_srp_pool_stats', - return_value=(100, 10, 1, 20, False)) - def 
test_update_volume_stats_multiple_array_info_wlp_enabled( - self, mock_stats): - self.driver.common.pool_info['reserved_percentage'] = 5 - self.driver.common.pool_info['arrays_info'] = ( - self.multiple_array_info_list()) - self.driver.common.multiPoolSupportEnabled = True - data = self.driver.common.update_volume_stats() - pools = data['pools'] - self.assertEqual("Bronze+DSS+SRP_1+1234567891011", - pools[0]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Bronze#DSS", - pools[0]['location_info']) - self.assertEqual("Silver+OLTP+SRP_1+1234567891011", - pools[1]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Silver#OLTP", - pools[1]['location_info']) - self._cleanup_pool_info() - - @mock.patch.object( - provision_v3.VMAXProvisionV3, - 'get_srp_pool_stats', - return_value=(100, 10, 1, 20, False)) - def test_update_volume_stats_without_multi_pool(self, mock_stats): - self.driver.common.pool_info['reserved_percentage'] = 5 - self.driver.common.pool_info['arrays_info'] = ( - self.multiple_array_info_list()) - data = self.driver.common.update_volume_stats() - pools = data['pools'] - # Match with the older pool_name format - self.assertEqual("Bronze+SRP_1+1234567891011", - pools[0]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Bronze#DSS", - pools[0]['location_info']) - self.assertEqual("Silver+SRP_1+1234567891011", - pools[1]['pool_name']) - self.assertEqual("1234567891011#SRP_1#Silver#OLTP", - pools[1]['location_info']) - self._cleanup_pool_info() - - @mock.patch.object( - common.VMAXCommon, - '_find_pool_in_array', - return_value=(VMAXCommonData.poolInstanceName, - VMAXCommonData.storage_system)) - def test_get_slo_workload_combinations_with_slo(self, mock_pool): - self.driver.common.multiPoolSupportEnabled = True - final_array_info_list = ( - self.driver.common._get_slo_workload_combinations( - self.default_array_info_list())) - bCheckForSilver = False - for array_info in final_array_info_list: - # Check if 'Silver' is present in the final list - 
if array_info['SLO'] == 'Silver': - bCheckForSilver = True - self.assertTrue(bCheckForSilver) - self._cleanup_pool_info() - - @mock.patch.object( - common.VMAXCommon, - '_find_pool_in_array', - return_value=(VMAXCommonData.poolInstanceName, - VMAXCommonData.storage_system)) - def test_get_slo_workload_combinations_without_slo(self, mock_pool): - self.driver.common.multiPoolSupportEnabled = True - final_array_info_list = ( - self.driver.common._get_slo_workload_combinations( - self.array_info_list_without_slo())) - bCheckForSilver = False - for array_info in final_array_info_list: - # Check if 'Silver' is present in the final list - if array_info['SLO'] == 'Silver': - bCheckForSilver = True - self.assertTrue(bCheckForSilver) - self._cleanup_pool_info() - - def _cleanup(self, tempdir, config_file_path): - bExists = os.path.exists(config_file_path) - if bExists: - os.remove(config_file_path) - shutil.rmtree(tempdir) - - def _cleanup_pool_info(self): - self.driver.common.pool_info['reserved_percentage'] = 0 - self.driver.common.pool_info['arrays_info'] = [] - self.driver.common.multiPoolSupportEnabled = False - - -class VMAXProvisionV3Test(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXProvisionV3Test, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'ProvisionV3Tests' - configuration.config_group = 'ProvisionV3Tests' - common.VMAXCommon._gather_info = mock.Mock() - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - - def test_get_storage_pool_setting(self): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - slo = 'Bronze' - workload = 'DSS' - poolInstanceName = {} - poolInstanceName['InstanceID'] = "SATA_GOLD1" - poolInstanceName['CreationClassName'] = ( - self.data.storagepool_creationclass) - - storagePoolCapability = provisionv3.get_storage_pool_capability( - conn, poolInstanceName) - storagepoolsetting = 
provisionv3.get_storage_pool_setting( - conn, storagePoolCapability, slo, workload) - self.assertIn('Bronze:DSS', storagepoolsetting['InstanceID']) - - def test_get_storage_pool_setting_exception(self): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - slo = 'Bronze' - workload = 'NONE' - poolInstanceName = {} - poolInstanceName['InstanceID'] = "SATA_GOLD1" - poolInstanceName['CreationClassName'] = ( - self.data.storagepool_creationclass) - - storagePoolCapability = provisionv3.get_storage_pool_capability( - conn, poolInstanceName) - self.assertRaises(exception.VolumeBackendAPIException, - provisionv3.get_storage_pool_setting, - conn, storagePoolCapability, slo, workload) - - def test_extend_volume_in_SG(self): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - storageConfigService = { - 'CreationClassName': 'Symm_ElementCompositionService', - 'SystemName': 'SYMMETRIX+000195900551'} - theVolumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - inVolumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeSize = 3 - - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': True} - job = { - 'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}} - conn.InvokeMethod = mock.Mock(return_value=(4096, job)) - provisionv3.utils.wait_for_job_complete = mock.Mock(return_value=( - 0, 'Success')) - volumeDict = {'classname': u'Symm_StorageVolume', - 'keybindings': VMAXCommonData.keybindings} - provisionv3.get_volume_dict_from_job = ( - mock.Mock(return_value=volumeDict)) - result = provisionv3.extend_volume_in_SG(conn, storageConfigService, - theVolumeInstanceName, - inVolumeInstanceName, - volumeSize, extraSpecs) - self.assertEqual( - ({'classname': u'Symm_StorageVolume', - 'keybindings': { - 'CreationClassName': u'Symm_StorageVolume', - 'DeviceID': u'1', - 'SystemCreationClassName': u'Symm_StorageSystem', - 'SystemName': u'SYMMETRIX+000195900551'}}, 0), 
result) - - def test_extend_volume_in_SG_with_Exception(self): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - storageConfigService = { - 'CreationClassName': 'Symm_ElementCompositionService', - 'SystemName': 'SYMMETRIX+000195900551'} - theVolumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - inVolumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeSize = 3 - - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': True} - job = { - 'Job': {'InstanceID': '9999', 'status': 'success', 'type': None}} - conn.InvokeMethod = mock.Mock(return_value=(4096, job)) - provisionv3.utils.wait_for_job_complete = mock.Mock(return_value=( - 2, 'Failure')) - self.assertRaises( - exception.VolumeBackendAPIException, - provisionv3.extend_volume_in_SG, conn, storageConfigService, - theVolumeInstanceName, inVolumeInstanceName, volumeSize, - extraSpecs) - - def test_create_volume_from_sg(self): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - storageConfigService = { - 'CreationClassName': 'EMC_StorageConfigurationService', - 'SystemName': 'SYMMETRIX+000195900551'} - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': True} - volumeName = 'v3_vol' - volumeSize = 3 - volumeDict, rc = ( - provisionv3.create_volume_from_sg( - conn, storageConfigService, volumeName, - self.data.default_sg_instance_name, volumeSize, extraSpecs)) - keybindings = volumeDict['keybindings'] - self.assertEqual('1', keybindings['DeviceID']) - self.assertEqual(0, rc) - - @mock.patch.object( - utils.VMAXUtils, - 'wait_for_job_complete', - return_value=(-1, 'error')) - def test_create_volume_from_sg_failed(self, mock_devices): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - storageConfigService = { - 'CreationClassName': 'EMC_StorageConfigurationService', - 'SystemName': 'SYMMETRIX+000195900551'} - sgInstanceName = self.data.default_sg_instance_name - extraSpecs 
= {'volume_backend_name': 'GOLD_BE', - 'isV3': True} - volumeName = 'failed_vol' - volumeSize = 3 - self.assertRaises( - exception.VolumeBackendAPIException, - provisionv3.create_volume_from_sg, - conn, storageConfigService, volumeName, - sgInstanceName, volumeSize, extraSpecs) - - def test_create_storage_group_v3(self): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - controllerConfigService = { - 'CreationClassName': 'EMC_ControllerConfigurationService', - 'SystemName': 'SYMMETRIX+000195900551'} - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': True} - groupName = self.data.storagegroupname - srp = 'SRP_1' - slo = 'Bronze' - workload = 'DSS' - provisionv3._find_new_storage_group = mock.Mock( - return_value=self.data.default_sg_instance_name) - newstoragegroup = provisionv3.create_storage_group_v3( - conn, controllerConfigService, groupName, srp, slo, workload, - extraSpecs, False) - self.assertEqual(self.data.default_sg_instance_name, newstoragegroup) - - @mock.patch.object( - utils.VMAXUtils, - 'get_v3_default_sg_instance_name', - return_value=(None, None, VMAXCommonData.default_sg_instance_name)) - def test_create_element_replica(self, mock_sg): - provisionv3 = self.driver.common.provisionv3 - conn = FakeEcomConnection() - repServiceInstanceName = { - 'CreationClassName': 'repServiceInstanceName', - 'SystemName': 'SYMMETRIX+000195900551'} - extraSpecs = {'volume_backend_name': 'GOLD_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:slo': 'SRP_1', - 'storagetype:workload': 'SRP_1'} - sourceInstance = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - syncType = 7 - cloneName = 'new_ss' - rc, job = provisionv3.create_element_replica( - conn, repServiceInstanceName, cloneName, syncType, sourceInstance, - extraSpecs) - self.assertEqual(0, rc) - - -class VMAXMaskingTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXMaskingTest, self).setUp() - - configuration = 
mock.Mock() - configuration.safe_get.return_value = 'MaskingTests' - configuration.config_group = 'MaskingTests' - common.VMAXCommon._get_ecom_connection = mock.Mock( - return_value=self.fake_ecom_connection()) - common.VMAXCommon._gather_info = mock.Mock( - return_value=self.fake_gather_info()) - instancename = FakeCIMInstanceName() - utils.VMAXUtils.get_instance_name = ( - instancename.fake_getinstancename) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - def fake_ecom_connection(self): - conn = FakeEcomConnection() - return conn - - def fake_gather_info(self): - return - - def test_get_v3_default_storage_group_instance_name(self): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - extraSpecs = self.data.extra_specs - masking._get_and_remove_from_storage_group_v3 = mock.Mock() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - maskingviewdict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - result = ( - masking._get_v3_default_storagegroup_instancename( - conn, maskingviewdict['volumeInstance'], - maskingviewdict, - controllerConfigService, maskingviewdict['volumeName'])) - self.assertEqual('OS-SRP_1-Bronze-DSS-SG', result['ElementName']) - - def test_get_v3_default_storage_group_instance_name_warning(self): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - extraSpecs = self.data.extra_specs - masking.utils.get_storage_groups_from_volume = mock.Mock( - return_value=[]) - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - maskingviewdict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - result = ( - masking._get_v3_default_storagegroup_instancename( 
- conn, maskingviewdict['volumeInstance'], - maskingviewdict, - controllerConfigService, maskingviewdict['volumeName'])) - self.assertIsNone(result) - - def test_return_volume_to_default_storage_group_v3(self): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - volumeName = "V3-Vol" - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - masking.provisionv3.create_storage_group_v3 = mock.Mock( - return_value={'Value'}) - masking._is_volume_in_storage_group = mock.Mock( - return_value=True) - masking.return_volume_to_default_storage_group_v3 = mock.Mock() - masking._return_back_to_default_sg( - conn, controllerConfigService, volumeInstance, volumeName, - extraSpecs) - masking.return_volume_to_default_storage_group_v3.assert_called_with( - conn, controllerConfigService, - volumeInstance, volumeName, extraSpecs) - - def test_return_volume_to_default_storage_group_v3_exception(self): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - volumeName = "V3-Vol" - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - - self.assertRaises( - exception.VolumeBackendAPIException, - masking.return_volume_to_default_storage_group_v3, - conn, controllerConfigService, - volumeInstance, volumeName, extraSpecs) - - 
def test_add_volume_to_sg_and_verify(self): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - volumeName = "V3-Vol" - storageGroupInstanceName = self.data.storagegroups[0] - sgGroupName = self.data.storagegroupname - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - msg = masking._add_volume_to_sg_and_verify( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, sgGroupName, extraSpecs) - self.assertIsNone(msg) - - def test_cleanup_deletion_v3(self): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - storageGroupInstanceName = self.data.storagegroups[1] - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - masking._remove_volume_from_sg = mock.Mock() - masking._cleanup_deletion_v3( - conn, controllerConfigService, volumeInstance, extraSpecs) - masking._remove_volume_from_sg.assert_called_with( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, extraSpecs) - - # Bug 1552426 - failed rollback on V3 when MV issue - def test_check_ig_rollback(self): - # called on masking view rollback - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - controllerConfigService = ( - 
self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - connector = self.data.connector - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'slo': 'Bronze', - 'pool': 'SRP_1', - } - igGroupName = self.data.initiatorgroup_name - host = igGroupName.split("-")[1] - igInstance = masking._find_initiator_masking_group( - conn, controllerConfigService, self.data.initiatorNames) - # path 1: The masking view creation process created a now stale - # initiator group before it failed. - with mock.patch.object(masking, - '_last_volume_delete_initiator_group'): - masking._check_ig_rollback(conn, controllerConfigService, - igGroupName, connector, extraSpecs) - (masking._last_volume_delete_initiator_group. - assert_called_once_with(conn, controllerConfigService, - igInstance, extraSpecs, host)) - # path 2: No initiator group was created before the masking - # view process failed. - with mock.patch.object(masking, - '_find_initiator_masking_group', - return_value=None): - masking._last_volume_delete_initiator_group.reset_mock() - masking._check_ig_rollback(conn, controllerConfigService, - igGroupName, connector, extraSpecs) - (masking._last_volume_delete_initiator_group. 
- assert_not_called()) - - @mock.patch.object( - masking.VMAXMasking, - 'get_associated_masking_groups_from_device', - return_value=VMAXCommonData.storagegroups) - @mock.patch.object( - masking.VMAXMasking, - 'return_volume_to_default_storage_group_v3', - return_value='Returning volume to default sg') - def test_check_if_rollback_action_required_v3( - self, mock_return, mock_group): - conn = self.fake_ecom_connection() - masking = self.driver.common.masking - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - extraSpecs_v3 = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'slo': 'Bronze', - 'pool': 'SRP_1', - 'connector': self.data.connector} - - vol = EMC_StorageVolume() - vol['name'] = self.data.test_volume['name'] - vol['CreationClassName'] = 'Symm_StorageVolume' - vol['ElementName'] = self.data.test_volume['id'] - vol['DeviceID'] = self.data.test_volume['device_id'] - vol['Id'] = self.data.test_volume['id'] - vol['SystemName'] = self.data.storage_system - vol['NumberOfBlocks'] = self.data.test_volume['NumberOfBlocks'] - vol['BlockSize'] = self.data.test_volume['BlockSize'] - - # Added vol to vol.path - vol['SystemCreationClassName'] = 'Symm_StorageSystem' - vol.path = vol - vol.path.classname = vol['CreationClassName'] - rollbackDict = {} - rollbackDict['isV3'] = True - rollbackDict['defaultStorageGroupInstanceName'] = ( - self.data.default_storage_group) - rollbackDict['sgGroupName'] = self.data.storagegroupname - rollbackDict['sgName'] = self.data.storagegroupname - rollbackDict['volumeName'] = 'vol1' - rollbackDict['slo'] = 'Bronze' - rollbackDict['volumeInstance'] = vol - rollbackDict['controllerConfigService'] = controllerConfigService - rollbackDict['extraSpecs'] = extraSpecs_v3 - rollbackDict['igGroupName'] = self.data.initiatorgroup_name - rollbackDict['connector'] = self.data.connector - # v3 Path 1 - The volume is in another storage group that isn't the - # default storage 
group - expectedmessage = (_("Rollback - Volume in another storage " - "group besides default storage group.")) - message = ( - masking. - _check_if_rollback_action_for_masking_required(conn, - rollbackDict)) - self.assertEqual(expectedmessage, message) - # v3 Path 2 - The volume is not in any storage group - rollbackDict['sgGroupName'] = 'sq_not_exist' - (rollbackDict - ['defaultStorageGroupInstanceName']) = (self.data. - default_sg_instance_name) - expectedmessage = (_("V3 rollback")) - message = ( - masking. - _check_if_rollback_action_for_masking_required(conn, - rollbackDict)) - self.assertEqual(expectedmessage, message) - - def test_remove_volume_from_sg(self): - extraSpecs = self.data.extra_specs - conn = self.fake_ecom_connection() - common = self.driver.common - masking = common.masking - controllerConfigService = ( - common.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - storageGroupName = self.data.storagegroupname - storageGroupInstanceName = ( - self.driver.utils.find_storage_masking_group( - conn, controllerConfigService, storageGroupName)) - volumeInstanceNames = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")) - volumeInstanceName = volumeInstanceNames[0] - volumeInstance = conn.GetInstance(volumeInstanceName) - masking.get_devices_from_storage_group = ( - mock.Mock(return_value=volumeInstanceNames)) - masking._remove_volume_from_sg( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, extraSpecs) - masking.get_devices_from_storage_group.assert_called_with( - conn, storageGroupInstanceName) - - # bug 1555728: _create_initiator_Group uses multiple CIM calls - # where one suffices - def test_create_initiator_group(self): - utils = self.driver.common.utils - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - controllerConfigService = (utils. 
- find_controller_configuration_service( - conn, self.data.storage_system)) - igGroupName = self.data.initiatorgroup_name - hardwareIdinstanceNames = self.data.initiatorNames - extraSpecs = self.data.extra_specs - # path 1: Initiator Group created successfully - foundInitiatorGroupName = (masking._create_initiator_Group( - conn, controllerConfigService, - igGroupName, hardwareIdinstanceNames, - extraSpecs)) - self.assertEqual(foundInitiatorGroupName, igGroupName) - # path 2: Unsuccessful Initiator Group creation - with mock.patch.object(utils, 'wait_for_job_complete', - return_value=(10, None)): - igGroupName = 'IG_unsuccessful' - self.assertRaises(exception.VolumeBackendAPIException, - masking._create_initiator_Group, - conn, controllerConfigService, - igGroupName, hardwareIdinstanceNames, - extraSpecs) - - @mock.patch.object( - masking.VMAXMasking, - "_delete_initiators_from_initiator_group") - @mock.patch.object( - masking.VMAXMasking, - "_delete_initiator_group") - @mock.patch.object( - masking.VMAXMasking, - "_create_initiator_Group", - return_value=VMAXCommonData.initiatorgroup_name) - # bug 1579934: duplicate IG name error from SMI-S - def test_verify_initiator_group_from_masking_view( - self, create_ig, delete_ig, delete_initiators): - utils = self.driver.common.utils - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - controllerConfigService = ( - utils.find_controller_configuration_service( - conn, self.data.storage_system)) - connector = self.data.connector - maskingViewName = self.data.lunmaskctrl_name - storageSystemName = self.data.storage_system - igGroupName = self.data.initiatorgroup_name - extraSpecs = self.data.extra_specs - initiatorNames = ( - self.driver.common.masking._find_initiator_names(conn, connector)) - storageHardwareIDInstanceNames = ( - masking._get_storage_hardware_id_instance_names( - conn, initiatorNames, storageSystemName)) - foundInitiatorGroupFromMaskingView = ( - 
masking._get_initiator_group_from_masking_view( - conn, maskingViewName, storageSystemName)) - # path 1: initiator group from masking view matches initiator - # group from connector - verify = masking._verify_initiator_group_from_masking_view( - conn, controllerConfigService, maskingViewName, connector, - storageSystemName, igGroupName, extraSpecs) - masking._create_initiator_Group.assert_not_called() - self.assertTrue(verify) - # path 2: initiator group from masking view does not match - # initiator group from connector - with mock.patch.object( - masking, "_find_initiator_masking_group", - return_value="not_a_match"): - # path 2a: initiator group from connector is not None - # - no new initiator group created - verify = masking._verify_initiator_group_from_masking_view( - conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, - extraSpecs) - self.assertTrue(verify) - masking._create_initiator_Group.assert_not_called() - # path 2b: initiator group from connector is None - # - new initiator group created - with mock.patch.object( - masking, "_find_initiator_masking_group", - return_value=None): - masking._verify_initiator_group_from_masking_view( - conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, - extraSpecs) - (masking._create_initiator_Group. - assert_called_once_with(conn, controllerConfigService, - igGroupName, - storageHardwareIDInstanceNames, - extraSpecs)) - # path 2b(i) - the name of the initiator group from the - # masking view is the same as the provided igGroupName - # - existing ig must be deleted - (masking._delete_initiator_group. 
- assert_called_once_with(conn, controllerConfigService, - foundInitiatorGroupFromMaskingView, - igGroupName, extraSpecs)) - # path 2b(ii) - the name of the ig from the masking view - # is different - do not delete the existing ig - masking._delete_initiator_group.reset_mock() - with mock.patch.object( - conn, "GetInstance", - return_value={'ElementName': "different_name"}): - masking._verify_initiator_group_from_masking_view( - conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, - extraSpecs) - masking._delete_initiator_group.assert_not_called() - # path 3 - the masking view cannot be verified - with mock.patch.object( - masking, "_get_storage_group_from_masking_view", - return_value=None): - verify = masking._verify_initiator_group_from_masking_view( - conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, - extraSpecs) - self.assertFalse(verify) - - @mock.patch.object( - masking.VMAXMasking, - "_check_adding_volume_to_storage_group", - return_value=None) - @mock.patch.object( - masking.VMAXMasking, - "_validate_masking_view", - return_value=("mv_instance", VMAXCommonData.sg_instance_name, None)) - @mock.patch.object( - masking.VMAXMasking, - "_get_and_remove_from_storage_group_v3") - @mock.patch.object( - masking.VMAXMasking, - '_check_if_rollback_action_for_masking_required') - def test_get_or_create_masking_view_and_map_lun(self, check_rb, rm_sg, - validate_mv, check_sg): - common = self.driver.common - common.conn = self.fake_ecom_connection() - masking = common.masking - connector = self.data.connector - extraSpecs = self.data.extra_specs - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - common.conn, self.data.storage_system)) - defaultStorageGroupInstanceName = ( - {'CreationClassName': 'CIM_DeviceMaskingGroup', - 'ElementName': 'OS-SRP_1-Bronze-DSS-SG'}) - volumeInstanceName = ( - 
common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - with mock.patch.object(common, '_find_lun', - return_value=volumeInstance): - maskingViewDict = common._populate_masking_dict( - self.data.test_volume_v3, connector, extraSpecs) - maskingViewDict['isLiveMigration'] = False - rollbackDict = {} - rollbackDict['controllerConfigService'] = controllerConfigService - rollbackDict['defaultStorageGroupInstanceName'] = ( - defaultStorageGroupInstanceName) - rollbackDict['volumeInstance'] = volumeInstance - rollbackDict['volumeName'] = self.data.test_volume_v3['name'] - rollbackDict['fastPolicyName'] = None - rollbackDict['isV3'] = True - rollbackDict['extraSpecs'] = extraSpecs - rollbackDict['sgGroupName'] = 'OS-fakehost-SRP_1-Bronze-DSS-I-SG' - rollbackDict['igGroupName'] = self.data.initiatorgroup_name - rollbackDict['pgGroupName'] = self.data.port_group - rollbackDict['connector'] = self.data.connector - # path 1: masking view creation or retrieval is successful - with mock.patch.object(masking, "_get_port_group_name_from_mv", - return_value=(self.data.port_group, None)): - deviceDict = masking.get_or_create_masking_view_and_map_lun( - common.conn, maskingViewDict, extraSpecs) - (masking._check_if_rollback_action_for_masking_required. 
- assert_not_called()) - self.assertEqual(rollbackDict, deviceDict) - # path 2: masking view creation or retrieval is unsuccessful - with mock.patch.object(masking, "_get_port_group_name_from_mv", - return_value=(None, "error_message")): - rollbackDict['storageSystemName'] = self.data.storage_system - rollbackDict['slo'] = u'Bronze' - self.assertRaises(exception.VolumeBackendAPIException, - masking.get_or_create_masking_view_and_map_lun, - common.conn, maskingViewDict, extraSpecs) - - @mock.patch.object( - masking.VMAXMasking, - '_get_storage_group_from_masking_view_instance', - return_value=VMAXCommonData.sg_instance_name) - def test_check_existing_storage_group(self, mock_sg_from_mv): - common = self.driver.common - conn = self.fake_ecom_connection() - mv_instance_name = {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV'} - masking = common.masking - sgFromMvInstanceName, msg = ( - masking._check_existing_storage_group(conn, mv_instance_name)) - self.assertEqual(VMAXCommonData.sg_instance_name, - sgFromMvInstanceName) - self.assertIsNone(msg) - - @mock.patch.object( - masking.VMAXMasking, - '_get_storage_group_from_masking_view_instance', - return_value=None) - def test_check_existing_storage_group_none(self, mock_sg_from_mv): - common = self.driver.common - conn = self.fake_ecom_connection() - mv_instance_name = {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV'} - masking = common.masking - sgFromMvInstanceName, msg = ( - masking._check_existing_storage_group(conn, mv_instance_name)) - self.assertIsNone(sgFromMvInstanceName) - self.assertIsNotNone(msg) - - @mock.patch.object( - masking.VMAXMasking, - '_get_port_group_from_masking_view', - return_value=VMAXCommonData.port_group) - def test_get_port_group_name_from_mv_success(self, mock_pg_name): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - mv_name = self.data.lunmaskctrl_name - system_name = 
self.data.storage_system - - conn.GetInstance = mock.Mock( - return_value=self.data.port_group_instance) - pg_name, err_msg = ( - masking._get_port_group_name_from_mv(conn, mv_name, system_name)) - - self.assertIsNone(err_msg) - self.assertIsNotNone(pg_name) - - @mock.patch.object( - masking.VMAXMasking, - '_get_port_group_from_masking_view', - return_value=None) - def test_get_port_group_name_from_mv_fail_1(self, mock_pg_name): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - mv_name = self.data.lunmaskctrl_name - system_name = self.data.storage_system - - pg_name, err_msg = ( - masking._get_port_group_name_from_mv(conn, mv_name, system_name)) - - self.assertIsNone(pg_name) - self.assertIsNotNone(err_msg) - - @mock.patch.object( - masking.VMAXMasking, - '_get_port_group_from_masking_view', - return_value=VMAXCommonData.port_group) - def test_get_port_group_name_from_mv_fail_2(self, mock_pg_name): - masking = self.driver.common.masking - conn = self.fake_ecom_connection() - mv_name = self.data.lunmaskctrl_name - system_name = self.data.storage_system - - conn.GetInstance = mock.Mock(return_value={}) - pg_name, err_msg = ( - masking._get_port_group_name_from_mv(conn, mv_name, system_name)) - - self.assertIsNone(pg_name) - self.assertIsNotNone(err_msg) - - -class VMAXFCTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXFCTest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'FCTests' - configuration.config_group = 'FCTests' - common.VMAXCommon._gather_info = mock.Mock() - common.VMAXCommon._get_ecom_connection = mock.Mock( - return_value=FakeEcomConnection()) - driver = fc.VMAXFCDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - - def test_terminate_connection_ig_present(self): - common = self.driver.common - common.conn = FakeEcomConnection() - common._unmap_lun = mock.Mock() - common.get_masking_view_by_volume = mock.Mock( - 
return_value='testMV') - common.get_masking_views_by_port_group = mock.Mock( - return_value=[]) - common.get_target_wwns_list = mock.Mock( - return_value=VMAXCommonData.target_wwns) - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - with mock.patch.object(self.driver.common, - 'check_ig_instance_name', - return_value=initiatorGroupInstanceName): - data = self.driver.terminate_connection(self.data.test_volume_v3, - self.data.connector) - common.get_target_wwns_list.assert_called_once_with( - VMAXCommonData.storage_system, self.data.test_volume_v3, - VMAXCommonData.connector) - numTargetWwns = len(VMAXCommonData.target_wwns) - self.assertEqual(numTargetWwns, len(data['data'])) - - @mock.patch.object( - common.VMAXCommon, - 'check_ig_instance_name', - return_value=None) - @mock.patch.object( - common.VMAXCommon, - 'get_target_wwns_list', - return_value=VMAXCommonData.target_wwns) - @mock.patch.object( - common.VMAXCommon, - 'get_masking_views_by_port_group', - return_value=[]) - @mock.patch.object( - common.VMAXCommon, - 'get_masking_view_by_volume', - return_value='testMV') - @mock.patch.object( - common.VMAXCommon, - '_unmap_lun') - def test_terminate_connection_no_ig(self, mock_unmap, - mock_mv_vol, mock_mv_pg, - mock_wwns, mock_check_ig): - common = self.driver.common - common.conn = FakeEcomConnection() - data = self.driver.terminate_connection(self.data.test_volume_v3, - self.data.connector) - common.get_target_wwns_list.assert_called_once_with( - VMAXCommonData.storage_system, self.data.test_volume_v3, - VMAXCommonData.connector) - numTargetWwns = len(VMAXCommonData.target_wwns) - self.assertEqual(numTargetWwns, len(data['data'])) - - def test_get_common_masking_views_two_exist(self): - common = self.driver.common - common.conn = FakeEcomConnection() - maskingviews = [{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV1'}, - 
{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV2'}] - - portGroupInstanceName = ( - self.driver.common.masking._get_port_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - common.get_masking_views_by_port_group = mock.Mock( - return_value=maskingviews) - common.get_masking_views_by_initiator_group = mock.Mock( - return_value=maskingviews) - - mvInstances = self.driver._get_common_masking_views( - portGroupInstanceName, initiatorGroupInstanceName) - self.assertEqual(2, len(mvInstances)) - - def test_get_common_masking_views_one_overlap(self): - common = self.driver.common - common.conn = FakeEcomConnection() - maskingviewsPG = [{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV1'}, - {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV2'}] - - maskingviewsIG = [{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV1'}] - - portGroupInstanceName = ( - self.driver.common.masking._get_port_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - common.get_masking_views_by_port_group = mock.Mock( - return_value=maskingviewsPG) - common.get_masking_views_by_initiator_group = mock.Mock( - return_value=maskingviewsIG) - - mvInstances = self.driver._get_common_masking_views( - portGroupInstanceName, initiatorGroupInstanceName) - self.assertEqual(1, len(mvInstances)) - - def test_get_common_masking_views_no_overlap(self): - common = self.driver.common - common.conn = FakeEcomConnection() - maskingviewsPG = [{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV2'}] - - 
maskingviewsIG = [{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'MV1'}] - - portGroupInstanceName = ( - self.driver.common.masking._get_port_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - - initiatorGroupInstanceName = ( - self.driver.common.masking._get_initiator_group_from_masking_view( - common.conn, self.data.lunmaskctrl_name, - self.data.storage_system)) - common.get_masking_views_by_port_group = mock.Mock( - return_value=maskingviewsPG) - common.get_masking_views_by_initiator_group = mock.Mock( - return_value=maskingviewsIG) - - mvInstances = self.driver._get_common_masking_views( - portGroupInstanceName, initiatorGroupInstanceName) - self.assertEqual(0, len(mvInstances)) - - @mock.patch.object( - common.VMAXCommon, - 'initialize_connection', - return_value=VMAXCommonData.fc_device_info) - @mock.patch.object( - fc.VMAXFCDriver, - '_build_initiator_target_map', - return_value=(VMAXCommonData.target_wwns, - VMAXCommonData.end_point_map)) - def test_initialize_connection_snapshot(self, mock_map, mock_conn): - data = self.driver.initialize_connection_snapshot( - self.data.test_snapshot_v3, self.data.connector) - self.assertEqual('fibre_channel', data['driver_volume_type']) - self.assertEqual(3, data['data']['target_lun']) - - @mock.patch.object( - common.VMAXCommon, - '_unmap_lun') - @mock.patch.object( - fc.VMAXFCDriver, - '_get_zoning_mappings', - return_value=(VMAXCommonData.zoning_mappings)) - @mock.patch.object( - common.VMAXCommon, - 'check_ig_instance_name', - return_value=None) - def test_terminate_connection_snapshot( - self, mock_check_ig, mock_zoning_map, mock_unmap): - common = self.driver.common - common.conn = FakeEcomConnection() - data = self.driver.terminate_connection_snapshot( - self.data.test_snapshot_v3, self.data.connector) - self.assertEqual('fibre_channel', data['driver_volume_type']) - self.assertEqual(2, len(data['data']['target_wwn'])) - - @mock.patch.object( - 
provision.VMAXProvision, - 'remove_device_from_storage_group') - def test_remove_device_from_storage_group(self, mock_remove): - conn = FakeEcomConnection() - common = self.driver.common - controllerConfigService = ( - common.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeName = 'vol1' - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - masking = common.masking - volumeInstance = conn.GetInstance(volumeInstanceName) - storageGroupName = self.data.storagegroupname - storageGroupInstanceName = ( - common.utils.find_storage_masking_group( - conn, controllerConfigService, storageGroupName)) - masking.remove_device_from_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, storageGroupName, extraSpecs) - masking.provision.remove_device_from_storage_group.assert_called_with( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstanceName, volumeName, extraSpecs) - - -@ddt.ddt -class VMAXUtilsTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXUtilsTest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'UtilsTests' - configuration.config_group = 'UtilsTests' - common.VMAXCommon._gather_info = mock.Mock() - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - def test_set_target_element_supplier_in_rsd(self): - conn = FakeEcomConnection() - extraSpecs = self.data.extra_specs - repServiceInstanceName = ( - self.driver.utils.find_replication_service( - conn, self.data.storage_system)) - rsdInstance = self.driver.utils.set_target_element_supplier_in_rsd( - conn, repServiceInstanceName, - 
common.SNAPVX_REPLICATION_TYPE, - common.CREATE_NEW_TARGET, extraSpecs) - self.assertIsNotNone(rsdInstance) - - def test_set_copy_methodology_in_rsd(self): - conn = FakeEcomConnection() - extraSpecs = self.data.extra_specs - repServiceInstanceName = ( - self.driver.utils.find_replication_service( - conn, self.data.storage_system)) - rsdInstance = self.driver.utils.set_copy_methodology_in_rsd( - conn, repServiceInstanceName, - provision.SYNC_CLONE_LOCAL, - provision.COPY_ON_WRITE, extraSpecs) - self.assertIsNotNone(rsdInstance) - - def getinstance_capability(self, reptypes): - repservicecap = CIM_ReplicationServiceCapabilities() - repservicecap['CreationClassName'] = ( - 'CIM_ReplicationServiceCapabilities') - - classcimproperty = Fake_CIMProperty() - supportedReplicationTypes = ( - classcimproperty.fake_getSupportedReplicationTypesCIMProperty( - reptypes)) - properties = {u'SupportedReplicationTypes': supportedReplicationTypes} - repservicecap.properties = properties - return repservicecap - - @ddt.data(('V3', True), ('V3_ASYNC', True), ('V3_SYNC', True), - ('V2', False)) - @ddt.unpack - def test_is_clone_licensed(self, reptypes, isV3): - conn = FakeEcomConnection() - capabilityInstanceName = self.getinstance_capability(reptypes) - conn.GetInstance = mock.Mock( - return_value=capabilityInstanceName) - self.assertTrue(self.driver.utils.is_clone_licensed( - conn, capabilityInstanceName, isV3)) - - def test_is_clone_licensed_false(self): - conn = FakeEcomConnection() - isV3 = True - reptypes = None - capabilityInstanceName = self.getinstance_capability(reptypes) - conn.GetInstance = mock.Mock( - return_value=capabilityInstanceName) - self.assertFalse(self.driver.utils.is_clone_licensed( - conn, capabilityInstanceName, isV3)) - - def test_get_pool_capacities(self): - conn = FakeEcomConnection() - - (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, - array_max_over_subscription) = ( - self.driver.utils.get_pool_capacities( - conn, self.data.poolname, 
self.data.storage_system)) - self.assertEqual(931, total_capacity_gb) - self.assertEqual(465, free_capacity_gb) - self.assertEqual(465, provisioned_capacity_gb) - self.assertEqual(1.5, array_max_over_subscription) - - def test_get_pool_capacities_none_array_max_oversubscription(self): - conn = FakeEcomConnection() - null_emcmaxsubscriptionpercent = { - 'TotalManagedSpace': '1000000000000', - 'ElementName': 'gold', - 'RemainingManagedSpace': '500000000000', - 'SystemName': 'SYMMETRIX+000195900551', - 'CreationClassName': 'Symm_VirtualProvisioningPool', - 'EMCSubscribedCapacity': '500000000000'} - conn.GetInstance = mock.Mock( - return_value=null_emcmaxsubscriptionpercent) - (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, - array_max_over_subscription) = ( - self.driver.utils.get_pool_capacities( - conn, self.data.poolname, self.data.storage_system)) - self.assertEqual(65534, array_max_over_subscription) - - def test_get_ratio_from_max_sub_per(self): - max_subscription_percent_float = ( - self.driver.utils.get_ratio_from_max_sub_per(150)) - self.assertEqual(1.5, max_subscription_percent_float) - - def test_get_ratio_from_max_sub_per_none_value(self): - max_subscription_percent_float = ( - self.driver.utils.get_ratio_from_max_sub_per(str(0))) - self.assertIsNone(max_subscription_percent_float) - - def test_update_storage_QOS(self): - conn = FakeEcomConnection() - pywbem = mock.Mock() - pywbem.cim_obj = mock.Mock() - pywbem.cim_obj.CIMInstance = mock.Mock() - utils.pywbem = pywbem - - extraSpecs = {'volume_backend_name': 'V3_BE', - 'qos': { - 'maxIOPS': '6000', - 'maxMBPS': '6000', - 'DistributionType': 'Always' - }} - - storageGroupInstanceName = { - 'CreationClassName': 'CIM_DeviceMaskingGroup', - 'EMCMaximumIO': 6000, - 'EMCMaximumBandwidth': 5000, - 'EMCMaxIODynamicDistributionType': 1 - - } - modifiedstorageGroupInstance = { - 'CreationClassName': 'CIM_DeviceMaskingGroup', - 'EMCMaximumIO': 6000, - 'EMCMaximumBandwidth': 6000, - 
'EMCMaxIODynamicDistributionType': 1 - - } - conn.ModifyInstance = ( - mock.Mock(return_value=modifiedstorageGroupInstance)) - self.driver.common.utils.update_storagegroup_qos( - conn, storageGroupInstanceName, extraSpecs) - - modifiedInstance = self.driver.common.utils.update_storagegroup_qos( - conn, storageGroupInstanceName, extraSpecs) - self.assertIsNotNone(modifiedInstance) - self.assertEqual( - 6000, modifiedInstance['EMCMaximumIO']) - self.assertEqual( - 6000, modifiedInstance['EMCMaximumBandwidth']) - self.assertEqual( - 1, modifiedInstance['EMCMaxIODynamicDistributionType']) - self.assertEqual('CIM_DeviceMaskingGroup', - modifiedInstance['CreationClassName']) - - def test_get_iqn(self): - conn = FakeEcomConnection() - iqn = "iqn.1992-04.com.emc:600009700bca30c01b9c012000000003,t,0x0001" - ipprotocolendpoints = conn._enum_ipprotocolendpoint() - foundIqn = self.driver.utils.get_iqn(conn, ipprotocolendpoints[1]) - self.assertEqual(iqn, foundIqn) - - # bug #1605193 - Cleanup of Initiator Group fails - def test_check_ig_instance_name_present(self): - conn = FakeEcomConnection() - initiatorgroup = SE_InitiatorMaskingGroup() - initiatorgroup['CreationClassName'] = ( - self.data.initiatorgroup_creationclass) - initiatorgroup['DeviceID'] = self.data.initiatorgroup_id - initiatorgroup['SystemName'] = self.data.storage_system - initiatorgroup['ElementName'] = self.data.initiatorgroup_name - foundIg = self.driver.utils.check_ig_instance_name( - conn, initiatorgroup) - self.assertEqual(initiatorgroup, foundIg) - - # bug #1605193 - Cleanup of Initiator Group fails - def test_check_ig_instance_name_not_present(self): - conn = FakeEcomConnection() - initiatorgroup = None - with mock.patch.object(self.driver.utils, - 'get_existing_instance', - return_value=None): - foundIg = self.driver.utils.check_ig_instance_name( - conn, initiatorgroup) - self.assertIsNone(foundIg) - - @mock.patch.object( - utils.VMAXUtils, - '_is_sync_complete', - return_value=False) - def 
test_is_sync_complete(self, mock_sync): - conn = FakeEcomConnection() - syncname = SE_ConcreteJob() - syncname.classname = 'SE_StorageSynchronized_SV_SV' - syncname['CopyState'] = self.data.UNSYNCHRONIZED - issynched = self.driver.common.utils._is_sync_complete(conn, syncname) - self.assertFalse(issynched) - - def test_get_v3_storage_group_name_compression_disabled(self): - poolName = 'SRP_1' - slo = 'Diamond' - workload = 'DSS' - isCompressionDisabled = True - storageGroupName = self.driver.utils.get_v3_storage_group_name( - poolName, slo, workload, isCompressionDisabled) - self.assertEqual("OS-SRP_1-Diamond-DSS-CD-SG", storageGroupName) - - @mock.patch.object( - utils.VMAXUtils, - 'get_smi_version', - return_value=831) - def test_is_all_flash(self, mock_version): - conn = FakeEcomConnection() - array = '000197200056' - self.assertTrue(self.driver.utils.is_all_flash(conn, array)) - - def test_find_sync_sv_sv(self): - conn = FakeEcomConnection() - storageSystem = self.data.storage_system - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - extraSpecs = self.data.extra_specs - syncInstance = (conn.ReferenceNames( - volumeInstance.path, - ResultClass='SE_StorageSynchronized_SV_SV'))[0] - foundSyncInstance = self.driver.utils.find_sync_sv_by_volume( - conn, storageSystem, volumeInstance, extraSpecs) - self.assertEqual(syncInstance, foundSyncInstance) - - def test_get_assoc_v2_pool_from_vol(self): - conn = FakeEcomConnection() - volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", - 'DeviceID': "0123", - 'SystemName': "12345"} - - pool = conn.AssociatorNames( - volumeInstanceName, ResultClass='EMC_VirtualProvisioningPool') - poolName = self.driver.utils.get_assoc_v2_pool_from_volume( - conn, volumeInstanceName) - - self.assertEqual(pool[0]['ElementName'], poolName['ElementName']) - - def test_get_assoc_v2_pool_from_vol_fail(self): - conn = FakeEcomConnection() - 
volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", - 'DeviceID': "0123", - 'SystemName': "12345"} - - conn.AssociatorNames = mock.Mock(return_value={}) - - poolName = self.driver.utils.get_assoc_v2_pool_from_volume( - conn, volumeInstanceName) - - self.assertIsNone(poolName) - - def test_get_assoc_v3_pool_from_vol(self): - conn = FakeEcomConnection() - volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", - 'DeviceID': "0123", - 'SystemName': "12345"} - - pool = conn.AssociatorNames( - volumeInstanceName, ResultClass='Symm_SRPStoragePool') - poolName = self.driver.utils.get_assoc_v3_pool_from_volume( - conn, volumeInstanceName) - - self.assertEqual(pool[0]['ElementName'], poolName['ElementName']) - - def test_get_assoc_v3_pool_from_vol_fail(self): - conn = FakeEcomConnection() - volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", - 'DeviceID': "0123", - 'SystemName': "12345"} - - conn.AssociatorNames = mock.Mock(return_value={}) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.utils.get_assoc_v3_pool_from_volume, - conn, volumeInstanceName) - - def test_check_volume_no_fast_fail(self): - utils = self.driver.common.utils - initial_setup = {'volume_backend_name': 'FCFAST', - 'storagetype:fastpolicy': 'GOLD'} - - self.assertRaises(exception.VolumeBackendAPIException, - utils.check_volume_no_fast, - initial_setup) - - def test_check_volume_no_fast_pass(self): - utils = self.driver.common.utils - initial_setup = {'volume_backend_name': 'FCnoFAST', - 'storagetype:fastpolicy': None} - - self.assertTrue(utils.check_volume_no_fast( - initial_setup)) - - def test_check_volume_not_in_masking_view_pass(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - bindings = {'CreationClassName': 'Symm_StorageVolume', - 'SystemName': self.data.storage_system, - 'DeviceID': self.data.test_volume['device_id'], - 'SystemCreationClassName': 'Symm_StorageSystem'} - inst = FakeCIMInstanceName() - fake_inst = 
inst.fake_getinstancename('Symm_StorageVolume', bindings) - - sgInstanceNames = conn.AssociatorNames(fake_inst, - ResultClass= - 'CIM_DeviceMaskingGroup') - - conn.AssociatorNames = mock.Mock(return_value={}) - - mock.patch.object(self.driver.utils, 'get_storage_groups_from_volume', - return_value=sgInstanceNames) - - self.assertTrue( - utils.check_volume_not_in_masking_view( - conn, fake_inst, self.data.test_volume['device_id'])) - - def test_check_volume_not_in_masking_view_fail(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - bindings = {'CreationClassName': 'Symm_StorageVolume', - 'SystemName': self.data.storage_system, - 'DeviceID': self.data.test_volume['device_id'], - 'SystemCreationClassName': 'Symm_StorageSystem'} - inst = FakeCIMInstanceName() - fake_inst = inst.fake_getinstancename('Symm_StorageVolume', bindings) - - self.assertRaises(exception.VolumeBackendAPIException, - utils.check_volume_not_in_masking_view, - conn, fake_inst, self.data.test_volume['device_id']) - - def test_check_volume_not_replication_source_pass(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - self.assertTrue( - utils.check_volume_not_replication_source( - conn, self.data.storage_system_v3, - self.data.test_volume['device_id'])) - - def test_check_volume_not_replication_source_fail(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - replication_source = 'testReplicationSync' - - utils.get_associated_replication_from_source_volume = ( - mock.Mock(return_value=replication_source)) - - self.assertRaises( - exception.VolumeBackendAPIException, - utils.check_volume_not_replication_source, - conn, self.data.storage_system_v3, - self.data.test_volume['device_id']) - - def test_check_is_volume_in_cinder_managed_pool_fail(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", - 'DeviceID': "0123", - 'SystemName': "12345"} - - 
poolInstanceName = {} - poolInstanceName['InstanceID'] = "SATA_GOLD1" - deviceId = '0123' - - self.assertRaises( - exception.VolumeBackendAPIException, - utils.check_is_volume_in_cinder_managed_pool, - conn, volumeInstanceName, poolInstanceName, deviceId) - - def test_check_is_volume_in_cinder_managed_pool_pass(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - volumeInstanceName = {} - poolInstanceName = {} - poolInstanceName['InstanceID'] = "SATA_GOLD2" - deviceId = self.data.test_volume['device_id'] - - utils.get_assoc_v2_pool_from_volume = ( - mock.Mock(return_value=poolInstanceName)) - - self.assertTrue( - utils.check_is_volume_in_cinder_managed_pool( - conn, volumeInstanceName, poolInstanceName, deviceId)) - - def test_find_volume_by_device_id_on_array(self): - conn = FakeEcomConnection() - utils = self.driver.common.utils - - bindings = {'CreationClassName': 'Symm_StorageVolume', - 'SystemName': self.data.storage_system, - 'DeviceID': self.data.test_volume['device_id'], - 'SystemCreationClassName': 'Symm_StorageSystem'} - - inst = FakeCIMInstanceName() - fake_inst = inst.fake_getinstancename('Symm_StorageVolume', bindings) - utils.find_volume_by_device_id_on_array = mock.Mock( - return_value=fake_inst) - - volumeInstanceName = utils.find_volume_by_device_id_on_array( - self.data.storage_system, self.data.test_volume['device_id']) - - expectVolume = {} - expectVolume['CreationClassName'] = 'Symm_StorageVolume' - expectVolume['DeviceID'] = self.data.test_volume['device_id'] - expect = conn.GetInstance(expectVolume) - - provider_location = ast.literal_eval(expect['provider_location']) - bindings = provider_location['keybindings'] - - self.assertEqual(bindings, volumeInstanceName) - - def test_get_array_and_device_id(self): - utils = self.driver.common.utils - volume = self.data.test_volume.copy() - volume['volume_metadata'] = {'array': self.data.array_v3} - external_ref = {u'source-name': u'00002'} - array, device_id = 
utils.get_array_and_device_id( - volume, external_ref) - self.assertEqual(self.data.array_v3, array) - self.assertEqual('00002', device_id) - - def test_get_array_and_device_id_exception(self): - utils = self.driver.common.utils - volume = self.data.test_volume.copy() - volume['volume_metadata'] = {'array': self.data.array} - external_ref = {u'source-name': None} - self.assertRaises(exception.VolumeBackendAPIException, - utils.get_array_and_device_id, volume, external_ref) - - @mock.patch('builtins.open' if sys.version_info >= (3,) - else '__builtin__.open') - def test_insert_live_migration_record(self, mock_open): - volume = {'id': '12345678-87654321'} - tempdir = tempfile.mkdtemp() - utils.LIVE_MIGRATION_FILE = ( - tempdir + '/livemigrationarray') - lm_file_name = ("%(prefix)s-%(volid)s" - % {'prefix': utils.LIVE_MIGRATION_FILE, - 'volid': volume['id'][:8]}) - self.driver.utils.insert_live_migration_record(volume) - mock_open.assert_called_once_with(lm_file_name, "w") - self.driver.utils.delete_live_migration_record(volume) - shutil.rmtree(tempdir) - - def test_delete_live_migration_record(self): - volume = {'id': '12345678-87654321'} - tempdir = tempfile.mkdtemp() - utils.LIVE_MIGRATION_FILE = ( - tempdir + '/livemigrationarray') - lm_file_name = ("%(prefix)s-%(volid)s" - % {'prefix': utils.LIVE_MIGRATION_FILE, - 'volid': volume['id'][:8]}) - m = mock.mock_open() - with mock.patch('{}.open'.format(__name__), m, create=True): - with open(lm_file_name, "w") as f: - f.write('live migration details') - self.driver.utils.insert_live_migration_record(volume) - self.driver.utils.delete_live_migration_record(volume) - m.assert_called_once_with(lm_file_name, "w") - shutil.rmtree(tempdir) - - def test_get_live_migration_record(self): - volume = {'id': '12345678-87654321'} - tempdir = tempfile.mkdtemp() - utils.LIVE_MIGRATION_FILE = ( - tempdir + '/livemigrationarray') - lm_file_name = ("%(prefix)s-%(volid)s" - % {'prefix': utils.LIVE_MIGRATION_FILE, - 'volid': 
volume['id'][:8]}) - self.driver.utils.insert_live_migration_record(volume) - record = self.driver.utils.get_live_migration_record(volume) - self.assertEqual(volume['id'], record[0]) - os.remove(lm_file_name) - shutil.rmtree(tempdir) - - def test_get_live_migration_file_name(self): - volume = {'id': '12345678-87654321'} - lm_live_migration = self.driver.utils.get_live_migration_file_name( - volume) - self.assertIn('/livemigrationarray-12345678', lm_live_migration) - self.assertIn('/tmp/', lm_live_migration) - - -class VMAXCommonTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXCommonTest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'CommonTests' - configuration.config_group = 'CommonTests' - common.VMAXCommon._gather_info = mock.Mock() - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, - 'find_controller_configuration_service', - return_value=None) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_create_duplicate_volume(self, mock_pool): - common = self.driver.common - common.conn = FakeEcomConnection() - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - sourceInstance = common.conn.GetInstance(volumeInstanceName) - cloneName = "SS-V3-Vol" - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - targetInstance = common.conn.GetInstance(volumeInstanceName) - common.utils.find_volume_instance = mock.Mock( - return_value=targetInstance) - self.driver.common._get_or_create_storage_group_v3 = 
mock.Mock( - return_value = self.data.default_sg_instance_name) - duplicateVolumeInstance = self.driver.common._create_duplicate_volume( - sourceInstance, cloneName, extraSpecs) - self.assertIsNotNone(duplicateVolumeInstance) - - @mock.patch.object( - common.VMAXCommon, - 'get_target_wwns_from_masking_view', - return_value=["5000090000000000"]) - def test_get_target_wwn_list(self, mock_tw): - common = self.driver.common - common.conn = FakeEcomConnection() - targetWwns = common.get_target_wwns_list( - VMAXCommonData.storage_system, - VMAXCommonData.test_volume_v3, VMAXCommonData.connector) - self.assertListEqual(["5000090000000000"], targetWwns) - - @mock.patch.object( - common.VMAXCommon, - 'get_target_wwns_from_masking_view', - return_value=[]) - def test_get_target_wwn_list_empty(self, mock_tw): - common = self.driver.common - common.conn = FakeEcomConnection() - - self.assertRaises( - exception.VolumeBackendAPIException, - common.get_target_wwns_list, VMAXCommonData.storage_system, - VMAXCommonData.test_volume_v3, VMAXCommonData.connector) - - def test_cleanup_target(self): - common = self.driver.common - common.conn = FakeEcomConnection() - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - targetInstance = common.conn.GetInstance(volumeInstanceName) - repServiceInstanceName = ( - self.driver.utils.find_replication_service( - common.conn, self.data.storage_system)) - common.utils.find_sync_sv_by_volume = mock.Mock( - return_value=(None, None)) - - self.driver.common._cleanup_target( - repServiceInstanceName, targetInstance, extraSpecs) - - def test_get_ip_and_iqn(self): - conn = FakeEcomConnection() - endpoint = {} - ipprotocolendpoints = conn._enum_ipprotocolendpoint() - ip_and_iqn = self.driver.common.get_ip_and_iqn(conn, endpoint, - ipprotocolendpoints[0]) - 
ip_and_iqn = self.driver.common.get_ip_and_iqn(conn, endpoint, - ipprotocolendpoints[1]) - self.assertEqual( - 'iqn.1992-04.com.emc:600009700bca30c01b9c012000000003,t,0x0001', - ip_and_iqn['iqn']) - self.assertEqual( - '10.10.10.10', ip_and_iqn['ip']) - - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - def test_extend_volume(self, mock_compare): - self.driver.common.conn = FakeEcomConnection() - conn = FakeEcomConnection() - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = conn.GetInstance(volumeInstanceName) - new_size_gb = 5 - old_size_gbs = 1 - volumeName = 'extendVol' - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'pool': 'SRP_1', - 'workload': 'DSS', - 'slo': 'Bronze'} - self.driver.common._extend_volume( - self.data.test_volume, volumeInstance, volumeName, - new_size_gb, old_size_gbs, extraSpecs) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=(VMAXCommonData.extra_specs)) - def test_get_consistency_group_utils(self, mock_init, mock_pool): - common = self.driver.common - common.conn = FakeEcomConnection() - replicationService, storageSystem, extraSpecsList, isV3 = ( - common._get_consistency_group_utils( - common.conn, VMAXCommonData.test_CG)) - self.assertEqual( - self.data.extra_specs, extraSpecsList[0]['extraSpecs']) - - self.assertEqual(common.conn.EnumerateInstanceNames( - 'EMC_ReplicationService')[0], replicationService) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_volumetype_extraspecs', - return_value=(VMAXCommonData.multi_pool_extra_specs)) - def test_get_consistency_group_utils_multi_pool_enabled( - self, mock_init, mock_pool): - common = self.driver.common - 
common.conn = FakeEcomConnection() - replicationService, storageSystem, extraSpecsList, isV3 = ( - common._get_consistency_group_utils( - common.conn, VMAXCommonData.test_CG)) - self.assertEqual( - self.data.multi_pool_extra_specs, extraSpecsList[0]['extraSpecs']) - self.assertEqual(1, len(extraSpecsList)) - self.assertEqual(common.conn.EnumerateInstanceNames( - 'EMC_ReplicationService')[0], replicationService) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - utils.VMAXUtils, - 'get_volumetype_extraspecs', - return_value=(VMAXCommonData.multi_pool_extra_specs)) - def test_get_consistency_group_utils_multi_pool_multi_vp( - self, mock_init, mock_pool): - common = self.driver.common - common.conn = FakeEcomConnection() - test_CG_multi_vp = consistencygroup.ConsistencyGroup( - context=None, name='myCG1', id=uuid.uuid1(), - volume_type_id='abc,def', - status=fields.ConsistencyGroupStatus.AVAILABLE) - replicationService, storageSystem, extraSpecsList, isV3 = ( - common._get_consistency_group_utils( - common.conn, test_CG_multi_vp)) - self.assertEqual(2, len(extraSpecsList)) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=(VMAXCommonData.extra_specs)) - def test_get_consistency_group_utils_single_pool_multi_vp( - self, mock_init, mock_pool): - common = self.driver.common - common.conn = FakeEcomConnection() - test_CG_multi_vp = consistencygroup.ConsistencyGroup( - context=None, name='myCG1', id=uuid.uuid1(), - volume_type_id='abc,def', - status=fields.ConsistencyGroupStatus.AVAILABLE) - self.assertRaises( - exception.VolumeBackendAPIException, - common._get_consistency_group_utils, common.conn, - test_CG_multi_vp) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - 
return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=(VMAXCommonData.extra_specs)) - def test_get_consistency_group_utils_single_pool_single_vp( - self, mock_init, mock_pool): - common = self.driver.common - common.conn = FakeEcomConnection() - test_CG_single_vp = consistencygroup.ConsistencyGroup( - context=None, name='myCG1', id=uuid.uuid1(), - volume_type_id='abc,', - status=fields.ConsistencyGroupStatus.AVAILABLE) - replicationService, storageSystem, extraSpecsList, isV3 = ( - common._get_consistency_group_utils( - common.conn, test_CG_single_vp)) - self.assertEqual(1, len(extraSpecsList)) - - def test_update_consistency_group_name(self): - common = self.driver.common - cg_name = common._update_consistency_group_name( - VMAXCommonData.test_CG) - self.assertEqual('myCG1_%s' % fake_constants.UUID1, - cg_name) - - def test_update_consistency_group_name_truncate_name(self): - common = self.driver.common - test_cg = {'name': 'This_is_too_long_a_name_for_a_consistency_group', - 'id': fake_constants.UUID1, - 'volume_type_id': 'abc', - 'status': fields.ConsistencyGroupStatus.AVAILABLE} - cg_name = common._update_consistency_group_name(test_cg) - self.assertEqual( - 'This_is_too_listency_group_%s' % fake_constants.UUID1, - cg_name) - - # Bug 1401297: Cinder volumes can point at wrong backend vol - def test_find_lun_check_element_name(self): - common = self.driver.common - volume = self.data.test_volume - common.conn = FakeEcomConnection() - # Path 1: Volume is retrieved successfully - foundVolumeInstance = common._find_lun(volume) - self.assertEqual(foundVolumeInstance['ElementName'], - volume['id']) - # Path 2: Volume cannot be found - deleted_vol = self.data.deleted_volume - foundVolumeInstance = common._find_lun(deleted_vol) - self.assertIsNone(foundVolumeInstance) - - def populate_masking_dict_setup(self): - extraSpecs = {'storagetype:pool': u'gold_pool', - 'volume_backend_name': 
'GOLD_POOL_BE', - 'storagetype:array': u'1234567891011', - 'isV3': False, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:fastpolicy': u'GOLD'} - return extraSpecs - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_fast(self, mock_find_lun): - extraSpecs = self.populate_masking_dict_setup() - # If fast is enabled it will uniquely determine the SG and MV - # on the host along with the protocol(iSCSI) e.g. I - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-GOLD-FP-I-SG', maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-GOLD-FP-I-MV', maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_fast_more_than_14chars(self, mock_find_lun): - # If the length of the FAST policy name is greater than 14 chars - extraSpecs = self.populate_masking_dict_setup() - extraSpecs['storagetype:fastpolicy'] = 'GOLD_MORE_THAN_FOURTEEN_CHARS' - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-GOLD_MO__CHARS-FP-I-SG', - maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-GOLD_MO__CHARS-FP-I-MV', - maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_no_fast(self, mock_find_lun): - # If fast isn't enabled the pool will uniquely determine the SG and MV - # on the host along with the protocol(iSCSI) e.g. 
I - extraSpecs = self.populate_masking_dict_setup() - extraSpecs['storagetype:fastpolicy'] = None - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-gold_pool-I-SG', maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-gold_pool-I-MV', maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_fast_both_exceeding(self, mock_find_lun): - # If the length of the FAST policy name is greater than 14 chars and - # the length of the short host is more than 38 characters - extraSpecs = self.populate_masking_dict_setup() - connector = {'host': 'SHORT_HOST_MORE_THEN THIRTY_EIGHT_CHARACTERS'} - extraSpecs['storagetype:fastpolicy'] = ( - 'GOLD_MORE_THAN_FOURTEEN_CHARACTERS') - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertLessEqual(len(maskingViewDict['sgGroupName']), 64) - self.assertLessEqual(len(maskingViewDict['maskingViewName']), 64) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_no_fast_both_exceeding(self, mock_find_lun): - # If the length of the FAST policy name is greater than 14 chars and - # the length of the short host is more than 38 characters - extraSpecs = self.populate_masking_dict_setup() - connector = {'host': 'SHORT_HOST_MORE_THEN THIRTY_EIGHT_CHARACTERS'} - extraSpecs['storagetype:pool'] = ( - 'GOLD_POOL_MORE_THAN_SIXTEEN_CHARACTERS') - extraSpecs['storagetype:fastpolicy'] = None - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertLessEqual(len(maskingViewDict['sgGroupName']), 64) - self.assertLessEqual(len(maskingViewDict['maskingViewName']), 64) - 
- @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_no_slo(self, mock_find_lun): - extraSpecs = {'storagetype:pool': 'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': None, - 'storagetype:slo': None, - 'storagetype:array': '1234567891011', - 'isV3': True, - 'portgroupname': 'OS-portgroup-PG'} - self.populate_masking_dict_setup() - # If fast is enabled it will uniquely determine the SG and MV - # on the host along with the protocol(iSCSI) e.g. I - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-No_SLO-I-SG', maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-No_SLO-I-MV', maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_slo_NONE(self, mock_find_lun): - extraSpecs = {'storagetype:pool': 'SRP_1', - 'volume_backend_name': 'V3_BE', - 'storagetype:workload': 'NONE', - 'storagetype:slo': 'NONE', - 'storagetype:array': '1234567891011', - 'isV3': True, - 'portgroupname': 'OS-portgroup-PG'} - self.populate_masking_dict_setup() - # If fast is enabled it will uniquely determine the SG and MV - # on the host along with the protocol(iSCSI) e.g. 
I - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, self.data.connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-SRP_1-NONE-NONE-I-SG', maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-SRP_1-NONE-NONE-I-MV', - maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_v3(self, mock_find_lun): - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'VMAX_ISCSI_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS'} - connector = {'host': 'fakehost'} - self.populate_masking_dict_setup() - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertEqual('OS-fakehost-SRP_1-Diamond-DSS-I-SG', - maskingViewDict['sgGroupName']) - self.assertEqual('OS-fakehost-SRP_1-Diamond-DSS-I-MV', - maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_v3_compression(self, mock_find_lun): - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'COMPRESSION_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS', - 'storagetype:disablecompression': 'True'} - connector = self.data.connector - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-SRP_1-Diamond-DSS-I-CD-SG', - maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-SRP_1-Diamond-DSS-I-CD-MV', - maskingViewDict['maskingViewName']) - - @mock.patch.object( - volume_types, - 
'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'ISCSINoFAST'}) - @mock.patch.object( - volume_types, - 'get_volume_type_qos_specs', - return_value={'qos_specs': VMAXCommonData.test_volume_type_QOS}) - @mock.patch.object( - common.VMAXCommon, - '_register_config_file_from_config_group', - return_value=None) - @mock.patch.object( - utils.VMAXUtils, - 'isArrayV3', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - '_get_ecom_connection', - return_value=FakeEcomConnection()) - def test_initial_setup_qos(self, mock_conn, mock_isArrayV3, - mock_register, mock_volumetype_qos, - mock_volumetype_extra): - array_map = [ - {'EcomCACert': None, 'Workload': None, 'EcomServerIp': u'1.1.1.1', - 'PoolName': u'SRP_1', 'EcomPassword': u'pass', - 'SerialNumber': u'1234567891011', 'EcomServerPort': u'10', - 'PortGroup': u'OS-portgroup-PG', 'EcomUserName': u'user', - 'EcomUseSSL': False, 'EcomNoVerification': False, - 'FastPolicy': None, 'SLO': 'Bronze'}] - with mock.patch.object( - self.driver.common.utils, 'parse_file_to_get_array_map', - return_value=array_map): - with mock.patch.object( - self.driver.common.utils, 'extract_record', - return_value=array_map[0]): - extraSpecs = self.driver.common._initial_setup( - VMAXCommonData.test_volume_v3) - self.assertIsNotNone(extraSpecs) - self.assertEqual( - VMAXCommonData.test_volume_type_QOS.get('specs'), extraSpecs[ - 'qos']) - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict_v3_compression_no_slo(self, mock_find_lun): - # Compression is no applicable when there is no slo - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'COMPRESSION_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': None, - 'storagetype:workload': None, - 'storagetype:disablecompression': 'True'} - connector = self.data.connector - 
maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertEqual( - 'OS-fakehost-No_SLO-I-SG', maskingViewDict['sgGroupName']) - self.assertEqual( - 'OS-fakehost-No_SLO-I-MV', maskingViewDict['maskingViewName']) - - @mock.patch.object( - common.VMAXCommon, - '_migrate_volume_v3', - return_value=True) - def test_slo_workload_migration_compression_enabled(self, mock_migrate): - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'COMPRESSION_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS', - 'storagetype:disablecompression': 'True'} - new_type_extra_specs = extraSpecs.copy() - new_type_extra_specs.pop('storagetype:disablecompression', None) - new_type = {'extra_specs': new_type_extra_specs} - common = self.driver.common - common.conn = FakeEcomConnection() - volumeName = 'retype_compression' - - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - - self.assertTrue(self.driver.common._slo_workload_migration( - volumeInstance, self.data.test_source_volume_1_v3, - self.data.test_host_1_v3, volumeName, 'retyping', new_type, - extraSpecs)) - - @mock.patch.object( - common.VMAXCommon, - '_migrate_volume_v3', - return_value=True) - def test_slo_workload_migration_compression_disabled(self, mock_migrate): - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'COMPRESSION_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS'} - new_type_extra_specs = extraSpecs.copy() - new_type_extra_specs['storagetype:disablecompression'] = 'True' - new_type = {'extra_specs': new_type_extra_specs} - common = self.driver.common - common.conn = FakeEcomConnection() - 
volumeName = 'retype_compression' - - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - - self.assertTrue(self.driver.common._slo_workload_migration( - volumeInstance, self.data.test_source_volume_1_v3, - self.data.test_host_1_v3, volumeName, 'retyping', new_type, - extraSpecs)) - - @mock.patch.object( - common.VMAXCommon, - '_migrate_volume_v3', - return_value=True) - def test_slo_workload_migration_compression_false(self, mock_migrate): - # Cannot retype because both volume types have the same slo/workload - # and both are false for disable compression, one by omission - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'COMPRESSION_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS'} - new_type_extra_specs = extraSpecs.copy() - new_type_extra_specs['storagetype:disablecompression'] = 'false' - new_type = {'extra_specs': new_type_extra_specs} - common = self.driver.common - common.conn = FakeEcomConnection() - volumeName = 'retype_compression' - - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - - self.assertFalse(self.driver.common._slo_workload_migration( - volumeInstance, self.data.test_source_volume_1_v3, - self.data.test_host_1_v3, volumeName, 'retyping', new_type, - extraSpecs)) - - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.extra_specs) - def test_failover_not_replicated(self, mock_setup): - common = self.driver.common - common.conn = FakeEcomConnection() - volumes = [self.data.test_volume] - # Path 1: Failover non replicated volume - verify_update_fo = [{'volume_id': volumes[0]['id'], - 'updates': {'status': 'error'}}] - secondary_id, volume_update = ( - 
common.failover_host('context', volumes, None)) - self.assertEqual(verify_update_fo, volume_update) - # Path 2: Failback non replicated volume - # Path 2a: Volume still available on primary - common.failover = True - verify_update_fb1 = [{'volume_id': volumes[0]['id'], - 'updates': {'status': 'available'}}] - secondary_id, volume_update_1 = ( - common.failover_host('context', volumes, 'default')) - self.assertEqual(verify_update_fb1, volume_update_1) - # Path 2a: Volume not still available on primary - with mock.patch.object(common, '_find_lun', - return_value=None): - common.failover = True - secondary_id, volume_update_2 = ( - common.failover_host('context', volumes, 'default')) - self.assertEqual(verify_update_fo, volume_update_2) - - # create snapshot and immediately delete it fails when snapshot > 50GB - @mock.patch.object( - utils.VMAXUtils, - 'get_v3_default_sg_instance_name', - return_value=(None, None, VMAXCommonData.default_sg_instance_name)) - @mock.patch.object( - utils.VMAXUtils, - 'is_clone_licensed', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'V3_BE'}) - @mock.patch.object( - common.VMAXCommon, - '_get_ecom_connection', - return_value=FakeEcomConnection()) - def test_create_and_delete_snapshot_100GB( - self, mock_conn, mock_extraspecs, mock_pool, mock_licence, - mock_sg): - common = self.driver.common - snapshot = self.data.test_snapshot_v3.copy() - snapshot['volume_size'] = '100' - with mock.patch.object(common, '_initial_setup', - return_value=self.data.extra_specs): - self.driver.create_snapshot(snapshot) - self.driver.delete_snapshot(snapshot) - - @mock.patch.object( - masking.VMAXMasking, - 'get_associated_masking_groups_from_device', - return_value=[VMAXCommonData.sg_instance_name]) - @mock.patch.object( - masking.VMAXMasking, - 
'get_masking_view_from_storage_group', - return_value=[{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV'}]) - def test_is_volume_multiple_masking_views_false(self, mock_mv_from_sg, - mock_sg_from_dev): - common = self.driver.common - common.conn = FakeEcomConnection() - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - self.assertFalse( - common._is_volume_multiple_masking_views(volumeInstance)) - - @mock.patch.object( - masking.VMAXMasking, - 'get_associated_masking_groups_from_device', - return_value=[VMAXCommonData.sg_instance_name]) - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_from_storage_group', - return_value=[{'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV'}, - {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-bronze-I-MV'}]) - def test_is_volume_multiple_masking_views_true(self, mock_mv_from_sg, - mock_sg_from_dev): - common = self.driver.common - common.conn = FakeEcomConnection() - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - self.assertTrue( - common._is_volume_multiple_masking_views(volumeInstance)) - - @mock.patch.object( - masking.VMAXMasking, - '_get_storage_group_from_masking_view_instance', - return_value=VMAXCommonData.sg_instance_name) - def test_get_storage_group_from_source(self, mock_sg_from_mv): - common = self.driver.common - common.conn = FakeEcomConnection() - mv_instance_name = {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV'} - deviceInfoDict = {'controller': mv_instance_name} - self.assertEqual(VMAXCommonData.sg_instance_name, - common._get_storage_group_from_source( - deviceInfoDict)) - - @mock.patch.object( - masking.VMAXMasking, - '_get_storage_group_from_masking_view_instance', 
- return_value=VMAXCommonData.sg_instance_name) - def test_get_storage_group_from_source_except(self, mock_sg_from_mv): - common = self.driver.common - common.conn = FakeEcomConnection() - deviceInfoDict = {} - self.assertRaises( - exception.VolumeBackendAPIException, - common._get_storage_group_from_source, deviceInfoDict) - - @mock.patch.object( - masking.VMAXMasking, - 'get_port_group_from_masking_view_instance', - return_value={'CreationClassName': 'CIM_TargetMaskingGroup', - 'ElementName': 'OS-portgroup-PG'}) - def test_get_port_group_from_source(self, mock_pg_from_mv): - common = self.driver.common - common.conn = FakeEcomConnection() - pg_instance_name = {'CreationClassName': 'CIM_TargetMaskingGroup', - 'ElementName': 'OS-portgroup-PG'} - mv_instance_name = {'CreationClassName': 'Symm_LunMaskingView', - 'ElementName': 'OS-fakehost-gold-I-MV'} - deviceInfoDict = {'controller': mv_instance_name} - self.assertEqual(pg_instance_name, - common._get_port_group_from_source( - deviceInfoDict)) - - @mock.patch.object( - masking.VMAXMasking, - 'get_port_group_from_masking_view_instance', - return_value={'CreationClassName': 'CIM_TargetMaskingGroup', - 'ElementName': 'OS-portgroup-PG'}) - def test_get_port_group_from_source_except(self, mock_pg_from_mv): - common = self.driver.common - common.conn = FakeEcomConnection() - deviceInfoDict = {} - self.assertRaises( - exception.VolumeBackendAPIException, - common._get_port_group_from_source, deviceInfoDict) - - def test_manage_existing_get_size(self): - common = self.driver.common - common.conn = FakeEcomConnection() - - gb_size = 2 - exp_size = 2 - volume = {} - metadata = {'key': 'array', - 'value': '12345'} - volume['volume_metadata'] = [metadata] - volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011' - external_ref = {'source-name': '0123'} - volumeInstanceName = {'CreationClassName': "Symm_StorageVolume", - 'DeviceID': "0123", - 'SystemName': "12345"} - - utils = self.driver.common.utils - utils.get_volume_size 
= mock.Mock( - return_value=int(gb_size * units.Gi)) - utils.find_volume_by_device_id_on_array = mock.Mock( - return_value=volumeInstanceName) - - size = self.driver.manage_existing_get_size(volume, external_ref) - self.assertEqual(exp_size, size) - - def test_manage_existing_get_size_fail(self): - common = self.driver.common - common.conn = FakeEcomConnection() - - gb_size = 2 - volume = {} - metadata = {'key': 'array', - 'value': '12345'} - volume['volume_metadata'] = [metadata] - volume['host'] = 'HostX@Backend#Bronze+SRP_1+1234567891011' - external_ref = {'source-name': '0123'} - - utils = self.driver.common.utils - utils.get_volume_size = mock.Mock( - return_value=int(gb_size * units.Gi)) - - utils.find_volume_by_device_id_on_array = mock.Mock( - return_value=None) - - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.common.manage_existing_get_size, - volume, external_ref) - - def test_set_volume_replication_if_enabled(self): - common = self.driver.common - common.conn = FakeEcomConnection() - - volume = {} - provider_location = {} - - replication_status = 'replicated' - replication_driver_data = 'replication_data' - - model_update = {} - model_update.update( - {'replication_status': replication_status}) - model_update.update( - {'replication_driver_data': six.text_type( - replication_driver_data)}) - - extra_specs = self.data.extra_specs_is_re - - common.setup_volume_replication = mock.Mock( - return_value=(replication_status, replication_driver_data)) - - new_model_update = common.set_volume_replication_if_enabled( - common.conn, extra_specs, volume, provider_location) - - self.assertEqual(new_model_update, model_update) - - @mock.patch.object( - common.VMAXCommon, - 'set_volume_replication_if_enabled', - return_value={'replication_status': 'replicated', - 'replication_driver_data': 'driver_data', - 'display_name': 'vol1', - 'provider_location': - VMAXCommonData.provider_location3}) - @mock.patch.object( - utils.VMAXUtils, - 
'rename_volume', - return_value=VMAXCommonData.manage_vol) - @mock.patch.object( - utils.VMAXUtils, - 'check_is_volume_in_cinder_managed_pool', - return_value=True) - @mock.patch.object( - utils.VMAXUtils, - 'check_volume_not_replication_source', - return_value=True) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=('cinder_pool', 'vmax_storage_system')) - @mock.patch.object( - utils.VMAXUtils, - 'check_volume_not_in_masking_view', - return_value=True) - @mock.patch.object( - utils.VMAXUtils, - 'find_volume_by_device_id_on_array', - return_value=VMAXCommonData.test_volume) - @mock.patch.object( - utils.VMAXUtils, - 'check_volume_no_fast', - return_value=True) - @mock.patch.object( - utils.VMAXUtils, - 'get_array_and_device_id', - return_value=('12345', '1')) - @mock.patch.object( - common.VMAXCommon, - '_get_ecom_connection', - return_value=FakeEcomConnection()) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=VMAXCommonData.extra_specs_is_re) - def test_manage_existing(self, mock_setup, mock_ecom, mock_ids, - mock_vol_fast, mock_vol_by_deviceId, - mock_vol_in_mv, mock_pool_sg, mock_vol_rep_src, - mock_vol_in_mng_pool, mock_rename_vol, - mock_set_vol_rep): - common = self.driver.common - volume = EMC_StorageVolume() - volume.name = 'vol1' - volume.display_name = 'vol1' - external_ref = {} - - model_update = { - 'replication_status': 'replicated', - 'replication_driver_data': 'driver_data', - 'display_name': 'vol1', - 'provider_location': six.text_type( - self.data.provider_location_manage)} - - new_model_update = common.manage_existing(volume, - external_ref) - - self.assertEqual(model_update, new_model_update) - - -class VMAXProvisionTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXProvisionTest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'ProvisionTests' - configuration.config_group = 'ProvisionTests' - 
common.VMAXCommon._gather_info = mock.Mock() - driver = iscsi.VMAXISCSIDriver( - configuration=configuration) - driver.db = FakeDB() - self.driver = driver - self.driver.utils = utils.VMAXUtils(object) - - @mock.patch.object( - provision.VMAXProvision, - 'remove_device_from_storage_group') - def test_remove_device_from_storage_group(self, mock_remove): - conn = FakeEcomConnection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeName = 'vol1' - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - masking = self.driver.common.masking - volumeInstance = conn.GetInstance(volumeInstanceName) - storageGroupName = self.data.storagegroupname - storageGroupInstanceName = ( - self.driver.common.utils.find_storage_masking_group( - conn, controllerConfigService, storageGroupName)) - numVolsInSG = 2 - masking._multiple_vols_in_SG( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, numVolsInSG, extraSpecs) - masking.provision.remove_device_from_storage_group.assert_called_with( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstanceName, volumeName, extraSpecs) - - def test_add_members_to_masking_group(self): - conn = FakeEcomConnection() - controllerConfigService = ( - self.driver.utils.find_controller_configuration_service( - conn, self.data.storage_system)) - volumeInstanceName = ( - conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeName = 'vol1' - extraSpecs = {'volume_backend_name': 'V3_BE', - 'isV3': True, - 'storagetype:pool': 'SRP_1', - 'storagetype:workload': 'DSS', - 'storagetype:slo': 'Bronze'} - volumeInstance = conn.GetInstance(volumeInstanceName) - storageGroupName = self.data.storagegroupname - storageGroupInstanceName = ( - 
self.driver.common.utils.find_storage_masking_group( - conn, controllerConfigService, storageGroupName)) - masking = self.driver.common.masking - masking.provision.add_members_to_masking_group = mock.Mock() - masking.add_volume_to_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, storageGroupName, extraSpecs) - masking.provision.add_members_to_masking_group.assert_called_with( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstanceName, volumeName, extraSpecs) - - def test_find_consistency_group(self): - common = self.driver.common - common.conn = FakeEcomConnection() - repserv = common.conn.EnumerateInstanceNames( - "EMC_ReplicationService")[0] - cgInstanceName, cgName = common._find_consistency_group( - repserv, VMAXCommonData.test_CG['id']) - self.assertEqual(VMAXCommonData.replicationgroup_creationclass, - cgInstanceName['CreationClassName']) - self.assertEqual(VMAXCommonData.test_CG['id'], cgName) - - -class VMAXISCSITest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXISCSITest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'iSCSITests' - configuration.config_group = 'iSCSITests' - common.VMAXCommon._gather_info = mock.Mock() - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'hostlunid': 1}, False, {})) - def test_smis_get_iscsi_properties(self, mock_device): - iqns_and_ips = ( - [{'iqn': 'iqn.1992-04.com.emc:50000973f006dd80,t,0x0001', - 'ip': '10.10.0.50'}, - {'iqn': 'iqn.1992-04.com.emc:50000973f006dd81,t,0x0001', - 'ip': '10.10.0.51'}]) - properties = self.driver.smis_get_iscsi_properties( - self.data.test_volume, self.data.connector, 
iqns_and_ips, True) - self.assertEqual([1, 1], properties['target_luns']) - self.assertEqual(['iqn.1992-04.com.emc:50000973f006dd80', - 'iqn.1992-04.com.emc:50000973f006dd81'], - properties['target_iqns']) - self.assertEqual(['10.10.0.50:3260', '10.10.0.51:3260'], - properties['target_portals']) - - @mock.patch.object( - common.VMAXCommon, - 'find_device_number', - return_value=({'hostlunid': 1, - 'storagesystem': VMAXCommonData.storage_system}, - False, {})) - @mock.patch.object( - common.VMAXCommon, - 'initialize_connection', - return_value=VMAXCommonData.iscsi_device_info) - def test_initialize_connection_snapshot(self, mock_conn, mock_num): - data = self.driver.initialize_connection_snapshot( - self.data.test_snapshot_v3, self.data.connector) - self.assertEqual('iscsi', data['driver_volume_type']) - self.assertEqual(1, data['data']['target_lun']) - - @mock.patch.object( - common.VMAXCommon, - '_unmap_lun') - def test_terminate_connection_snapshot(self, mock_unmap): - common = self.driver.common - common.conn = FakeEcomConnection() - self.driver.terminate_connection_snapshot( - self.data.test_snapshot_v3, self.data.connector) - common._unmap_lun.assert_called_once_with( - self.data.test_snapshot_v3, self.data.connector) - - -class EMCV3ReplicationTest(test.TestCase): - - def setUp(self): - self.data = VMAXCommonData() - - self.tempdir = tempfile.mkdtemp() - super(EMCV3ReplicationTest, self).setUp() - self.config_file_path = None - self.create_fake_config_file_v3() - self.addCleanup(self._cleanup) - self.flags(rpc_backend='oslo_messaging._drivers.impl_fake') - - self.set_configuration() - - def set_configuration(self): - self.replication_device = [ - {'target_device_id': u'000195900551', - 'remote_port_group': self.data.port_group, - 'remote_pool': 'SRP_1', - 'rdf_group_label': self.data.rdf_group, - 'allow_extend': 'True'}] - self.configuration = mock.Mock( - replication_device=self.replication_device, - cinder_emc_config_file=self.config_file_path, - 
config_group='V3') - - def safe_get(key): - return getattr(self.configuration, key) - self.configuration.safe_get = safe_get - - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - self.fake_ecom_connection) - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(utils.VMAXUtils, 'isArrayV3', - self.fake_is_v3) - self.mock_object(common.VMAXCommon, - '_get_multi_pool_support_enabled_flag', - self.fake_get_multi_pool) - self.mock_object(utils.VMAXUtils, - 'get_existing_instance', - self.fake_get_existing_instance) - self.mock_object(cinder_utils, 'get_bool_param', - return_value=False) - self.patcher = mock.patch( - 'oslo_service.loopingcall.FixedIntervalLoopingCall', - new=unit_utils.ZeroIntervalLoopingCall) - self.patcher.start() - - driver = fc.VMAXFCDriver(configuration=self.configuration) - driver.db = FakeDB() - self.driver = driver - - def create_fake_config_file_v3(self): - doc = minidom.Document() - emc = doc.createElement("EMC") - doc.appendChild(emc) - - ecomserverip = doc.createElement("EcomServerIp") - ecomserveriptext = doc.createTextNode("1.1.1.1") - emc.appendChild(ecomserverip) - ecomserverip.appendChild(ecomserveriptext) - - ecomserverport = doc.createElement("EcomServerPort") - ecomserverporttext = doc.createTextNode("10") - emc.appendChild(ecomserverport) - ecomserverport.appendChild(ecomserverporttext) - - ecomusername = doc.createElement("EcomUserName") - ecomusernametext = doc.createTextNode("user") - emc.appendChild(ecomusername) - ecomusername.appendChild(ecomusernametext) - - ecompassword = doc.createElement("EcomPassword") - ecompasswordtext = doc.createTextNode("pass") - emc.appendChild(ecompassword) - ecompassword.appendChild(ecompasswordtext) - - portgroup = doc.createElement("PortGroup") - portgrouptext = doc.createTextNode(self.data.port_group) - portgroup.appendChild(portgrouptext) - - pool = doc.createElement("Pool") - pooltext 
= doc.createTextNode("SRP_1") - emc.appendChild(pool) - pool.appendChild(pooltext) - - array = doc.createElement("Array") - arraytext = doc.createTextNode("1234567891011") - emc.appendChild(array) - array.appendChild(arraytext) - - slo = doc.createElement("ServiceLevel") - slotext = doc.createTextNode("Bronze") - emc.appendChild(slo) - slo.appendChild(slotext) - - workload = doc.createElement("Workload") - workloadtext = doc.createTextNode("DSS") - emc.appendChild(workload) - workload.appendChild(workloadtext) - - portgroups = doc.createElement("PortGroups") - portgroups.appendChild(portgroup) - emc.appendChild(portgroups) - - timeout = doc.createElement("Timeout") - timeouttext = doc.createTextNode("0") - emc.appendChild(timeout) - timeout.appendChild(timeouttext) - - filename = 'cinder_emc_config_V3.xml' - - self.config_file_path = self.tempdir + '/' + filename - - f = open(self.config_file_path, 'w') - doc.writexml(f) - f.close() - - def fake_ecom_connection(self): - self.conn = FakeEcomConnection() - return self.conn - - def fake_is_v3(self, conn, serialNumber): - return True - - def fake_get_multi_pool(self): - return False - - def fake_get_existing_instance(self, conn, instancename): - return instancename - - def _cleanup(self): - bExists = os.path.exists(self.config_file_path) - if bExists: - os.remove(self.config_file_path) - shutil.rmtree(self.tempdir) - - @mock.patch.object( - common.VMAXCommon, - 'get_target_instance', - return_value='volume_instance') - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_setup_volume_replication_success(self, mock_pool, - mock_target): - common = self.driver.common - common.conn = self.fake_ecom_connection() - sourceVolume = self.data.test_volume_re - volumeDict = self.data.provider_location - with mock.patch.object( - common, 'create_remote_replica', - return_value=(0, self.data.provider_location2)): - extraSpecs = 
self.data.extra_specs_is_re - rep_status, rep_driver_data = common.setup_volume_replication( - common.conn, sourceVolume, volumeDict, extraSpecs) - self.assertEqual(fields.ReplicationStatus.ENABLED, rep_status) - self.assertEqual(self.data.keybindings2, rep_driver_data) - - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_setup_volume_replication_failed(self, mock_pool): - common = self.driver.common - common.conn = self.fake_ecom_connection() - sourceVolume = self.data.test_volume_re - volumeDict = self.data.provider_location - extraSpecs = self.data.extra_specs_is_re - self.assertRaises( - exception.VolumeBackendAPIException, - common.setup_volume_replication, common.conn, sourceVolume, - volumeDict, extraSpecs) - - @mock.patch.object( - common.VMAXCommon, - '_cleanup_remote_target') - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_cleanup_lun_replication(self, mock_pool, mock_delete): - common = self.driver.common - common.conn = self.fake_ecom_connection() - volume = self.data.test_volume_re - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - sourceInstance = common.conn.GetInstance(volumeInstanceName) - extraSpecs = self.data.extra_specs_is_re - common.cleanup_lun_replication(common.conn, volume, volume['name'], - sourceInstance, extraSpecs) - with mock.patch.object( - common.utils, 'find_volume_instance', - return_value={'ElementName': self.data.test_volume_re['id']}): - targetInstance = sourceInstance - repServiceInstanceName = common.conn.EnumerateInstanceNames( - 'EMC_ReplicationService')[0] - rep_config = common.utils.get_replication_config( - self.replication_device) - repExtraSpecs = common._get_replication_extraSpecs( - extraSpecs, rep_config) - common._cleanup_remote_target.assert_called_once_with( - common.conn, 
repServiceInstanceName, sourceInstance, - targetInstance, extraSpecs, repExtraSpecs) - - def test_get_rdf_details(self): - common = self.driver.common - conn = self.fake_ecom_connection() - rdfGroupInstance, repServiceInstanceName = ( - common.get_rdf_details(conn, self.data.storage_system)) - self.assertEqual(rdfGroupInstance, self.data.srdf_group_instance) - self.assertEqual(repServiceInstanceName, - conn.EnumerateInstanceNames( - 'EMC_ReplicationService')[0]) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - @mock.patch.object( - provision_v3.VMAXProvisionV3, - '_check_sync_state', - return_value=6) - def test_failover_volume_success(self, mock_sync, mock_vol_types): - volumes = [self.data.test_volume_re] - rep_data = self.data.replication_driver_data - loc = six.text_type(self.data.provider_location) - rep_data = six.text_type(rep_data) - check_update_list = ( - [{'volume_id': self.data.test_volume_re['id'], - 'updates': - {'replication_status': fields.ReplicationStatus.ENABLED, - 'provider_location': loc, - 'replication_driver_data': rep_data}}]) - self.driver.common.failover = True - secondary_id, volume_update_list = ( - self.driver.failover_host('context', volumes, 'default')) - self.assertEqual(check_update_list, volume_update_list) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - def test_failover_volume_failed(self, mock_vol_types): - fake_vol = self.data.test_failed_re_volume - fake_location = six.text_type( - {'keybindings': 'fake_keybindings'}) - fake_volumes = [fake_vol] - check_update_list = ( - [{'volume_id': fake_vol['id'], - 'updates': - {'replication_status': ( - fields.ReplicationStatus.FAILOVER_ERROR), - 'provider_location': fake_location, - 'replication_driver_data': 'fake_data'}}]) - secondary_id, 
volume_update_list = ( - self.driver.failover_host('context', fake_volumes, None)) - self.assertEqual(check_update_list, volume_update_list) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - @mock.patch.object( - provision_v3.VMAXProvisionV3, - '_check_sync_state', - return_value=12) - def test_failback_volume_success(self, mock_sync, mock_vol_types): - volumes = [self.data.test_volume_re] - provider_location = self.data.provider_location - loc = six.text_type(provider_location) - rep_data = six.text_type(self.data.replication_driver_data) - check_update_list = ( - [{'volume_id': self.data.test_volume_re['id'], - 'updates': - {'replication_status': fields.ReplicationStatus.ENABLED, - 'replication_driver_data': rep_data, - 'provider_location': loc}}]) - self.driver.common.failover = True - secondary_id, volume_update_list = ( - self.driver.failover_host('context', volumes, 'default')) - six.assertCountEqual(self, check_update_list, volume_update_list) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - def test_failback_volume_failed(self, mock_vol_types): - fake_vol = self.data.test_failed_re_volume - fake_location = six.text_type( - {'keybindings': 'fake_keybindings'}) - fake_volumes = [fake_vol] - check_update_list = ( - [{'volume_id': fake_vol['id'], - 'updates': - {'replication_status': ( - fields.ReplicationStatus.FAILOVER_ERROR), - 'provider_location': fake_location, - 'replication_driver_data': 'fake_data'}}]) - self.driver.common.failover = True - secondary_id, volume_update_list = ( - self.driver.failover_host('context', fake_volumes, 'default')) - self.assertEqual(check_update_list, volume_update_list) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 
'VMAXReplication', - 'replication_enabled': ' True'}) - @mock.patch.object( - utils.VMAXUtils, - 'compare_size', - return_value=0) - @mock.patch.object( - common.VMAXCommon, - 'add_volume_to_replication_group', - return_value=VMAXCommonData.re_storagegroup) - @mock.patch.object( - common.VMAXCommon, - '_create_remote_replica', - return_value=(0, VMAXCommonData.provider_location)) - def test_extend_volume_is_replicated_success( - self, mock_replica, mock_sg, mock_size, mock_vol_types): - common = self.driver.common - common.conn = self.fake_ecom_connection() - volume = self.data.test_volume_re - new_size = '2' - newSizeBits = common.utils.convert_gb_to_bits(new_size) - extendedVolumeInstance = self.data.volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - extendedVolumeSize = common.utils.get_volume_size( - self.conn, extendedVolumeInstance) - self.driver.extend_volume(volume, new_size) - common.utils.compare_size.assert_called_once_with( - newSizeBits, extendedVolumeSize) - - @mock.patch.object( - common.VMAXCommon, - '_create_remote_replica', - return_value=(1, 'error')) - def test_extend_volume_is_replicated_failed(self, mock_replica): - volume = self.data.test_volume_re - new_size = '2' - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.extend_volume, volume, new_size) - - @mock.patch.object( - masking.VMAXMasking, - 'remove_and_reset_members') - @mock.patch.object( - common.VMAXCommon, - 'add_volume_to_replication_group', - return_value=VMAXCommonData.re_storagegroup) - @mock.patch.object( - provision_v3.VMAXProvisionV3, - 'get_volume_dict_from_job', - return_value=VMAXCommonData.provider_location) - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_create_remote_replica_success(self, mock_pool, mock_volume_dict, - mock_sg, mock_return): - common = self.driver.common - common.conn = self.fake_ecom_connection() - 
repServiceInstanceName = common.conn.EnumerateInstanceNames( - 'EMC_ReplicationService')[0] - rdfGroupInstance = self.data.srdf_group_instance - sourceVolume = self.data.test_volume_re - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - sourceInstance = common.conn.GetInstance(volumeInstanceName) - targetInstance = sourceInstance - extraSpecs = self.data.extra_specs_is_re - rep_config = common.utils.get_replication_config( - self.replication_device) - referenceDict = VMAXCommonData.provider_location - rc, rdfDict = common.create_remote_replica( - common.conn, repServiceInstanceName, rdfGroupInstance, - sourceVolume, sourceInstance, targetInstance, - extraSpecs, rep_config) - self.assertEqual(referenceDict, rdfDict) - - @mock.patch.object( - masking.VMAXMasking, - 'remove_and_reset_members') - @mock.patch.object( - common.VMAXCommon, - '_cleanup_remote_target') - @mock.patch.object( - common.VMAXCommon, - '_get_pool_and_storage_system', - return_value=(None, VMAXCommonData.storage_system)) - def test_create_remote_replica_failed(self, mock_pool, - mock_cleanup, mock_return): - common = self.driver.common - common.conn = self.fake_ecom_connection() - repServiceInstanceName = common.conn.EnumerateInstanceNames( - 'EMC_ReplicationService')[0] - rdfGroupInstance = self.data.srdf_group_instance - sourceVolume = self.data.test_volume_re - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - sourceInstance = common.conn.GetInstance(volumeInstanceName) - targetInstance = sourceInstance - extraSpecs = self.data.extra_specs_is_re - rep_config = common.utils.get_replication_config( - self.replication_device) - repExtraSpecs = common._get_replication_extraSpecs( - extraSpecs, rep_config) - with mock.patch.object(common.provisionv3, - '_create_element_replica_extra_params', - return_value=(9, 'error')): - with mock.patch.object(common.utils, - 'wait_for_job_complete', - return_value=(9, 'error')): - 
self.assertRaises( - exception.VolumeBackendAPIException, - common.create_remote_replica, common.conn, - repServiceInstanceName, rdfGroupInstance, sourceVolume, - sourceInstance, targetInstance, extraSpecs, rep_config) - common._cleanup_remote_target.assert_called_once_with( - common.conn, repServiceInstanceName, sourceInstance, - targetInstance, extraSpecs, repExtraSpecs) - - @mock.patch.object( - masking.VMAXMasking, - 'get_masking_view_from_storage_group', - return_value=None) - def test_add_volume_to_replication_group_success(self, mock_mv): - common = self.driver.common - common.conn = self.fake_ecom_connection() - controllerConfigService = ( - common.utils.find_controller_configuration_service( - common.conn, self.data.storage_system)) - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - volumeName = self.data.test_volume_re['name'] - extraSpecs = self.data.extra_specs_is_re - with mock.patch.object( - common.utils, 'find_storage_masking_group', - return_value=self.data.default_sg_instance_name): - common.add_volume_to_replication_group( - common.conn, controllerConfigService, - volumeInstance, volumeName, extraSpecs) - - def test_add_volume_to_replication_group_failed(self): - common = self.driver.common - common.conn = self.fake_ecom_connection() - controllerConfigService = ( - common.utils.find_controller_configuration_service( - common.conn, self.data.storage_system)) - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - volumeName = self.data.test_volume_re['name'] - extraSpecs = self.data.extra_specs_is_re - with mock.patch.object( - common.utils, 'find_storage_masking_group', - return_value=None): - self.assertRaises(exception.VolumeBackendAPIException, - common.add_volume_to_replication_group, - common.conn, controllerConfigService, - volumeInstance, 
volumeName, extraSpecs) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - @mock.patch.object( - common.VMAXCommon, - 'add_volume_to_replication_group') - @mock.patch.object( - common.VMAXCommon, - '_create_v3_volume', - return_value=(0, VMAXCommonData.provider_location, - VMAXCommonData.storage_system)) - def test_create_replicated_volume_success(self, mock_create, mock_add, - mock_vol_types): - model_update = self.driver.create_volume( - self.data.test_volume_re) - rep_status = model_update['replication_status'] - rep_data = model_update['replication_driver_data'] - self.assertEqual(fields.ReplicationStatus.ENABLED, - rep_status) - self.assertIsInstance(rep_data, six.text_type) - self.assertIsNotNone(rep_data) - - @mock.patch.object( - common.VMAXCommon, - 'setup_volume_replication', - return_value=(fields.ReplicationStatus.ENABLED, - {'provider_location': - VMAXCommonData.provider_location})) - @mock.patch.object( - common.VMAXCommon, - '_initial_setup', - return_value=(VMAXCommonData.extra_specs_is_re)) - @mock.patch.object( - common.VMAXCommon, - '_sync_check') - @mock.patch.object( - common.VMAXCommon, - 'add_volume_to_replication_group') - @mock.patch.object( - common.VMAXCommon, - '_create_cloned_volume', - return_value=VMAXCommonData.provider_location) - def test_create_replicated_volume_from_snap_success( - self, mock_create, mock_add, mock_sync_check, mock_setup, - mock_vol_rep): - model_update = self.driver.create_volume_from_snapshot( - self.data.test_volume_re, self.data.test_snapshot_re) - rep_status = model_update['replication_status'] - rep_data = model_update['replication_driver_data'] - self.assertEqual(fields.ReplicationStatus.ENABLED, - rep_status) - self.assertIsInstance(rep_data, six.text_type) - self.assertTrue(rep_data) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - 
return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - @mock.patch.object( - common.VMAXCommon, - '_cleanup_replication_source') - @mock.patch.object( - common.VMAXCommon, - '_create_v3_volume', - return_value=(0, VMAXCommonData.provider_location, - VMAXCommonData.storage_system)) - def test_create_replicated_volume_failed(self, mock_create, mock_cleanup, - mock_vol_types): - common = self.driver.common - common.conn = self.fake_ecom_connection() - volumeName = self.data.test_volume_re['id'] - volumeDict = self.data.provider_location - extraSpecs = self.data.extra_specs_is_re - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.create_volume, self.data.test_volume_re) - common._cleanup_replication_source.assert_called_once_with( - common.conn, volumeName, volumeDict, extraSpecs) - - @mock.patch.object( - common.VMAXCommon, - '_delete_from_pool_v3') - def test_cleanup_replication_source(self, mock_delete): - common = self.driver.common - common.conn = self.fake_ecom_connection() - volumeName = self.data.test_volume_re['name'] - volumeDict = self.data.provider_location - extraSpecs = self.data.extra_specs_is_re - storageConfigService = ( - common.utils.find_storage_configuration_service( - common.conn, self.data.storage_system)) - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - sourceInstance = common.conn.GetInstance(volumeInstanceName) - deviceId = self.data.test_volume_re['device_id'] - common._cleanup_replication_source( - common.conn, volumeName, volumeDict, extraSpecs) - common._delete_from_pool_v3.assert_called_once_with( - storageConfigService, sourceInstance, - volumeName, deviceId, extraSpecs) - - @mock.patch.object( - common.VMAXCommon, - '_delete_from_pool_v3') - def test_cleanup_remote_target(self, mock_delete): - common = self.driver.common - common.conn = self.fake_ecom_connection() - repServiceInstanceName = common.conn.EnumerateInstanceNames( - 
'EMC_ReplicationService')[0] - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - sourceInstance = common.conn.GetInstance(volumeInstanceName) - targetInstance = sourceInstance.copy() - targetStorageConfigService = ( - common.utils.find_storage_configuration_service( - common.conn, self.data.storage_system)) - deviceId = targetInstance['DeviceID'] - volumeName = targetInstance['Name'] - extraSpecs = self.data.extra_specs_is_re - rep_config = common.utils.get_replication_config( - self.replication_device) - repExtraSpecs = common._get_replication_extraSpecs( - extraSpecs, rep_config) - common._cleanup_remote_target( - common.conn, repServiceInstanceName, sourceInstance, - targetInstance, extraSpecs, repExtraSpecs) - common._delete_from_pool_v3.assert_called_once_with( - targetStorageConfigService, targetInstance, volumeName, - deviceId, repExtraSpecs) - - @mock.patch.object( - volume_types, - 'get_volume_type_extra_specs', - return_value={'volume_backend_name': 'VMAXReplication', - 'replication_enabled': ' True'}) - @mock.patch.object( - common.VMAXCommon, - 'cleanup_lun_replication') - def test_delete_re_volume(self, mock_cleanup, mock_vol_types): - common = self.driver.common - common.conn = self.fake_ecom_connection() - volume = self.data.test_volume_re - volumeName = volume['name'] - volumeInstanceName = ( - common.conn.EnumerateInstanceNames("EMC_StorageVolume")[0]) - volumeInstance = common.conn.GetInstance(volumeInstanceName) - extraSpecs = self.data.extra_specs_is_re - self.driver.delete_volume(volume) - common.cleanup_lun_replication.assert_called_once_with( - common.conn, volume, volumeName, volumeInstance, extraSpecs) - - def test_failback_failover_wrong_state(self): - common = self.driver.common - volumes = [self.data.test_volume_re] - # failover command, backend already failed over - common.failover = True - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.failover_host, - 'context', volumes, None) 
- # failback command, backend not failed over - common.failover = False - self.assertRaises(exception.VolumeBackendAPIException, - self.driver.failover_host, - 'context', volumes, 'default') - - -class VMAXInitiatorCheckFalseTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXInitiatorCheckFalseTest, self).setUp() - - configuration = mock.Mock() - configuration.safe_get.return_value = 'initiatorCheckTest' - configuration.config_group = 'initiatorCheckTest' - - common.VMAXCommon._gather_info = mock.Mock() - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - FakeEcomConnection()) - self.mock_object(utils.VMAXUtils, - 'find_controller_configuration_service', - return_value=None) - driver = iscsi.VMAXISCSIDriver(configuration=configuration) - driver.db = FakeDB() - self.driver = driver - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict(self, mock_find_lun): - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'INITIATOR_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS'} - connector = self.data.connector - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertFalse(maskingViewDict['initiatorCheck']) - - -class VMAXInitiatorCheckTrueTest(test.TestCase): - def setUp(self): - self.data = VMAXCommonData() - - super(VMAXInitiatorCheckTrueTest, self).setUp() - - self.configuration = mock.Mock( - replication_device={}, - initiator_check='True', - config_group='initiatorCheckTest') - - def safe_get(key): - return getattr(self.configuration, key) - self.configuration.safe_get = safe_get - 
common.VMAXCommon._gather_info = mock.Mock() - instancename = FakeCIMInstanceName() - self.mock_object(utils.VMAXUtils, 'get_instance_name', - instancename.fake_getinstancename) - self.mock_object(common.VMAXCommon, '_get_ecom_connection', - FakeEcomConnection()) - self.mock_object(utils.VMAXUtils, - 'find_controller_configuration_service', - return_value=None) - driver = iscsi.VMAXISCSIDriver(configuration=self.configuration) - driver.db = FakeDB() - self.driver = driver - - @mock.patch.object( - common.VMAXCommon, - '_find_lun', - return_value=( - {'SystemName': VMAXCommonData.storage_system})) - def test_populate_masking_dict(self, mock_find_lun): - extraSpecs = {'storagetype:pool': u'SRP_1', - 'volume_backend_name': 'INITIATOR_BE', - 'storagetype:array': u'1234567891011', - 'isV3': True, - 'portgroupname': u'OS-portgroup-PG', - 'storagetype:slo': u'Diamond', - 'storagetype:workload': u'DSS'} - connector = self.data.connector - maskingViewDict = self.driver.common._populate_masking_dict( - self.data.test_volume, connector, extraSpecs) - self.assertTrue(maskingViewDict['initiatorCheck']) diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py new file mode 100644 index 00000000000..0abcb29e158 --- /dev/null +++ b/cinder/tests/unit/volume/drivers/dell_emc/vmax/test_vmax.py @@ -0,0 +1,4308 @@ +# Copyright (c) 2017 Dell Inc. or its subsidiaries. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ast +from copy import deepcopy +import datetime +import tempfile +from xml.dom import minidom + +import mock +import requests +import six + +from cinder import exception +from cinder import test +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder.volume.drivers.dell_emc.vmax import common +from cinder.volume.drivers.dell_emc.vmax import fc +from cinder.volume.drivers.dell_emc.vmax import iscsi +from cinder.volume.drivers.dell_emc.vmax import masking +from cinder.volume.drivers.dell_emc.vmax import provision +from cinder.volume.drivers.dell_emc.vmax import rest +from cinder.volume.drivers.dell_emc.vmax import utils +from cinder.volume import volume_types +from cinder.zonemanager import utils as fczm_utils + + +CINDER_EMC_CONFIG_DIR = '/etc/cinder/' + + +class VMAXCommonData(object): + # array info + array = '000197800123' + srp = 'SRP_1' + slo = 'Diamond' + workload = 'DSS' + port_group_name_f = 'OS-fibre-PG' + port_group_name_i = 'OS-iscsi-PG' + masking_view_name_f = 'OS-HostX-F-OS-fibre-PG-MV' + masking_view_name_i = 'OS-HostX-SRP_1-I-OS-iscsi-PG-MV' + initiatorgroup_name_f = 'OS-HostX-F-IG' + initiatorgroup_name_i = 'OS-HostX-I-IG' + parent_sg_f = 'OS-HostX-F-OS-fibre-PG-SG' + parent_sg_i = 'OS-HostX-I-OS-iscsi-PG-SG' + storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG' + storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG' + defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG' + default_sg_no_slo = 'OS-no_SLO-SG' + failed_resource = 'OS-failed-resource' + fake_host = 
'HostX@Backend#Diamond+DSS+SRP_1+000197800123' + version = '3.0.0' + volume_wwn = '600000345' + + # connector info + wwpn1 = "123456789012345" + wwpn2 = "123456789054321" + wwnn1 = "223456789012345" + initiator = 'iqn.1993-08.org.debian: 01: 222' + ip = u'123.456.7.8' + iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001' + connector = {'ip': ip, + 'initiator': initiator, + 'wwpns': [wwpn1, wwpn2], + 'wwnns': [wwnn1], + 'host': 'HostX'} + + fabric_name_prefix = "fakeFabric" + end_point_map = {connector['wwpns'][0]: [wwnn1], + connector['wwpns'][1]: [wwnn1]} + target_wwns = [wwnn1] + zoning_mappings = { + 'array': u'000197800123', + 'init_targ_map': end_point_map, + 'initiator_group': initiatorgroup_name_f, + 'port_group': port_group_name_f, + 'target_wwns': target_wwns} + + device_map = {} + for wwn in connector['wwpns']: + fabric_name = ''.join([fabric_name_prefix, + wwn[-2:]]) + target_wwn = wwn[::-1] + fabric_map = {'initiator_port_wwn_list': [wwn], + 'target_port_wwn_list': [target_wwn] + } + device_map[fabric_name] = fabric_map + + iscsi_device_info = {'maskingview': masking_view_name_i, + 'ip_and_iqn': [{'ip': ip, + 'iqn': initiator}], + 'is_multipath': True, + 'array': array, + 'controller': {'host': '10.00.00.00'}, + 'hostlunid': 3} + fc_device_info = {'maskingview': masking_view_name_f, + 'array': array, + 'controller': {'host': '10.00.00.00'}, + 'hostlunid': 3} + + # cinder volume info + ctx = 'context' + provider_location = {'array': six.text_type(array), + 'device_id': '00001'} + + provider_location2 = {'array': six.text_type(array), + 'device_id': '00002'} + + snap_location = {'snap_name': '12345', + 'source_id': '00001'} + + test_volume = fake_volume.fake_volume_obj( + context=ctx, name='vol1', size=2, provider_auth=None, + provider_location=six.text_type(provider_location), + host=fake_host, volume_type_id='1e5177e7-95e5-4a0f-b170-e45f4b469f6a') + + test_clone_volume = fake_volume.fake_volume_obj( + context=ctx, name='vol1', size=2, 
provider_auth=None, + provider_location=six.text_type(provider_location2), + host=fake_host) + + test_snapshot = fake_snapshot.fake_snapshot_obj( + context=ctx, id='12345', name='my_snap', size=2, + provider_location=six.text_type(snap_location), + host=fake_host, volume=test_volume) + + test_failed_snap = fake_snapshot.fake_snapshot_obj( + context=ctx, id='12345', name=failed_resource, size=2, + provider_location=six.text_type(snap_location), + host=fake_host, volume=test_volume) + + location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS', + 'storage_protocol': 'FC'} + test_host = {'capabilities': location_info, + 'host': fake_host} + + # extra-specs + vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'} + extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', + 'slo': slo, + 'workload': workload, + 'srp': srp, + 'array': array, + 'interval': 3, + 'retries': 120} + extra_specs_intervals_set = deepcopy(extra_specs) + extra_specs_intervals_set['interval'] = 1 + extra_specs_intervals_set['retries'] = 1 + + # masking view dict + masking_view_dict = { + 'array': array, + 'connector': connector, + 'device_id': '00001', + 'init_group_name': initiatorgroup_name_f, + 'initiator_check': False, + 'maskingview_name': masking_view_name_f, + 'parent_sg_name': parent_sg_f, + 'srp': srp, + 'port_group_name': port_group_name_f, + 'slo': slo, + 'storagegroup_name': storagegroup_name_f, + 'volume_name': test_volume.name, + 'workload': workload} + + masking_view_dict_no_slo = { + 'array': array, + 'connector': connector, + 'device_id': '00001', + 'init_group_name': initiatorgroup_name_f, + 'initiator_check': False, + 'maskingview_name': masking_view_name_f, + 'srp': srp, + 'port_group_name': port_group_name_f, + 'slo': None, + 'parent_sg_name': parent_sg_f, + 'storagegroup_name': 'OS-HostX-No_SLO-OS-fibre-PG', + 'volume_name': test_volume.name, + 'workload': None} + + # vmax data + # sloprovisioning + inititiatorgroup = [{"initiator": [wwpn1], + 
"hostId": initiatorgroup_name_f, + "maskingview": [masking_view_name_f]}, + {"initiator": [initiator], + "hostId": initiatorgroup_name_i, + "maskingview": [masking_view_name_i]}] + + initiator_list = [{"host": initiatorgroup_name_f, + "initiatorId": wwpn1, + "maskingview": [masking_view_name_f]}, + {"host": initiatorgroup_name_i, + "initiatorId": initiator, + "maskingview": [masking_view_name_i]}, + {"initiatorId": [ + "FA-1D:4:" + wwpn1, + "SE-4E:0:" + initiator]}] + + maskingview = [{"maskingViewId": masking_view_name_f, + "portGroupId": port_group_name_f, + "storageGroupId": storagegroup_name_f, + "hostId": initiatorgroup_name_f, + "maskingViewConnection": [ + {"host_lun_address": "0003"}]}, + {"maskingViewId": masking_view_name_i, + "portGroupId": port_group_name_i, + "storageGroupId": storagegroup_name_i, + "hostId": initiatorgroup_name_i, + "maskingViewConnection": [ + {"host_lun_address": "0003"}]}, + {}] + + portgroup = [{"portGroupId": port_group_name_f, + "symmetrixPortKey": [ + {"directorId": "FA-1D", + "portId": "FA-1D:4"}], + "maskingview": [masking_view_name_f]}, + {"portGroupId": port_group_name_i, + "symmetrixPortKey": [ + {"directorId": "SE-4E", + "portId": "SE-4E:0"}], + "maskingview": [masking_view_name_i]}] + + port_list = [ + {"symmetrixPort": {"num_of_masking_views": 1, + "maskingview": [masking_view_name_f], + "identifier": wwnn1, + "symmetrixPortKey": { + "directorId": "FA-1D", + "portId": "4"}, + "portgroup": [port_group_name_f]}}, + {"symmetrixPort": {"identifier": initiator, + "symmetrixPortKey": { + "directorId": "SE-4E", + "portId": "0"}, + "ip_addresses": [ip], + "num_of_masking_views": 1, + "maskingview": [masking_view_name_i], + "portgroup": [port_group_name_i]}}] + + sg_details = [{"srp": srp, + "num_of_vols": 2, + "cap_gb": 2, + "storageGroupId": defaultstoragegroup_name, + "slo": slo, + "workload": workload}, + {"srp": srp, + "num_of_vols": 2, + "cap_gb": 2, + "storageGroupId": storagegroup_name_f, + "slo": slo, + "workload": 
workload, + "maskingview": [masking_view_name_f], + "parent_storage_group": [parent_sg_f]}, + {"srp": srp, + "num_of_vols": 2, + "cap_gb": 2, + "storageGroupId": storagegroup_name_i, + "slo": slo, + "workload": workload, + "maskingview": [masking_view_name_i], + "parent_storage_group": [parent_sg_i]}, + {"num_of_vols": 2, + "cap_gb": 2, + "storageGroupId": parent_sg_f, + "num_of_child_sgs": 1, + "child_storage_group": [storagegroup_name_f], + "maskingview": [masking_view_name_f]}, + {"num_of_vols": 2, + "cap_gb": 2, + "storageGroupId": parent_sg_i, + "num_of_child_sgs": 1, + "child_storage_group": [storagegroup_name_i], + "maskingview": [masking_view_name_i], } + ] + + sg_list = {"storageGroupId": [storagegroup_name_f, + defaultstoragegroup_name]} + + srp_details = {"srpSloDemandId": ["Bronze", "Diamond", "Gold", + "None", "Optimized", "Silver"], + "srpId": srp, + "total_allocated_cap_gb": 5244.7, + "total_usable_cap_gb": 20514.4, + "total_subscribed_cap_gb": 84970.1, + "reserved_cap_percent": 10} + + volume_details = [{"cap_gb": 2, + "num_of_storage_groups": 1, + "volumeId": "00001", + "volume_identifier": "1", + "wwn": volume_wwn, + "snapvx_target": 'false', + "snapvx_source": 'false', + "storageGroupId": [defaultstoragegroup_name, + storagegroup_name_f]}, + {"cap_gb": 1, + "num_of_storage_groups": 1, + "volumeId": "00002", + "volume_identifier": "OS-2", + "wwn": '600012345', + "storageGroupId": [defaultstoragegroup_name, + storagegroup_name_f]}] + + volume_list = [ + {"resultList": {"result": [{"volumeId": "00001"}]}}, + {"resultList": {"result": [{"volumeId": "00002"}]}}, + {"resultList": {"result": [{"volumeId": "00001"}, + {"volumeId": "00002"}]}}] + + private_vol_details = { + "resultList": { + "result": [{ + "timeFinderInfo": { + "snapVXSession": [ + {"srcSnapshotGenInfo": [ + {"snapshotHeader": { + "snapshotName": "temp-1", + "device": "00001"}, + "lnkSnapshotGenInfo": [ + {"targetDevice": "00002"}]}]}, + {"tgtSrcSnapshotGenInfo": { + "snapshotName": 
"temp-1", + "targetDevice": "00002", + "sourceDevice": "00001"}}], + "snapVXSrc": 'true', + "snapVXTgt": 'true'}}]}} + + workloadtype = {"workloadId": ["OLTP", "OLTP_REP", "DSS", "DSS_REP"]} + slo_details = {"sloId": ["Bronze", "Diamond", "Gold", + "Optimized", "Platinum", "Silver"]} + + # replication + volume_snap_vx = {"snapshotLnks": [], + "snapshotSrcs": [ + {"generation": 0, + "linkedDevices": [ + {"targetDevice": "00002", + "percentageCopied": 100, + "state": "Copied", + "copy": True, + "defined": True, + "linked": True}], + "snapshotName": '12345', + "state": "Established"}]} + capabilities = {"symmetrixCapability": [{"rdfCapable": True, + "snapVxCapable": True, + "symmetrixId": "0001111111"}, + {"symmetrixId": array, + "snapVxCapable": True, + "rdfCapable": True}]} + + # system + job_list = [{"status": "SUCCEEDED", + "jobId": "12345", + "result": "created", + "resourceLink": "storagegroup/%s" % storagegroup_name_f}, + {"status": "RUNNING", "jobId": "55555"}, + {"status": "FAILED", "jobId": "09999"}] + symmetrix = {"symmetrixId": array, + "model": "VMAX250F", + "ucode": "5977.1091.1092"} + + headroom = {"headroom": [{"headroomCapacity": 20348.29}]} + + +class FakeLookupService(object): + def get_device_mapping_from_network(self, initiator_wwns, target_wwns): + return VMAXCommonData.device_map + + +class FakeResponse(object): + + def __init__(self, status_code, return_object): + self.status_code = status_code + self.return_object = return_object + + def json(self): + if self.return_object: + return self.return_object + else: + raise ValueError + + +class FakeRequestsSession(object): + + def __init__(self, *args, **kwargs): + self.data = VMAXCommonData() + + def request(self, method, url, params=None, data=None): + return_object = '' + status_code = 200 + if method == 'GET': + status_code, return_object = self._get_request(url, params) + + elif method == 'POST' or method == 'PUT': + status_code, return_object = self._post_or_put(url, data) + + elif method == 
'DELETE': + status_code, return_object = self._delete(url) + + elif method == 'TIMEOUT': + raise requests.Timeout + + elif method == 'EXCEPTION': + raise Exception + + return FakeResponse(status_code, return_object) + + def _get_request(self, url, params): + status_code = 200 + return_object = None + if self.data.failed_resource in url: + status_code = 500 + return_object = self.data.job_list[2] + elif 'sloprovisioning' in url: + if 'volume' in url: + return_object = self._sloprovisioning_volume(url, params) + elif 'storagegroup' in url: + return_object = self._sloprovisioning_sg(url) + elif 'maskingview' in url: + return_object = self._sloprovisioning_mv(url) + elif 'portgroup' in url: + return_object = self._sloprovisioning_pg(url) + elif 'director' in url: + return_object = self._sloprovisioning_port(url) + elif 'host' in url: + return_object = self._sloprovisioning_ig(url) + elif 'initiator' in url: + return_object = self._sloprovisioning_initiator(url) + elif 'srp' in url: + return_object = self.data.srp_details + elif 'workloadtype' in url: + return_object = self.data.workloadtype + else: + return_object = self.data.slo_details + + elif 'replication' in url: + return_object = self._replication(url) + + elif 'system' in url: + return_object = self._system(url) + + elif 'headroom' in url: + return_object = self.data.headroom + + return status_code, return_object + + def _sloprovisioning_volume(self, url, params): + return_object = self.data.volume_list[2] + if '/private' in url: + return_object = self.data.private_vol_details + elif params: + if '1' in params.values(): + return_object = self.data.volume_list[0] + elif '2' in params.values(): + return_object = self.data.volume_list[1] + else: + for vol in self.data.volume_details: + if vol['volumeId'] in url: + return_object = vol + break + return return_object + + def _sloprovisioning_sg(self, url): + return_object = self.data.sg_list + for sg in self.data.sg_details: + if sg['storageGroupId'] in url: + 
return_object = sg + break + return return_object + + def _sloprovisioning_mv(self, url): + if self.data.masking_view_name_i in url: + return_object = self.data.maskingview[1] + else: + return_object = self.data.maskingview[0] + return return_object + + def _sloprovisioning_pg(self, url): + return_object = None + for pg in self.data.portgroup: + if pg['portGroupId'] in url: + return_object = pg + break + return return_object + + def _sloprovisioning_port(self, url): + return_object = None + for port in self.data.port_list: + if port['symmetrixPort']['symmetrixPortKey']['directorId'] in url: + return_object = port + break + return return_object + + def _sloprovisioning_ig(self, url): + return_object = None + for ig in self.data.inititiatorgroup: + if ig['hostId'] in url: + return_object = ig + break + return return_object + + def _sloprovisioning_initiator(self, url): + return_object = self.data.initiator_list[2] + if self.data.wwpn1 in url: + return_object = self.data.initiator_list[0] + elif self.data.initiator in url: + return_object = self.data.initiator_list[1] + return return_object + + def _replication(self, url): + return_object = None + if 'volume' in url: + return_object = self.data.volume_snap_vx + elif 'capabilities' in url: + return_object = self.data.capabilities + return return_object + + def _system(self, url): + return_object = None + if 'job' in url: + for job in self.data.job_list: + if job['jobId'] in url: + return_object = job + break + else: + return_object = self.data.symmetrix + return return_object + + def _post_or_put(self, url, payload): + return_object = self.data.job_list[0] + status_code = 201 + if self.data.failed_resource in url: + status_code = 500 + return_object = self.data.job_list[2] + elif payload: + payload = ast.literal_eval(payload) + if self.data.failed_resource in payload.values(): + status_code = 500 + return_object = self.data.job_list[2] + if payload.get('executionOption'): + status_code = 202 + return status_code, 
return_object + + def _delete(self, url): + if self.data.failed_resource in url: + status_code = 500 + return_object = self.data.job_list[2] + else: + status_code = 204 + return_object = None + return status_code, return_object + + def session(self): + return FakeRequestsSession() + + +class FakeConfiguration(object): + + def __init__(self, emc_file=None, volume_backend_name=None, + intervals=0, retries=0): + self.cinder_dell_emc_config_file = emc_file + self.intervals = intervals + self.retries = retries + self.volume_backend_name = volume_backend_name + self.config_group = volume_backend_name + + def safe_get(self, key): + try: + return getattr(self, key) + except Exception: + return None + + def append_config_values(self, values): + pass + + +class FakeXML(object): + + def __init__(self): + """""" + self.tempdir = tempfile.mkdtemp() + self.data = VMAXCommonData() + + def create_fake_config_file(self, config_group, portgroup, + ssl_verify=False): + + doc = minidom.Document() + emc = doc.createElement("EMC") + doc.appendChild(emc) + doc = self.add_array_info(doc, emc, portgroup, ssl_verify) + filename = 'cinder_dell_emc_config_%s.xml' % config_group + config_file_path = self.tempdir + '/' + filename + + f = open(config_file_path, 'w') + doc.writexml(f) + f.close() + return config_file_path + + def add_array_info(self, doc, emc, portgroup_name, ssl_verify): + array = doc.createElement("Array") + arraytext = doc.createTextNode(self.data.array) + emc.appendChild(array) + array.appendChild(arraytext) + + ecomserverip = doc.createElement("RestServerIp") + ecomserveriptext = doc.createTextNode("1.1.1.1") + emc.appendChild(ecomserverip) + ecomserverip.appendChild(ecomserveriptext) + + ecomserverport = doc.createElement("RestServerPort") + ecomserverporttext = doc.createTextNode("8443") + emc.appendChild(ecomserverport) + ecomserverport.appendChild(ecomserverporttext) + + ecomusername = doc.createElement("RestUserName") + ecomusernametext = doc.createTextNode("smc") + 
emc.appendChild(ecomusername) + ecomusername.appendChild(ecomusernametext) + + ecompassword = doc.createElement("RestPassword") + ecompasswordtext = doc.createTextNode("smc") + emc.appendChild(ecompassword) + ecompassword.appendChild(ecompasswordtext) + + portgroup = doc.createElement("PortGroup") + portgrouptext = doc.createTextNode(portgroup_name) + portgroup.appendChild(portgrouptext) + + portgroups = doc.createElement("PortGroups") + portgroups.appendChild(portgroup) + emc.appendChild(portgroups) + + srp = doc.createElement("SRP") + srptext = doc.createTextNode("SRP_1") + emc.appendChild(srp) + srp.appendChild(srptext) + + if ssl_verify: + restcert = doc.createElement("SSLCert") + restcerttext = doc.createTextNode("/path/cert.crt") + emc.appendChild(restcert) + restcert.appendChild(restcerttext) + + restverify = doc.createElement("SSLVerify") + restverifytext = doc.createTextNode("/path/cert.pem") + emc.appendChild(restverify) + restverify.appendChild(restverifytext) + return doc + + +class VMAXUtilsTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXUtilsTest, self).setUp() + config_group = 'UtilsTests' + fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_i, True) + configuration = FakeConfiguration(fake_xml, config_group) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = iscsi.VMAXISCSIDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.utils = self.common.utils + + def test_get_volumetype_extra_specs(self): + with mock.patch.object(volume_types, 'get_volume_type_extra_specs', + return_value={'specs'}) as type_mock: + # path 1: volume_type_id not passed in + self.utils.get_volumetype_extra_specs(self.data.test_volume) + volume_types.get_volume_type_extra_specs.assert_called_once_with( + self.data.test_volume.volume_type_id) + type_mock.reset_mock() + # path 2: volume_type_id passed in + 
self.utils.get_volumetype_extra_specs(self.data.test_volume, '123') + volume_types.get_volume_type_extra_specs.assert_called_once_with( + '123') + type_mock.reset_mock() + # path 3: no type_id + self.utils.get_volumetype_extra_specs(self.data.test_clone_volume) + (volume_types.get_volume_type_extra_specs. + assert_not_called()) + + def test_get_volumetype_extra_specs_exception(self): + extra_specs = self.utils.get_volumetype_extra_specs( + {'name': 'no_type_id'}) + self.assertEqual({}, extra_specs) + + def test_get_random_portgroup(self): + # 4 portgroups + data = ("\n\n" + "" + "OS-PG1\n" + "OS-PG2\n" + "OS-PG3\n" + "OS-PG4\n" + "" + "") + dom = minidom.parseString(data) + portgroup = self.utils._get_random_portgroup(dom) + self.assertIn('OS-PG', portgroup) + + # Duplicate portgroups + data = ("\n\n" + "" + "OS-PG1\n" + "OS-PG1\n" + "OS-PG1\n" + "OS-PG2\n" + "" + "") + dom = minidom.parseString(data) + portgroup = self.utils._get_random_portgroup(dom) + self.assertIn('OS-PG', portgroup) + + def test_get_random_portgroup_none(self): + # Missing PortGroup tag + data = ("\n\n" + "") + dom = minidom.parseString(data) + self.assertIsNone(self.utils._get_random_portgroup(dom)) + + # Missing portgroups + data = ("\n\n" + "" + "" + "") + dom = minidom.parseString(data) + self.assertIsNone(self.utils._get_random_portgroup(dom)) + + def test_get_host_short_name(self): + host_under_16_chars = 'host_13_chars' + host1 = self.utils.get_host_short_name( + host_under_16_chars) + self.assertEqual(host_under_16_chars, host1) + + host_over_16_chars = ( + 'host_over_16_chars_host_over_16_chars_host_over_16_chars') + # Check that the same md5 value is retrieved from multiple calls + host2 = self.utils.get_host_short_name( + host_over_16_chars) + host3 = self.utils.get_host_short_name( + host_over_16_chars) + self.assertEqual(host2, host3) + host_with_period = 'hostname.with.many.parts' + ref_host_name = self.utils.generate_unique_trunc_host('hostname') + host4 = 
self.utils.get_host_short_name(host_with_period) + self.assertEqual(ref_host_name, host4) + + def test_get_volume_element_name(self): + volume_id = 'ea95aa39-080b-4f11-9856-a03acf9112ad' + volume_element_name = self.utils.get_volume_element_name(volume_id) + expect_vol_element_name = ('OS-' + volume_id) + self.assertEqual(expect_vol_element_name, volume_element_name) + + def test_parse_file_to_get_array_map(self): + kwargs = ( + {'RestServerIp': '1.1.1.1', + 'RestServerPort': '8443', + 'RestUserName': 'smc', + 'RestPassword': 'smc', + 'SSLCert': '/path/cert.crt', + 'SSLVerify': '/path/cert.pem', + 'SerialNumber': self.data.array, + 'srpName': 'SRP_1', + 'PortGroup': self.data.port_group_name_i}) + array_info = self.utils.parse_file_to_get_array_map( + self.common.configuration.cinder_dell_emc_config_file) + self.assertEqual(kwargs, array_info) + + @mock.patch.object(utils.VMAXUtils, + '_get_connection_info') + @mock.patch.object(utils.VMAXUtils, + '_get_random_portgroup') + def test_parse_file_to_get_array_map_errors(self, mock_port, mock_conn): + tempdir = tempfile.mkdtemp() + doc = minidom.Document() + emc = doc.createElement("EMC") + doc.appendChild(emc) + filename = 'cinder_dell_emc_config_%s.xml' % 'fake_xml' + config_file_path = tempdir + '/' + filename + f = open(config_file_path, 'w') + doc.writexml(f) + f.close() + array_info = self.utils.parse_file_to_get_array_map( + config_file_path) + self.assertIsNone(array_info['SerialNumber']) + + def test_parse_file_to_get_array_map_conn_errors(self): + tempdir = tempfile.mkdtemp() + doc = minidom.Document() + emc = doc.createElement("EMC") + doc.appendChild(emc) + filename = 'cinder_dell_emc_config_%s.xml' % 'fake_xml' + config_file_path = tempdir + '/' + filename + f = open(config_file_path, 'w') + doc.writexml(f) + f.close() + self.assertRaises(exception.VolumeBackendAPIException, + self.utils.parse_file_to_get_array_map, + config_file_path) + + def test_truncate_string(self): + # string is less than max number 
+ str_to_truncate = 'string' + response = self.utils.truncate_string(str_to_truncate, 10) + self.assertEqual(str_to_truncate, response) + + def test_override_ratio(self): + max_over_sub_ratio = 30 + max_sub_ratio_from_per = 40 + ratio = self.utils.override_ratio( + max_over_sub_ratio, max_sub_ratio_from_per) + self.assertEqual(max_sub_ratio_from_per, ratio) + ratio2 = self.utils.override_ratio( + None, max_sub_ratio_from_per) + self.assertEqual(max_sub_ratio_from_per, ratio2) + + def test_get_default_storage_group_name_slo_workload(self): + srp_name = self.data.srp + slo = self.data.slo + workload = self.data.workload + sg_name = self.utils.get_default_storage_group_name( + srp_name, slo, workload) + self.assertEqual(self.data.defaultstoragegroup_name, sg_name) + + def test_get_default_storage_group_name_no_slo(self): + srp_name = self.data.srp + slo = None + workload = None + sg_name = self.utils.get_default_storage_group_name( + srp_name, slo, workload) + self.assertEqual(self.data.default_sg_no_slo, sg_name) + + def test_get_time_delta(self): + start_time = 1487781721.09 + end_time = 1487781758.16 + delta = end_time - start_time + ref_delta = six.text_type(datetime.timedelta(seconds=int(delta))) + time_delta = self.utils.get_time_delta(start_time, end_time) + self.assertEqual(ref_delta, time_delta) + + def test_get_short_protocol_type(self): + # iscsi + short_i_protocol = self.utils.get_short_protocol_type('iscsi') + self.assertEqual('I', short_i_protocol) + # fc + short_f_protocol = self.utils.get_short_protocol_type('FC') + self.assertEqual('F', short_f_protocol) + # else + other_protocol = self.utils.get_short_protocol_type('OTHER') + self.assertEqual('OTHER', other_protocol) + + def test_get_temp_snap_name(self): + clone_name = "12345" + source_device_id = "00001" + ref_name = "temp-00001-12345" + snap_name = self.utils.get_temp_snap_name( + clone_name, source_device_id) + self.assertEqual(ref_name, snap_name) + + def test_get_array_and_device_id(self): + 
volume = deepcopy(self.data.test_volume) + external_ref = {u'source-name': u'00002'} + array, device_id = self.utils.get_array_and_device_id( + volume, external_ref) + self.assertEqual(self.data.array, array) + self.assertEqual('00002', device_id) + + def test_get_array_and_device_id_exception(self): + volume = deepcopy(self.data.test_volume) + external_ref = {u'source-name': None} + self.assertRaises(exception.VolumeBackendAPIException, + self.utils.get_array_and_device_id, + volume, external_ref) + + def test_get_pg_short_name(self): + pg_under_12_chars = 'pg_11_chars' + pg1 = self.utils.get_pg_short_name(pg_under_12_chars) + self.assertEqual(pg_under_12_chars, pg1) + + pg_over_12_chars = 'portgroup_over_12_characters' + # Check that the same md5 value is retrieved from multiple calls + pg2 = self.utils.get_pg_short_name(pg_over_12_chars) + pg3 = self.utils.get_pg_short_name(pg_over_12_chars) + self.assertEqual(pg2, pg3) + + +class VMAXRestTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXRestTest, self).setUp() + config_group = 'RestTests' + fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_f) + configuration = FakeConfiguration(fake_xml, config_group) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = fc.VMAXFCDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.rest = self.common.rest + self.utils = self.common.utils + + def test_rest_request_exception(self): + sc, msg = self.rest.request('/fake_url', 'TIMEOUT') + self.assertIsNone(sc) + self.assertIsNone(msg) + self.assertRaises(exception.VolumeBackendAPIException, + self.rest.request, '', 'EXCEPTION') + + def test_wait_for_job_complete(self): + rc, job, status, task = self.rest.wait_for_job_complete( + {'status': 'created', 'jobId': '12345'}, self.data.extra_specs) + self.assertEqual(0, rc) + + def test_wait_for_job_complete_failed(self): 
+ with mock.patch.object(self.rest, '_is_job_finished', + side_effect=exception.BadHTTPResponseStatus): + self.assertRaises(exception.VolumeBackendAPIException, + self.rest.wait_for_job_complete, + self.data.job_list[0], self.data.extra_specs) + + def test_is_job_finished_false(self): + job_id = "55555" + complete, response, rc, status, task = self.rest._is_job_finished( + job_id) + self.assertFalse(complete) + + def test_is_job_finished_failed(self): + job_id = "55555" + complete, response, rc, status, task = self.rest._is_job_finished( + job_id) + self.assertFalse(complete) + with mock.patch.object(self.rest, 'request', + return_value=(200, {'status': 'FAILED'})): + complete, response, rc, status, task = ( + self.rest._is_job_finished(job_id)) + self.assertTrue(complete) + self.assertEqual(-1, rc) + + def test_check_status_code_success(self): + status_code = 200 + self.rest.check_status_code_success( + 'test success', status_code, "") + + def test_check_status_code_not_success(self): + status_code = 500 + self.assertRaises(exception.VolumeBackendAPIException, + self.rest.check_status_code_success, + 'test exception', status_code, "") + + def test_wait_for_job_success(self): + operation = 'test' + status_code = 202 + job = self.data.job_list[0] + extra_specs = self.data.extra_specs + self.rest.wait_for_job( + operation, status_code, job, extra_specs) + + def test_wait_for_job_failed(self): + operation = 'test' + status_code = 202 + job = self.data.job_list[2] + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, 'wait_for_job_complete', + return_value=(-1, '', '', '')): + self.assertRaises(exception.VolumeBackendAPIException, + self.rest.wait_for_job, + operation, status_code, job, extra_specs) + + def test_get_resource_present(self): + array = self.data.array + category = 'sloprovisioning' + resource_type = 'storagegroup' + resource = self.rest.get_resource(array, category, resource_type) + self.assertEqual(self.data.sg_list, resource) + + def 
test_get_resource_not_present(self): + array = self.data.array + category = 'sloprovisioning' + resource_type = self.data.failed_resource + resource = self.rest.get_resource(array, category, resource_type) + self.assertIsNone(resource) + + def test_create_resource_success(self): + array = self.data.array + category = '' + resource_type = '' + payload = {'someKey': 'someValue'} + status_code, message = self.rest.create_resource( + array, category, resource_type, payload) + self.assertEqual(self.data.job_list[0], message) + + def test_create_resource_failed(self): + array = self.data.array + category = '' + resource_type = '' + payload = {'someKey': self.data.failed_resource} + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.create_resource, array, category, + resource_type, payload) + + def test_modify_resource(self): + array = self.data.array + category = '' + resource_type = '' + payload = {'someKey': 'someValue'} + status_code, message = self.rest.modify_resource( + array, category, resource_type, payload) + self.assertEqual(self.data.job_list[0], message) + + def test_modify_resource_failed(self): + array = self.data.array + category = '' + resource_type = '' + payload = {'someKey': self.data.failed_resource} + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.modify_resource, array, category, + resource_type, payload) + + def test_delete_resource(self): + operation = 'delete res resource' + status_code = 204 + message = None + array = self.data.array + category = 'cat' + resource_type = 'res' + resource_name = 'name' + with mock.patch.object(self.rest, 'check_status_code_success'): + self.rest.delete_resource( + array, category, resource_type, resource_name) + self.rest.check_status_code_success.assert_called_with( + operation, status_code, message) + + def test_delete_resource_failed(self): + array = self.data.array + category = self.data.failed_resource + resource_type = self.data.failed_resource + resource_name = 
self.data.failed_resource + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.modify_resource, array, category, + resource_type, resource_name) + + def test_get_array_serial(self): + ref_details = self.data.symmetrix + array_details = self.rest.get_array_serial(self.data.array) + self.assertEqual(ref_details, array_details) + + def test_get_array_serial_failed(self): + array_details = self.rest.get_array_serial(self.data.failed_resource) + self.assertIsNone(array_details) + + def test_get_srp_by_name(self): + ref_details = self.data.srp_details + srp_details = self.rest.get_srp_by_name( + self.data.array, self.data.srp) + self.assertEqual(ref_details, srp_details) + + def test_get_slo_list(self): + ref_settings = self.data.slo_details['sloId'] + slo_settings = self.rest.get_slo_list(self.data.array) + self.assertEqual(ref_settings, slo_settings) + + def test_get_workload_settings(self): + ref_settings = self.data.workloadtype['workloadId'] + wl_settings = self.rest.get_workload_settings( + self.data.array) + self.assertEqual(ref_settings, wl_settings) + + def test_get_workload_settings_failed(self): + wl_settings = self.rest.get_workload_settings( + self.data.failed_resource) + self.assertFalse(wl_settings) + + def test_get_headroom_capacity(self): + ref_headroom = self.data.headroom['headroom'][0]['headroomCapacity'] + headroom_cap = self.rest.get_headroom_capacity( + self.data.array, self.data.srp, + self.data.slo, self.data.workload) + self.assertEqual(ref_headroom, headroom_cap) + + def test_get_headroom_capacity_failed(self): + headroom_cap = self.rest.get_headroom_capacity( + self.data.failed_resource, self.data.srp, + self.data.slo, self.data.workload) + self.assertIsNone(headroom_cap) + + def test_get_storage_group(self): + ref_details = self.data.sg_details[0] + sg_details = self.rest.get_storage_group( + self.data.array, self.data.defaultstoragegroup_name) + self.assertEqual(ref_details, sg_details) + + def 
test_get_storage_group_list(self): + ref_details = self.data.sg_list['storageGroupId'] + sg_list = self.rest.get_storage_group_list( + self.data.array, {}) + self.assertEqual(ref_details, sg_list) + + def test_get_storage_group_list_none(self): + with mock.patch.object(self.rest, 'get_resource', return_value=None): + sg_list = self.rest.get_storage_group_list( + self.data.array, {}) + self.assertFalse(sg_list) + + def test_create_storage_group(self): + with mock.patch.object(self.rest, 'create_resource'): + payload = {'someKey': 'someValue'} + self.rest._create_storagegroup(self.data.array, payload) + self.rest.create_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'storagegroup', payload) + + def test_create_storage_group_success(self): + sg_name = self.rest.create_storage_group( + self.data.array, self.data.storagegroup_name_f, self.data.srp, + self.data.slo, self.data.workload, self.data.extra_specs) + self.assertEqual(self.data.storagegroup_name_f, sg_name) + + def test_create_storage_group_failed(self): + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.create_storage_group, self.data.array, + self.data.failed_resource, self.data.srp, self.data.slo, + self.data.workload, self.data.extra_specs) + + def test_create_storage_group_no_slo(self): + sg_name = self.rest.create_storage_group( + self.data.array, self.data.default_sg_no_slo, self.data.srp, + None, None, self.data.extra_specs) + self.assertEqual(self.data.default_sg_no_slo, sg_name) + + def test_modify_storage_group(self): + array = self.data.array + storagegroup = self.data.defaultstoragegroup_name + payload = {'someKey': 'someValue'} + with mock.patch.object(self.rest, 'modify_resource'): + self.rest.modify_storage_group(array, storagegroup, payload) + self.rest.modify_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'storagegroup', + payload, resource_name=storagegroup) + + def test_create_volume_from_sg_success(self): + volume_name = 
self.data.volume_details[0]['volume_identifier'] + ref_dict = self.data.provider_location + volume_dict = self.rest.create_volume_from_sg( + self.data.array, volume_name, self.data.defaultstoragegroup_name, + self.data.test_volume.size, self.data.extra_specs) + self.assertEqual(ref_dict, volume_dict) + + def test_create_volume_from_sg_failed(self): + volume_name = self.data.volume_details[0]['volume_identifier'] + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.create_volume_from_sg, self.data.array, + volume_name, self.data.failed_resource, + self.data.test_volume.size, self.data.extra_specs) + + def test_create_volume_from_sg_cannot_retrieve_device_id(self): + with mock.patch.object(self.rest, 'find_volume_device_id', + return_value=None): + volume_name = self.data.volume_details[0]['volume_identifier'] + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.create_volume_from_sg, self.data.array, + volume_name, self.data.failed_resource, + self.data.test_volume.size, self.data.extra_specs) + + def test_add_vol_to_sg_success(self): + operation = 'Add volume to sg' + status_code = 202 + message = self.data.job_list[0] + with mock.patch.object(self.rest, 'wait_for_job'): + device_id = self.data.volume_details[0]['volumeId'] + self.rest.add_vol_to_sg( + self.data.array, self.data.storagegroup_name_f, device_id, + self.data.extra_specs) + self.rest.wait_for_job.assert_called_with( + operation, status_code, message, self.data.extra_specs) + + def test_add_vol_to_sg_failed(self): + device_id = [self.data.volume_details[0]['volumeId']] + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.add_vol_to_sg, self.data.array, + self.data.failed_resource, device_id, + self.data.extra_specs) + + def test_remove_vol_from_sg_success(self): + operation = 'Remove vol from sg' + status_code = 202 + message = self.data.job_list[0] + with mock.patch.object(self.rest, 'wait_for_job'): + device_id = 
self.data.volume_details[0]['volumeId'] + self.rest.remove_vol_from_sg( + self.data.array, self.data.storagegroup_name_f, device_id, + self.data.extra_specs) + self.rest.wait_for_job.assert_called_with( + operation, status_code, message, self.data.extra_specs) + + def test_remove_vol_from_sg_failed(self): + device_id = [self.data.volume_details[0]['volumeId']] + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest.remove_vol_from_sg, self.data.array, + self.data.failed_resource, device_id, + self.data.extra_specs) + + def test_get_vmax_default_storage_group(self): + ref_storage_group = self.data.sg_details[0] + ref_sg_name = self.data.defaultstoragegroup_name + storagegroup, storagegroup_name = ( + self.rest.get_vmax_default_storage_group( + self.data.array, self.data.srp, + self.data.slo, self.data.workload)) + self.assertEqual(ref_sg_name, storagegroup_name) + self.assertEqual(ref_storage_group, storagegroup) + + def test_delete_storage_group(self): + operation = 'delete storagegroup resource' + status_code = 204 + message = None + with mock.patch.object(self.rest, 'check_status_code_success'): + self.rest.delete_storage_group( + self.data.array, self.data.storagegroup_name_f) + self.rest.check_status_code_success.assert_called_with( + operation, status_code, message) + + def test_is_child_sg_in_parent_sg(self): + is_child1 = self.rest.is_child_sg_in_parent_sg( + self.data.array, self.data.storagegroup_name_f, + self.data.parent_sg_f) + is_child2 = self.rest.is_child_sg_in_parent_sg( + self.data.array, self.data.defaultstoragegroup_name, + self.data.parent_sg_f) + self.assertTrue(is_child1) + self.assertFalse(is_child2) + + def test_add_child_sg_to_parent_sg(self): + payload = {"editStorageGroupActionParam": { + "expandStorageGroupParam": { + "addExistingStorageGroupParam": { + "storageGroupId": [self.data.storagegroup_name_f]}}}} + with mock.patch.object(self.rest, 'modify_storage_group', + return_value=(202, self.data.job_list[0])): + 
self.rest.add_child_sg_to_parent_sg( + self.data.array, self.data.storagegroup_name_f, + self.data.parent_sg_f, self.data.extra_specs) + self.rest.modify_storage_group.assert_called_once_with( + self.data.array, self.data.parent_sg_f, payload) + + def test_remove_child_sg_from_parent_sg(self): + payload = {"editStorageGroupActionParam": { + "removeStorageGroupParam": { + "storageGroupId": [self.data.storagegroup_name_f], + "force": 'true'}}} + with mock.patch.object(self.rest, 'modify_storage_group', + return_value=(202, self.data.job_list[0])): + self.rest.remove_child_sg_from_parent_sg( + self.data.array, self.data.storagegroup_name_f, + self.data.parent_sg_f, self.data.extra_specs) + self.rest.modify_storage_group.assert_called_once_with( + self.data.array, self.data.parent_sg_f, payload) + + def test_get_volume_list(self): + ref_volumes = ['00001', '00002'] + volumes = self.rest.get_volume_list(self.data.array, {}) + self.assertEqual(ref_volumes, volumes) + + def test_get_volume(self): + ref_volumes = self.data.volume_details[0] + device_id = self.data.volume_details[0]['volumeId'] + volumes = self.rest.get_volume(self.data.array, device_id) + self.assertEqual(ref_volumes, volumes) + + def test_get_private_volume(self): + device_id = self.data.volume_details[0]['volumeId'] + ref_volume = self.data.private_vol_details['resultList']['result'][0] + volume = self.rest._get_private_volume(self.data.array, device_id) + self.assertEqual(ref_volume, volume) + + def test_get_private_volume_exception(self): + device_id = self.data.volume_details[0]['volumeId'] + with mock.patch.object(self.rest, 'get_resource', + return_value={}): + self.assertRaises(exception.VolumeBackendAPIException, + self.rest._get_private_volume, + self.data.array, device_id) + + def test_modify_volume_success(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + payload = {'someKey': 'someValue'} + with mock.patch.object(self.rest, 'modify_resource'): + 
self.rest._modify_volume(array, device_id, payload) + self.rest.modify_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'volume', + payload, resource_name=device_id) + + def test_modify_volume_failed(self): + payload = {'someKey': self.data.failed_resource} + device_id = self.data.volume_details[0]['volumeId'] + self.assertRaises( + exception.VolumeBackendAPIException, + self.rest._modify_volume, self.data.array, + device_id, payload) + + def test_extend_volume(self): + device_id = self.data.volume_details[0]['volumeId'] + new_size = '3' + extend_vol_payload = {"executionOption": "ASYNCHRONOUS", + "editVolumeActionParam": { + "expandVolumeParam": { + "volumeAttribute": { + "volume_size": new_size, + "capacityUnit": "GB"}}}} + with mock.patch.object(self.rest, '_modify_volume', + return_value=(202, self.data.job_list[0])): + self.rest.extend_volume(self.data.array, device_id, new_size, + self.data.extra_specs) + self.rest._modify_volume.assert_called_once_with( + self.data.array, device_id, extend_vol_payload) + + def test_delete_volume(self): + device_id = self.data.volume_details[0]['volumeId'] + deallocate_vol_payload = {"editVolumeActionParam": { + "freeVolumeParam": {"free_volume": 'true'}}} + with mock.patch.object(self.rest, 'delete_resource'): + with mock.patch.object(self.rest, '_modify_volume'): + self.rest.delete_volume(self.data.array, device_id) + self.rest._modify_volume.assert_called_once_with( + self.data.array, device_id, deallocate_vol_payload) + self.rest.delete_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'volume', device_id) + + def test_rename_volume(self): + device_id = self.data.volume_details[0]['volumeId'] + payload = {"editVolumeActionParam": { + "modifyVolumeIdentifierParam": { + "volumeIdentifier": { + "identifier_name": 'new_name', + "volumeIdentifierChoice": "identifier_name"}}}} + with mock.patch.object(self.rest, '_modify_volume'): + self.rest.rename_volume(self.data.array, device_id, 
'new_name') + self.rest._modify_volume.assert_called_once_with( + self.data.array, device_id, payload) + + def test_find_mv_connections_for_vol(self): + device_id = self.data.volume_details[0]['volumeId'] + ref_lun_id = int((self.data.maskingview[0]['maskingViewConnection'] + [0]['host_lun_address']), 16) + host_lun_id = self.rest.find_mv_connections_for_vol( + self.data.array, self.data.masking_view_name_f, device_id) + self.assertEqual(ref_lun_id, host_lun_id) + + def test_find_mv_connections_for_vol_failed(self): + # no masking view info retrieved + device_id = self.data.volume_details[0]['volumeId'] + host_lun_id = self.rest.find_mv_connections_for_vol( + self.data.array, self.data.failed_resource, device_id) + self.assertIsNone(host_lun_id) + # no connection info received + with mock.patch.object(self.rest, 'get_resource', + return_value={'no_conn': 'no_info'}): + host_lun_id2 = self.rest.find_mv_connections_for_vol( + self.data.array, self.data.masking_view_name_f, device_id) + self.assertIsNone(host_lun_id2) + + def test_get_storage_groups_from_volume(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + ref_list = self.data.volume_details[0]['storageGroupId'] + sg_list = self.rest.get_storage_groups_from_volume(array, device_id) + self.assertEqual(ref_list, sg_list) + + def test_get_num_vols_in_sg(self): + num_vol = self.rest.get_num_vols_in_sg( + self.data.array, self.data.defaultstoragegroup_name) + self.assertEqual(2, num_vol) + + def test_get_num_vols_in_sg_no_num(self): + with mock.patch.object(self.rest, 'get_storage_group', + return_value={}): + num_vol = self.rest.get_num_vols_in_sg( + self.data.array, self.data.defaultstoragegroup_name) + self.assertEqual(0, num_vol) + + def test_is_volume_in_storagegroup(self): + # True + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + storagegroup = self.data.defaultstoragegroup_name + is_vol1 = self.rest.is_volume_in_storagegroup( + array, 
device_id, storagegroup) + # False + with mock.patch.object(self.rest, 'get_storage_groups_from_volume', + return_value=[]): + is_vol2 = self.rest.is_volume_in_storagegroup( + array, device_id, storagegroup) + self.assertTrue(is_vol1) + self.assertFalse(is_vol2) + + def test_find_volume_device_number(self): + array = self.data.array + volume_name = self.data.volume_details[0]['volume_identifier'] + ref_device = self.data.volume_details[0]['volumeId'] + device_number = self.rest.find_volume_device_id(array, volume_name) + self.assertEqual(ref_device, device_number) + + def test_find_volume_device_number_failed(self): + array = self.data.array + with mock.patch.object(self.rest, 'get_volume_list', + return_value=[]): + device_number = self.rest.find_volume_device_id( + array, 'name') + self.assertIsNone(device_number) + + def test_get_volume_success(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + ref_volume = self.data.volume_details[0] + volume = self.rest.get_volume(array, device_id) + self.assertEqual(ref_volume, volume) + + def test_get_volume_failed(self): + array = self.data.array + device_id = self.data.failed_resource + self.assertRaises(exception.VolumeBackendAPIException, + self.rest.get_volume, + array, device_id) + + def test_find_volume_identifier(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + ref_name = self.data.volume_details[0]['volume_identifier'] + vol_name = self.rest.find_volume_identifier(array, device_id) + self.assertEqual(ref_name, vol_name) + + def test_get_volume_size(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + ref_size = self.data.test_volume.size + size = self.rest.get_size_of_device_on_array(array, device_id) + self.assertEqual(ref_size, size) + + def test_get_volume_size_exception(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + with mock.patch.object(self.rest, 
'get_volume', + return_value=None): + size = self.rest.get_size_of_device_on_array( + array, device_id) + self.assertIsNone(size) + + def test_get_portgroup(self): + array = self.data.array + pg_name = self.data.port_group_name_f + ref_pg = self.data.portgroup[0] + portgroup = self.rest.get_portgroup(array, pg_name) + self.assertEqual(ref_pg, portgroup) + + def test_get_port_ids(self): + array = self.data.array + pg_name = self.data.port_group_name_f + ref_ports = ["FA-1D:4"] + port_ids = self.rest.get_port_ids(array, pg_name) + self.assertEqual(ref_ports, port_ids) + + def test_get_port_ids_no_portgroup(self): + array = self.data.array + pg_name = self.data.port_group_name_f + with mock.patch.object(self.rest, 'get_portgroup', + return_value=None): + port_ids = self.rest.get_port_ids(array, pg_name) + self.assertFalse(port_ids) + + def test_get_port(self): + array = self.data.array + port_id = "FA-1D:4" + ref_port = self.data.port_list[0] + port = self.rest.get_port(array, port_id) + self.assertEqual(ref_port, port) + + def test_get_iscsi_ip_address_and_iqn(self): + array = self.data.array + port_id = "SE-4E:0" + ref_ip = [self.data.ip] + ref_iqn = self.data.initiator + ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( + array, port_id) + self.assertEqual(ref_ip, ip_addresses) + self.assertEqual(ref_iqn, iqn) + + def test_get_iscsi_ip_address_and_iqn_no_port(self): + array = self.data.array + port_id = "SE-4E:0" + with mock.patch.object(self.rest, 'get_port', return_value=None): + ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( + array, port_id) + self.assertIsNone(ip_addresses) + self.assertIsNone(iqn) + + def test_get_target_wwns(self): + array = self.data.array + pg_name = self.data.port_group_name_f + ref_wwns = [self.data.wwnn1] + target_wwns = self.rest.get_target_wwns(array, pg_name) + self.assertEqual(ref_wwns, target_wwns) + + def test_get_target_wwns_failed(self): + array = self.data.array + pg_name = self.data.port_group_name_f + 
with mock.patch.object(self.rest, 'get_port', + return_value=None): + target_wwns = self.rest.get_target_wwns(array, pg_name) + self.assertFalse(target_wwns) + + def test_get_initiator_group(self): + array = self.data.array + ig_name = self.data.initiatorgroup_name_f + ref_ig = self.data.inititiatorgroup[0] + response_ig = self.rest.get_initiator_group(array, ig_name) + self.assertEqual(ref_ig, response_ig) + + def test_get_initiator(self): + array = self.data.array + initiator_name = self.data.initiator + ref_initiator = self.data.initiator_list[1] + response_initiator = self.rest.get_initiator(array, initiator_name) + self.assertEqual(ref_initiator, response_initiator) + + def test_get_initiator_list(self): + array = self.data.array + with mock.patch.object(self.rest, 'get_resource', + return_value={'initiatorId': '1234'}): + init_list = self.rest.get_initiator_list(array) + self.assertIsNotNone(init_list) + + def test_get_initiator_list_none(self): + array = self.data.array + with mock.patch.object(self.rest, 'get_resource', return_value={}): + init_list = self.rest.get_initiator_list(array) + self.assertFalse(init_list) + + def test_get_in_use_initiator_list_from_array(self): + ref_list = self.data.initiator_list[2]['initiatorId'] + init_list = self.rest.get_in_use_initiator_list_from_array( + self.data.array) + self.assertEqual(ref_list, init_list) + + def test_get_in_use_initiator_list_from_array_failed(self): + array = self.data.array + with mock.patch.object(self.rest, 'get_initiator_list', + return_value=[]): + init_list = self.rest.get_in_use_initiator_list_from_array(array) + self.assertFalse(init_list) + + def test_get_initiator_group_from_initiator(self): + initiator = self.data.wwpn1 + ref_group = self.data.initiatorgroup_name_f + init_group = self.rest.get_initiator_group_from_initiator( + self.data.array, initiator) + self.assertEqual(ref_group, init_group) + + def test_get_initiator_group_from_initiator_failed(self): + initiator = self.data.wwpn1 + 
with mock.patch.object(self.rest, 'get_initiator', + return_value=None): + init_group = self.rest.get_initiator_group_from_initiator( + self.data.array, initiator) + self.assertIsNone(init_group) + with mock.patch.object(self.rest, 'get_initiator', + return_value={'name': 'no_host'}): + init_group = self.rest.get_initiator_group_from_initiator( + self.data.array, initiator) + self.assertIsNone(init_group) + + def test_create_initiator_group(self): + init_group_name = self.data.initiatorgroup_name_f + init_list = [self.data.wwpn1] + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])): + payload = ({"executionOption": "ASYNCHRONOUS", + "hostId": init_group_name, "initiatorId": init_list}) + self.rest.create_initiator_group( + self.data.array, init_group_name, init_list, extra_specs) + self.rest.create_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'host', payload) + + def test_delete_initiator_group(self): + with mock.patch.object(self.rest, 'delete_resource'): + self.rest.delete_initiator_group( + self.data.array, self.data.initiatorgroup_name_f) + self.rest.delete_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'host', + self.data.initiatorgroup_name_f) + + def test_get_masking_view(self): + array = self.data.array + masking_view_name = self.data.masking_view_name_f + ref_mask_view = self.data.maskingview[0] + masking_view = self.rest.get_masking_view(array, masking_view_name) + self.assertEqual(ref_mask_view, masking_view) + + def test_get_masking_views_from_storage_group(self): + array = self.data.array + storagegroup_name = self.data.storagegroup_name_f + ref_mask_view = [self.data.masking_view_name_f] + masking_view = self.rest.get_masking_views_from_storage_group( + array, storagegroup_name) + self.assertEqual(ref_mask_view, masking_view) + + def test_get_masking_views_by_initiator_group(self): + array = self.data.array + 
initiatorgroup_name = self.data.initiatorgroup_name_f + ref_mask_view = [self.data.masking_view_name_f] + masking_view = self.rest.get_masking_views_by_initiator_group( + array, initiatorgroup_name) + self.assertEqual(ref_mask_view, masking_view) + + def test_get_masking_views_by_initiator_group_failed(self): + array = self.data.array + initiatorgroup_name = self.data.initiatorgroup_name_f + with mock.patch.object(self.rest, 'get_initiator_group', + return_value=None): + masking_view = self.rest.get_masking_views_by_initiator_group( + array, initiatorgroup_name) + self.assertFalse(masking_view) + with mock.patch.object(self.rest, 'get_initiator_group', + return_value={'name': 'no_mv'}): + masking_view = self.rest.get_masking_views_by_initiator_group( + array, initiatorgroup_name) + self.assertFalse(masking_view) + + def test_get_element_from_masking_view(self): + array = self.data.array + maskingview_name = self.data.masking_view_name_f + # storage group + ref_sg = self.data.storagegroup_name_f + storagegroup = self.rest.get_element_from_masking_view( + array, maskingview_name, storagegroup=True) + self.assertEqual(ref_sg, storagegroup) + # initiator group + ref_ig = self.data.initiatorgroup_name_f + initiatorgroup = self.rest.get_element_from_masking_view( + array, maskingview_name, host=True) + self.assertEqual(ref_ig, initiatorgroup) + # portgroup + ref_pg = self.data.port_group_name_f + portgroup = self.rest.get_element_from_masking_view( + array, maskingview_name, portgroup=True) + self.assertEqual(ref_pg, portgroup) + + def test_get_element_from_masking_view_failed(self): + array = self.data.array + maskingview_name = self.data.masking_view_name_f + # no element chosen + element = self.rest.get_element_from_masking_view( + array, maskingview_name) + self.assertIsNone(element) + # cannot retrieve maskingview + with mock.patch.object(self.rest, 'get_masking_view', + return_value=None): + self.assertRaises(exception.VolumeBackendAPIException, + 
self.rest.get_element_from_masking_view, + array, maskingview_name) + + def test_get_common_masking_views(self): + array = self.data.array + initiatorgroup = self.data.initiatorgroup_name_f + portgroup = self.data.port_group_name_f + ref_maskingview = self.data.masking_view_name_f + maskingview_list = self.rest.get_common_masking_views( + array, portgroup, initiatorgroup) + self.assertEqual(ref_maskingview, maskingview_list) + + def test_get_common_masking_views_none(self): + array = self.data.array + initiatorgroup = self.data.initiatorgroup_name_f + portgroup = self.data.port_group_name_f + with mock.patch.object(self.rest, 'get_masking_view_list', + return_value=[]): + maskingview_list = self.rest.get_common_masking_views( + array, portgroup, initiatorgroup) + self.assertFalse(maskingview_list) + + def test_create_masking_view(self): + maskingview_name = self.data.masking_view_name_f + storagegroup_name = self.data.storagegroup_name_f + port_group_name = self.data.port_group_name_f + init_group_name = self.data.initiatorgroup_name_f + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])): + payload = ({"executionOption": "ASYNCHRONOUS", + "portGroupSelection": { + "useExistingPortGroupParam": { + "portGroupId": port_group_name}}, + "maskingViewId": maskingview_name, + "hostOrHostGroupSelection": { + "useExistingHostParam": { + "hostId": init_group_name}}, + "storageGroupSelection": { + "useExistingStorageGroupParam": { + "storageGroupId": storagegroup_name}}}) + self.rest.create_masking_view( + self.data.array, maskingview_name, storagegroup_name, + port_group_name, init_group_name, extra_specs) + self.rest.create_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'maskingview', payload) + + def test_delete_masking_view(self): + with mock.patch.object(self.rest, 'delete_resource'): + self.rest.delete_masking_view( + self.data.array, 
self.data.masking_view_name_f) + self.rest.delete_resource.assert_called_once_with( + self.data.array, 'sloprovisioning', 'maskingview', + self.data.masking_view_name_f) + + def test_get_replication_capabilities(self): + ref_response = self.data.capabilities['symmetrixCapability'][1] + capabilities = self.rest.get_replication_capabilities(self.data.array) + self.assertEqual(ref_response, capabilities) + + def test_is_clone_licenced(self): + licence = self.rest.is_snapvx_licensed(self.data.array) + self.assertTrue(licence) + false_response = {'rdfCapable': True, + 'snapVxCapable': False, + 'symmetrixId': '000197800123'} + with mock.patch.object(self.rest, 'get_replication_capabilities', + return_value=false_response): + licence2 = self.rest.is_snapvx_licensed(self.data.array) + self.assertFalse(licence2) + + def test_is_clone_licenced_error(self): + with mock.patch.object(self.rest, 'get_replication_capabilities', + return_value=None): + licence3 = self.rest.is_snapvx_licensed(self.data.array) + self.assertFalse(licence3) + + def test_create_volume_snap(self): + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + device_id = self.data.volume_details[0]['volumeId'] + extra_specs = self.data.extra_specs + payload = {"deviceNameListSource": [{"name": device_id}], + "bothSides": 'false', "star": 'false', + "force": 'false'} + resource_type = 'snapshot/%(snap)s' % {'snap': snap_name} + with mock.patch.object(self.rest, 'create_resource', + return_value=(202, self.data.job_list[0])): + self.rest.create_volume_snap( + self.data.array, snap_name, device_id, extra_specs) + self.rest.create_resource.assert_called_once_with( + self.data.array, 'replication', resource_type, + payload, private='/private') + + def test_modify_volume_snap(self): + array = self.data.array + source_id = self.data.volume_details[0]['volumeId'] + target_id = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = 
(self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + extra_specs = self.data.extra_specs + payload = {"deviceNameListSource": [{"name": source_id}], + "deviceNameListTarget": [ + {"name": target_id}], + "copy": 'true', "action": "", + "star": 'false', "force": 'false', + "exact": 'false', "remote": 'false', + "symforce": 'false', "nocopy": 'false'} + with mock.patch.object( + self.rest, 'modify_resource', return_value=( + 202, self.data.job_list[0])) as mock_modify: + # link + payload["action"] = "Link" + self.rest.modify_volume_snap( + array, source_id, target_id, snap_name, extra_specs, link=True) + self.rest.modify_resource.assert_called_once_with( + array, 'replication', 'snapshot', payload, + resource_name=snap_name, private='/private') + # unlink + mock_modify.reset_mock() + payload["action"] = "Unlink" + self.rest.modify_volume_snap( + array, source_id, target_id, snap_name, + extra_specs, unlink=True) + self.rest.modify_resource.assert_called_once_with( + array, 'replication', 'snapshot', payload, + resource_name=snap_name, private='/private') + # none selected + mock_modify.reset_mock() + self.rest.modify_volume_snap( + array, source_id, target_id, snap_name, + extra_specs) + self.rest.modify_resource.assert_not_called() + + def test_delete_volume_snap(self): + array = self.data.array + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + source_device_id = self.data.volume_details[0]['volumeId'] + payload = {"deviceNameListSource": [{"name": source_device_id}]} + with mock.patch.object(self.rest, 'delete_resource'): + self.rest.delete_volume_snap(array, snap_name, source_device_id) + self.rest.delete_resource.assert_called_once_with( + array, 'replication', 'snapshot', snap_name, + payload=payload, private='/private') + + def test_get_volume_snap_info(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + ref_snap_info = self.data.volume_snap_vx + snap_info = 
self.rest.get_volume_snap_info(array, source_device_id) + self.assertEqual(ref_snap_info, snap_info) + + def test_get_volume_snap(self): + array = self.data.array + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + device_id = self.data.volume_details[0]['volumeId'] + ref_snap = self.data.volume_snap_vx['snapshotSrcs'][0] + snap = self.rest.get_volume_snap(array, device_id, snap_name) + self.assertEqual(ref_snap, snap) + + def test_get_volume_snap_none(self): + array = self.data.array + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + device_id = self.data.volume_details[0]['volumeId'] + with mock.patch.object(self.rest, 'get_volume_snap_info', + return_value=None): + snap = self.rest.get_volume_snap(array, device_id, snap_name) + self.assertIsNone(snap) + with mock.patch.object(self.rest, 'get_volume_snap_info', + return_value={'snapshotSrcs': []}): + snap = self.rest.get_volume_snap(array, device_id, snap_name) + self.assertIsNone(snap) + + def test_is_sync_complete(self): + array = self.data.array + source_id = self.data.volume_details[0]['volumeId'] + target_id = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + extra_specs = self.data.extra_specs + rc = self.rest.is_sync_complete( + array, source_id, target_id, snap_name, extra_specs) + self.assertTrue(rc) + + def test_is_sync_complete_exception(self): + array = self.data.array + source_id = self.data.volume_details[0]['volumeId'] + target_id = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + extra_specs = self.data.extra_specs + with mock.patch.object( + self.rest, '_is_sync_complete', + side_effect=exception.VolumeBackendAPIException): + self.assertRaises(exception.VolumeBackendAPIException, + self.rest.is_sync_complete, 
array, source_id, + target_id, snap_name, extra_specs) + + def test_get_sync_session(self): + array = self.data.array + source_id = self.data.volume_details[0]['volumeId'] + target_id = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['snapshotName']) + ref_sync = (self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]) + sync = self.rest._get_sync_session( + array, source_id, snap_name, target_id) + self.assertEqual(ref_sync, sync) + + def test_find_snap_vx_sessions(self): + array = self.data.array + source_id = self.data.volume_details[0]['volumeId'] + ref_sessions = [{'snap_name': 'temp-1', + 'source_vol': '00001', + 'target_vol_list': ['00002']}, + {'snap_name': 'temp-1', + 'source_vol': '00001', + 'target_vol_list': ['00002']}] + sessions = self.rest.find_snap_vx_sessions(array, source_id) + self.assertEqual(ref_sessions, sessions) + + def test_find_snap_vx_sessions_tgt_only(self): + array = self.data.array + source_id = self.data.volume_details[0]['volumeId'] + ref_sessions = [{'snap_name': 'temp-1', + 'source_vol': '00001', + 'target_vol_list': ['00002']}] + sessions = self.rest.find_snap_vx_sessions( + array, source_id, tgt_only=True) + self.assertEqual(ref_sessions, sessions) + + +class VMAXProvisionTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXProvisionTest, self).setUp() + config_group = 'ProvisionTests' + self.fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_i) + configuration = FakeConfiguration(self.fake_xml, config_group) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = iscsi.VMAXISCSIDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.provision = self.common.provision + self.utils = self.common.utils + + def test_create_storage_group(self): + array = 
self.data.array + storagegroup_name = self.data.storagegroup_name_f + srp = self.data.srp + slo = self.data.slo + workload = self.data.workload + extra_specs = self.data.extra_specs + storagegroup = self.provision.create_storage_group( + array, storagegroup_name, srp, slo, workload, extra_specs) + self.assertEqual(storagegroup_name, storagegroup) + + def test_create_volume_from_sg(self): + array = self.data.array + storagegroup_name = self.data.storagegroup_name_f + volumeId = self.data.test_volume.id + volume_name = self.utils.get_volume_element_name(volumeId) + volume_size = self.data.test_volume.size + extra_specs = self.data.extra_specs + ref_dict = self.data.provider_location + volume_dict = self.provision.create_volume_from_sg( + array, volume_name, storagegroup_name, volume_size, extra_specs) + self.assertEqual(ref_dict, volume_dict) + + def test_delete_volume_from_srp(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + volume_name = self.data.volume_details[0]['volume_identifier'] + with mock.patch.object(self.provision.rest, 'delete_volume'): + self.provision.delete_volume_from_srp( + array, device_id, volume_name) + self.provision.rest.delete_volume.assert_called_once_with( + array, device_id) + + def test_create_volume_snap_vx(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + snap_name = self.data.snap_location['snap_name'] + extra_specs = self.data.extra_specs + with mock.patch.object(self.provision.rest, 'create_volume_snap'): + self.provision.create_volume_snapvx( + array, source_device_id, snap_name, extra_specs) + self.provision.rest.create_volume_snap.assert_called_once_with( + array, snap_name, source_device_id, extra_specs) + + def test_create_volume_replica_create_snap_true(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + target_device_id = ( + self.data.volume_snap_vx + 
['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = self.data.snap_location['snap_name'] + extra_specs = self.data.extra_specs + with mock.patch.object(self.provision, 'create_volume_snapvx'): + with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + self.provision.create_volume_replica( + array, source_device_id, target_device_id, + snap_name, extra_specs, create_snap=True) + self.provision.rest.modify_volume_snap.assert_called_once_with( + array, source_device_id, target_device_id, snap_name, + extra_specs, link=True) + self.provision.create_volume_snapvx.assert_called_once_with( + array, source_device_id, snap_name, extra_specs) + + def test_create_volume_replica_create_snap_false(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + target_device_id = ( + self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = self.data.snap_location['snap_name'] + extra_specs = self.data.extra_specs + with mock.patch.object(self.provision, 'create_volume_snapvx'): + with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + self.provision.create_volume_replica( + array, source_device_id, target_device_id, + snap_name, extra_specs, create_snap=False) + self.provision.rest.modify_volume_snap.assert_called_once_with( + array, source_device_id, target_device_id, snap_name, + extra_specs, link=True) + self.provision.create_volume_snapvx.assert_not_called() + + def test_break_replication_relationship_sync_wait_true(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + target_device_id = ( + self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = self.data.snap_location['snap_name'] + extra_specs = self.data.extra_specs + with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + with mock.patch.object(self.provision.rest, + 'is_sync_complete'): + 
self.provision.break_replication_relationship( + array, target_device_id, source_device_id, snap_name, + extra_specs, wait_for_sync=True) + (self.provision.rest.modify_volume_snap. + assert_called_once_with( + array, source_device_id, target_device_id, + snap_name, extra_specs, unlink=True)) + (self.provision.rest.is_sync_complete. + assert_called_once_with( + array, source_device_id, target_device_id, + snap_name, extra_specs)) + + def test_break_replication_relationship_sync_wait_false(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + target_device_id = ( + self.data.volume_snap_vx + ['snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']) + snap_name = self.data.snap_location['snap_name'] + extra_specs = self.data.extra_specs + with mock.patch.object(self.provision.rest, 'modify_volume_snap'): + with mock.patch.object(self.provision.rest, + 'is_sync_complete'): + self.provision.break_replication_relationship( + array, target_device_id, source_device_id, snap_name, + extra_specs, wait_for_sync=False) + (self.provision.rest.modify_volume_snap. 
+ assert_called_once_with( + array, source_device_id, target_device_id, + snap_name, extra_specs, unlink=True)) + self.provision.rest.is_sync_complete.assert_not_called() + + def test_delete_volume_snap(self): + array = self.data.array + source_device_id = self.data.volume_details[0]['volumeId'] + snap_name = self.data.snap_location['snap_name'] + with mock.patch.object(self.provision.rest, 'delete_volume_snap'): + self.provision.delete_volume_snap( + array, snap_name, source_device_id) + self.provision.rest.delete_volume_snap.assert_called_once_with( + array, snap_name, source_device_id) + + def test_extend_volume(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + new_size = '3' + extra_specs = self.data.extra_specs + with mock.patch.object(self.provision.rest, 'extend_volume'): + self.provision.extend_volume(array, device_id, new_size, + extra_specs) + self.provision.rest.extend_volume.assert_called_once_with( + array, device_id, new_size, extra_specs) + + def test_get_srp_pool_stats_no_wlp(self): + array = self.data.array + array_info = self.common.pool_info['arrays_info'][0] + ref_stats = (self.data.srp_details['total_usable_cap_gb'], + float(self.data.srp_details['total_usable_cap_gb'] + - self.data.srp_details['total_allocated_cap_gb']), + self.data.srp_details['total_subscribed_cap_gb'], + self.data.srp_details['reserved_cap_percent'], False) + with mock.patch.object(self.provision, + '_get_remaining_slo_capacity_wlp', + return_value=-1): + stats = self.provision.get_srp_pool_stats(array, array_info) + self.assertEqual(ref_stats, stats) + + def test_get_srp_pool_stats_wlp_enabled(self): + array = self.data.array + array_info = self.common.pool_info['arrays_info'][0] + srp = self.data.srp + headroom_capacity = self.provision.rest.get_headroom_capacity( + array, srp, array_info['SLO'], array_info['Workload']) + ref_stats = (self.data.srp_details['total_usable_cap_gb'], + float(headroom_capacity + - 
self.data.srp_details['total_allocated_cap_gb']), + self.data.srp_details['total_subscribed_cap_gb'], + self.data.srp_details['reserved_cap_percent'], True) + stats = self.provision.get_srp_pool_stats(array, array_info) + self.assertEqual(ref_stats, stats) + + def test_get_srp_pool_stats_errors(self): + # cannot retrieve srp + array = self.data.array + array_info = {'srpName': self.data.failed_resource} + ref_stats = (0, 0, 0, 0, False) + stats = self.provision.get_srp_pool_stats(array, array_info) + self.assertEqual(ref_stats, stats) + # cannot report on all stats + with mock.patch.object(self.provision.rest, 'get_srp_by_name', + return_value={'total_usable_cap_gb': 33}): + with mock.patch.object(self.provision, + '_get_remaining_slo_capacity_wlp', + return_value=(-1)): + ref_stats = (33, 0, 0, 0, False) + stats = self.provision.get_srp_pool_stats(array, array_info) + self.assertEqual(ref_stats, stats) + + def test_get_remaining_slo_capacity_wlp(self): + array = self.data.array + array_info = self.common.pool_info['arrays_info'][0] + srp = self.data.srp + ref_capacity = self.provision.rest.get_headroom_capacity( + array, srp, array_info['SLO'], array_info['Workload']) + remaining_capacity = ( + self.provision._get_remaining_slo_capacity_wlp( + array, srp, array_info)) + self.assertEqual(ref_capacity, remaining_capacity) + + def test_get_remaining_slo_capacity_no_slo_or_wlp(self): + array = self.data.array + array_info = self.common.pool_info['arrays_info'][0] + srp = self.data.srp + ref_capacity = -1 + with mock.patch.object(self.provision.rest, 'get_headroom_capacity', + return_value=None): + remaining_capacity = ( + self.provision._get_remaining_slo_capacity_wlp( + array, srp, {'SLO': None})) + self.assertEqual(ref_capacity, remaining_capacity) + self.provision.rest.get_headroom_capacity.assert_not_called() + remaining_capacity = ( + self.provision._get_remaining_slo_capacity_wlp( + array, srp, array_info)) + self.assertEqual(ref_capacity, remaining_capacity) + 
+ def test_verify_slo_workload_true(self): + # with slo and workload + array = self.data.array + slo = self.data.slo + workload = self.data.workload + srp = self.data.srp + valid_slo, valid_workload = self.provision.verify_slo_workload( + array, slo, workload, srp) + self.assertTrue(valid_slo) + self.assertTrue(valid_workload) + # slo and workload = none + slo2 = None + workload2 = None + valid_slo2, valid_workload2 = self.provision.verify_slo_workload( + array, slo2, workload2, srp) + self.assertTrue(valid_slo2) + self.assertTrue(valid_workload2) + slo2 = None + workload2 = 'None' + valid_slo2, valid_workload2 = self.provision.verify_slo_workload( + array, slo2, workload2, srp) + self.assertTrue(valid_slo2) + self.assertTrue(valid_workload2) + + def test_verify_slo_workload_false(self): + # Both wrong + array = self.data.array + slo = 'Diamante' + workload = 'DSSS' + srp = self.data.srp + valid_slo, valid_workload = self.provision.verify_slo_workload( + array, slo, workload, srp) + self.assertFalse(valid_slo) + self.assertFalse(valid_workload) + # Workload set, no slo set + valid_slo, valid_workload = self.provision.verify_slo_workload( + array, None, self.data.workload, srp) + self.assertTrue(valid_slo) + self.assertFalse(valid_workload) + + +class VMAXCommonTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXCommonTest, self).setUp() + config_group = 'CommonTests' + self.fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_f) + configuration = FakeConfiguration(self.fake_xml, config_group, + 1, 1) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = fc.VMAXFCDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.masking = self.common.masking + self.provision = self.common.provision + self.rest = self.common.rest + self.utils = self.common.utils + self.utils.get_volumetype_extra_specs = ( + 
mock.Mock(return_value=self.data.vol_type_extra_specs)) + + @mock.patch.object(rest.VMAXRest, + 'set_rest_credentials') + @mock.patch.object(common.VMAXCommon, + '_get_slo_workload_combinations', + return_value=[]) + @mock.patch.object(utils.VMAXUtils, + 'parse_file_to_get_array_map', + return_value=[]) + def test_gather_info_no_opts(self, mock_parse, mock_combo, mock_rest): + configuration = FakeConfiguration(None, 'config_group', None, None) + fc.VMAXFCDriver(configuration=configuration) + + def test_get_slo_workload_combinations_success(self): + array_info = self.utils.parse_file_to_get_array_map( + self.common.pool_info['config_file']) + finalarrayinfolist = self.common._get_slo_workload_combinations( + array_info) + self.assertTrue(len(finalarrayinfolist) > 1) + + def test_get_slo_workload_combinations_failed(self): + array_info = {} + self.assertRaises(exception.VolumeBackendAPIException, + self.common._get_slo_workload_combinations, + array_info) + + def test_create_volume(self): + ref_model_update = ( + {'provider_location': six.text_type(self.data.provider_location)}) + model_update = self.common.create_volume(self.data.test_volume) + self.assertEqual(ref_model_update, model_update) + + def test_create_volume_from_snapshot(self): + ref_model_update = ( + {'provider_location': six.text_type( + self.data.provider_location)}) + model_update = self.common.create_volume_from_snapshot( + self.data.test_clone_volume, self.data.test_snapshot) + self.assertEqual(ref_model_update, model_update) + + def test_cloned_volume(self): + ref_model_update = ( + {'provider_location': six.text_type( + self.data.provider_location)}) + model_update = self.common.create_cloned_volume( + self.data.test_clone_volume, self.data.test_volume) + self.assertEqual(ref_model_update, model_update) + + def test_delete_volume(self): + with mock.patch.object(self.common, '_delete_volume'): + self.common.delete_volume(self.data.test_volume) + self.common._delete_volume.assert_called_once_with( 
+ self.data.test_volume) + + def test_create_snapshot(self): + ref_model_update = ( + {'provider_location': six.text_type( + self.data.snap_location)}) + model_update = self.common.create_snapshot( + self.data.test_snapshot, self.data.test_volume) + self.assertEqual(ref_model_update, model_update) + + def test_delete_snapshot(self): + snap_name = self.data.snap_location['snap_name'] + sourcedevice_id = self.data.snap_location['source_id'] + with mock.patch.object(self.provision, 'delete_volume_snap'): + self.common.delete_snapshot(self.data.test_snapshot, + self.data.test_volume) + self.provision.delete_volume_snap.assert_called_once_with( + self.data.array, snap_name, sourcedevice_id) + + def test_delete_snapshot_not_found(self): + with mock.patch.object(self.common, '_parse_snap_info', + return_value=(None, None)): + with mock.patch.object(self.provision, 'delete_volume_snap'): + self.common.delete_snapshot(self.data.test_snapshot, + self.data.test_volume) + self.provision.delete_volume_snap.assert_not_called() + + def test_remove_members(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + volume = self.data.test_volume + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object(self.masking, 'remove_and_reset_members'): + self.common._remove_members(array, volume, device_id, extra_specs) + self.masking.remove_and_reset_members.assert_called_once_with( + array, device_id, volume_name, extra_specs, True) + + def test_unmap_lun(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + volume = self.data.test_volume + extra_specs = deepcopy(self.data.extra_specs_intervals_set) + extra_specs['port_group_name'] = self.data.port_group_name_f + connector = self.data.connector + with mock.patch.object(self.common, '_remove_members'): + self.common._unmap_lun(volume, connector) + self.common._remove_members.assert_called_once_with( + array, volume, device_id, 
extra_specs) + + def test_unmap_lun_not_mapped(self): + volume = self.data.test_volume + connector = self.data.connector + with mock.patch.object(self.common, 'find_host_lun_id', + return_value={}): + with mock.patch.object(self.common, '_remove_members'): + self.common._unmap_lun(volume, connector) + self.common._remove_members.assert_not_called() + + def test_initialize_connection_already_mapped(self): + volume = self.data.test_volume + connector = self.data.connector + host_lun = (self.data.maskingview[0]['maskingViewConnection'][0] + ['host_lun_address']) + ref_dict = {'hostlunid': int(host_lun, 16), + 'maskingview': self.data.masking_view_name_f, + 'array': self.data.array} + device_info_dict = self.common.initialize_connection(volume, connector) + self.assertEqual(ref_dict, device_info_dict) + + def test_initialize_connection_not_mapped(self): + volume = self.data.test_volume + connector = self.data.connector + extra_specs = deepcopy(self.data.extra_specs_intervals_set) + extra_specs['port_group_name'] = self.data.port_group_name_f + masking_view_dict = self.common._populate_masking_dict( + volume, connector, extra_specs) + with mock.patch.object(self.common, 'find_host_lun_id', + return_value={}): + with mock.patch.object( + self.common, '_attach_volume', return_value=( + {}, self.data.port_group_name_f)): + device_info_dict = self.common.initialize_connection(volume, + connector) + self.assertEqual({}, device_info_dict) + self.common._attach_volume.assert_called_once_with( + volume, connector, extra_specs, masking_view_dict) + + def test_attach_volume_success(self): + volume = self.data.test_volume + connector = self.data.connector + extra_specs = deepcopy(self.data.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + masking_view_dict = self.common._populate_masking_dict( + volume, connector, extra_specs) + host_lun = (self.data.maskingview[0]['maskingViewConnection'][0] + ['host_lun_address']) + ref_dict = {'hostlunid': 
int(host_lun, 16), + 'maskingview': self.data.masking_view_name_f, + 'array': self.data.array} + with mock.patch.object(self.masking, 'setup_masking_view', + return_value={ + 'port_group_name': + self.data.port_group_name_f}): + device_info_dict, pg = self.common._attach_volume( + volume, connector, extra_specs, masking_view_dict) + self.assertEqual(ref_dict, device_info_dict) + + def test_attach_volume_failed(self): + volume = self.data.test_volume + connector = self.data.connector + extra_specs = deepcopy(self.data.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + masking_view_dict = self.common._populate_masking_dict( + volume, connector, extra_specs) + with mock.patch.object(self.masking, 'setup_masking_view', + return_value={}): + with mock.patch.object(self.common, 'find_host_lun_id', + return_value={}): + with mock.patch.object( + self.masking, + 'check_if_rollback_action_for_masking_required'): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._attach_volume, volume, + connector, extra_specs, + masking_view_dict) + device_id = self.data.volume_details[0]['volumeId'] + (self.masking. + check_if_rollback_action_for_masking_required. 
+ assert_called_once_with(self.data.array, device_id, {})) + + def test_terminate_connection(self): + volume = self.data.test_volume + connector = self.data.connector + with mock.patch.object(self.common, '_unmap_lun'): + self.common.terminate_connection(volume, connector) + self.common._unmap_lun.assert_called_once_with( + volume, connector) + + def test_extend_volume_success(self): + volume = self.data.test_volume + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + new_size = self.data.test_volume.size + ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) + ref_extra_specs['port_group_name'] = self.data.port_group_name_f + with mock.patch.object(self.common, '_sync_check'): + with mock.patch.object(self.provision, 'extend_volume'): + self.common.extend_volume(volume, new_size) + self.provision.extend_volume.assert_called_once_with( + array, device_id, new_size, ref_extra_specs) + + def test_extend_volume_failed_snap_src(self): + volume = self.data.test_volume + new_size = self.data.test_volume.size + with mock.patch.object(self.rest, 'is_vol_in_rep_session', + return_value=(False, True, None)): + self.assertRaises(exception.VolumeBackendAPIException, + self.common.extend_volume, volume, new_size) + + def test_extend_volume_failed_no_device_id(self): + volume = self.data.test_volume + new_size = self.data.test_volume.size + with mock.patch.object(self.common, '_find_device_on_array', + return_value=None): + self.assertRaises(exception.VolumeBackendAPIException, + self.common.extend_volume, volume, new_size) + + def test_extend_volume_failed_wrong_size(self): + volume = self.data.test_volume + new_size = 1 + self.assertRaises(exception.VolumeBackendAPIException, + self.common.extend_volume, volume, new_size) + + def test_update_volume_stats(self): + data = self.common.update_volume_stats() + self.assertEqual('CommonTests', data['volume_backend_name']) + + def test_update_volume_stats_no_wlp(self): + with 
mock.patch.object(self.common, '_update_srp_stats', + return_value=('123s#SRP_1#None#None', + 100, 90, 90, 10, False)): + data = self.common.update_volume_stats() + self.assertEqual('CommonTests', data['volume_backend_name']) + + def test_set_config_file_and_get_extra_specs(self): + volume = self.data.test_volume + extra_specs, config_file = ( + self.common._set_config_file_and_get_extra_specs(volume)) + self.assertEqual(self.data.vol_type_extra_specs, extra_specs) + self.assertEqual(self.fake_xml, config_file) + + def test_set_config_file_and_get_extra_specs_no_specs(self): + volume = self.data.test_volume + ref_config = '/etc/cinder/cinder_dell_emc_config.xml' + with mock.patch.object(self.utils, 'get_volumetype_extra_specs', + return_value=None): + extra_specs, config_file = ( + self.common._set_config_file_and_get_extra_specs(volume)) + self.assertIsNone(extra_specs) + self.assertEqual(ref_config, config_file) + + def test_find_device_on_array_success(self): + volume = self.data.test_volume + extra_specs = self.data.extra_specs + ref_device_id = self.data.volume_details[0]['volumeId'] + founddevice_id = self.common._find_device_on_array(volume, extra_specs) + self.assertEqual(ref_device_id, founddevice_id) + + def test_find_device_on_array_different_device_id(self): + volume = self.data.test_volume + extra_specs = self.data.extra_specs + with mock.patch.object( + self.rest, 'find_volume_device_id', + return_value='01234'): + founddevice_id = self.common._find_device_on_array( + volume, extra_specs) + self.assertIsNone(founddevice_id) + + def test_find_device_on_array_provider_location_not_string(self): + volume = fake_volume.fake_volume_obj( + context='cxt', provider_location=None) + extra_specs = self.data.extra_specs + founddevice_id = self.common._find_device_on_array( + volume, extra_specs) + self.assertIsNone(founddevice_id) + + def test_find_host_lun_id_attached(self): + volume = self.data.test_volume + extra_specs = self.data.extra_specs + host = 'HostX' 
+ host_lun = (self.data.maskingview[0]['maskingViewConnection'][0] + ['host_lun_address']) + ref_masked = {'hostlunid': int(host_lun, 16), + 'maskingview': self.data.masking_view_name_f, + 'array': self.data.array} + maskedvols = self.common.find_host_lun_id( + volume, host, extra_specs) + self.assertEqual(ref_masked, maskedvols) + + def test_find_host_lun_id_not_attached(self): + volume = self.data.test_volume + extra_specs = self.data.extra_specs + host = 'HostX' + with mock.patch.object(self.rest, 'find_mv_connections_for_vol', + return_value=None): + maskedvols = self.common.find_host_lun_id( + volume, host, extra_specs) + self.assertEqual({}, maskedvols) + + def test_get_masking_views_from_volume(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + host = 'HostX' + ref_mv_list = [self.data.masking_view_name_f] + maskingview_list = self.common.get_masking_views_from_volume( + array, device_id, host) + self.assertEqual(ref_mv_list, maskingview_list) + + def test_get_masking_views_from_volume_wrong_host(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + host = 'DifferentHost' + maskingview_list = self.common.get_masking_views_from_volume( + array, device_id, host) + self.assertFalse(maskingview_list) + + def test_register_config_file_from_config_group_exists(self): + config_group_name = 'CommonTests' + config_file = self.common._register_config_file_from_config_group( + config_group_name) + self.assertEqual(self.fake_xml, config_file) + + def test_register_config_file_from_config_group_does_not_exist(self): + config_group_name = 'IncorrectName' + self.assertRaises(exception.VolumeBackendAPIException, + self.common._register_config_file_from_config_group, + config_group_name) + + def test_initial_setup_success(self): + volume = self.data.test_volume + ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) + ref_extra_specs['port_group_name'] = self.data.port_group_name_f + 
extra_specs = self.common._initial_setup(volume) + self.assertEqual(ref_extra_specs, extra_specs) + + def test_initial_setup_failed(self): + volume = self.data.test_volume + with mock.patch.object(self.utils, 'parse_file_to_get_array_map', + return_value=None): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._initial_setup, volume) + + def test_populate_masking_dict(self): + volume = self.data.test_volume + connector = self.data.connector + extra_specs = deepcopy(self.data.extra_specs) + extra_specs['port_group_name'] = self.data.port_group_name_f + ref_mv_dict = self.data.masking_view_dict + masking_view_dict = self.common._populate_masking_dict( + volume, connector, extra_specs) + self.assertEqual(ref_mv_dict, masking_view_dict) + + def test_populate_masking_dict_no_slo(self): + volume = self.data.test_volume + connector = self.data.connector + extra_specs = { + 'slo': None, + 'workload': None, + 'srp': self.data.srp, + 'array': self.data.array, + 'port_group_name': self.data.port_group_name_f} + ref_mv_dict = self.data.masking_view_dict_no_slo + masking_view_dict = self.common._populate_masking_dict( + volume, connector, extra_specs) + self.assertEqual(ref_mv_dict, masking_view_dict) + + def test_create_cloned_volume(self): + volume = self.data.test_clone_volume + source_volume = self.data.test_volume + extra_specs = self.data.extra_specs + ref_dict = self.data.provider_location + clone_dict = self.common._create_cloned_volume( + volume, source_volume, extra_specs) + self.assertEqual(ref_dict, clone_dict) + + def test_create_cloned_volume_is_snapshot(self): + volume = self.data.test_snapshot + source_volume = self.data.test_volume + extra_specs = self.data.extra_specs + ref_dict = self.data.snap_location + clone_dict = self.common._create_cloned_volume( + volume, source_volume, extra_specs, True, False) + self.assertEqual(ref_dict, clone_dict) + + def test_create_cloned_volume_from_snapshot(self): + volume = self.data.test_clone_volume + 
source_volume = self.data.test_snapshot + extra_specs = self.data.extra_specs + ref_dict = self.data.provider_location + clone_dict = self.common._create_cloned_volume( + volume, source_volume, extra_specs, False, True) + self.assertEqual(ref_dict, clone_dict) + + def test_create_cloned_volume_not_licensed(self): + volume = self.data.test_clone_volume + source_volume = self.data.test_volume + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, 'is_snapvx_licensed', + return_value=False): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_cloned_volume, + volume, source_volume, extra_specs) + + def test_parse_snap_info_found(self): + ref_device_id = self.data.volume_details[0]['volumeId'] + ref_snap_name = self.data.snap_location['snap_name'] + sourcedevice_id, foundsnap_name = self.common._parse_snap_info( + self.data.array, self.data.test_snapshot) + self.assertEqual(ref_device_id, sourcedevice_id) + self.assertEqual(ref_snap_name, foundsnap_name) + + def test_parse_snap_info_not_found(self): + ref_snap_name = None + with mock.patch.object(self.rest, 'get_volume_snap', + return_value=None): + __, foundsnap_name = self.common._parse_snap_info( + self.data.array, self.data.test_snapshot) + self.assertEqual(ref_snap_name, foundsnap_name) + + def test_parse_snap_info_exception(self): + with mock.patch.object( + self.rest, 'get_volume_snap', + side_effect=exception.VolumeBackendAPIException): + __, foundsnap_name = self.common._parse_snap_info( + self.data.array, self.data.test_snapshot) + self.assertIsNone(foundsnap_name) + + def test_parse_snap_info_provider_location_not_string(self): + snapshot = fake_snapshot.fake_snapshot_obj( + context='ctxt', provider_location={'not': 'string'}) + sourcedevice_id, foundsnap_name = self.common._parse_snap_info( + self.data.array, snapshot) + self.assertIsNone(foundsnap_name) + + def test_create_snapshot_success(self): + array = self.data.array + snapshot = self.data.test_snapshot + 
source_device_id = self.data.volume_details[0]['volumeId'] + extra_specs = self.data.extra_specs + ref_dict = {'snap_name': '12345', 'source_id': '00001'} + snap_dict = self.common._create_snapshot( + array, snapshot, source_device_id, extra_specs) + self.assertEqual(ref_dict, snap_dict) + + def test_create_snapshot_exception(self): + array = self.data.array + snapshot = self.data.test_snapshot + source_device_id = self.data.volume_details[0]['volumeId'] + extra_specs = self.data.extra_specs + with mock.patch.object( + self.provision, 'create_volume_snapvx', + side_effect=exception.VolumeBackendAPIException): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_snapshot, + array, snapshot, source_device_id, extra_specs) + + def test_delete_volume_from_srp(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + volume_name = self.data.test_volume.name + ref_extra_specs = self.data.extra_specs_intervals_set + ref_extra_specs['port_group_name'] = self.data.port_group_name_f + volume = self.data.test_volume + with mock.patch.object(self.common, '_sync_check'): + with mock.patch.object(self.common, '_delete_from_srp'): + self.common._delete_volume(volume) + self.common._delete_from_srp.assert_called_once_with( + array, device_id, volume_name, ref_extra_specs) + + def test_delete_volume_not_found(self): + volume = self.data.test_volume + with mock.patch.object(self.common, '_find_device_on_array', + return_value=None): + with mock.patch.object(self.common, '_delete_from_srp'): + self.common._delete_volume(volume) + self.common._delete_from_srp.assert_not_called() + + def test_create_volume_success(self): + volume_name = '1' + volume_size = self.data.test_volume.size + extra_specs = self.data.extra_specs + ref_dict = self.data.provider_location + volume_dict = self.common._create_volume( + volume_name, volume_size, extra_specs) + self.assertEqual(ref_dict, volume_dict) + + def test_create_volume_failed(self): + 
volume_name = self.data.test_volume.name + volume_size = self.data.test_volume.size + extra_specs = self.data.extra_specs + with mock.patch.object(self.masking, + 'get_or_create_default_storage_group', + return_value=self.data.failed_resource): + with mock.patch.object(self.rest, 'delete_storage_group'): + # path 1: not last vol in sg + with mock.patch.object(self.rest, 'get_num_vols_in_sg', + return_value=2): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_volume, + volume_name, volume_size, extra_specs) + self.rest.delete_storage_group.assert_not_called() + # path 2: last vol in sg, delete sg + with mock.patch.object(self.rest, 'get_num_vols_in_sg', + return_value=0): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._create_volume, + volume_name, volume_size, extra_specs) + (self.rest.delete_storage_group. + assert_called_once_with(self.data.array, + self.data.failed_resource)) + + def test_create_volume_incorrect_slo(self): + volume_name = self.data.test_volume.name + volume_size = self.data.test_volume.size + extra_specs = {'slo': 'Diamondz', + 'workload': 'DSSSS', + 'srp': self.data.srp, + 'array': self.data.array} + self.assertRaises( + exception.VolumeBackendAPIException, + self.common._create_volume, + volume_name, volume_size, extra_specs) + + def test_set_vmax_extra_specs(self): + srp_record = self.utils.parse_file_to_get_array_map( + self.fake_xml) + extra_specs = self.common._set_vmax_extra_specs( + self.data.vol_type_extra_specs, srp_record) + ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) + ref_extra_specs['port_group_name'] = self.data.port_group_name_f + self.assertEqual(ref_extra_specs, extra_specs) + + def test_set_vmax_extra_specs_no_srp_name(self): + srp_record = self.utils.parse_file_to_get_array_map( + self.fake_xml) + extra_specs = self.common._set_vmax_extra_specs({}, srp_record) + self.assertEqual('Optimized', extra_specs['slo']) + + def 
test_set_vmax_extra_specs_portgroup_as_spec(self): + srp_record = self.utils.parse_file_to_get_array_map( + self.fake_xml) + extra_specs = self.common._set_vmax_extra_specs( + {'port_group_name': 'extra_spec_pg'}, srp_record) + self.assertEqual('extra_spec_pg', extra_specs['port_group_name']) + + def test_set_vmax_extra_specs_no_portgroup_set(self): + fake_xml = FakeXML().create_fake_config_file( + 'test_no_pg_set', '') + srp_record = self.utils.parse_file_to_get_array_map(fake_xml) + self.assertRaises(exception.VolumeBackendAPIException, + self.common._set_vmax_extra_specs, + {}, srp_record) + + def test_delete_volume_from_srp_success(self): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object(self.masking, 'remove_and_reset_members'): + self.common._delete_from_srp(array, device_id, volume_name, + extra_specs) + self.masking.remove_and_reset_members.assert_called_once_with( + array, device_id, volume_name, extra_specs, False) + + def test_delete_volume_from_srp_failed(self): + array = self.data.array + device_id = self.data.failed_resource + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object(self.masking, 'remove_and_reset_members'): + with mock.patch.object(self.masking, + 'return_volume_to_default_storage_group'): + self.assertRaises(exception.VolumeBackendAPIException, + self.common._delete_from_srp, array, + device_id, volume_name, extra_specs) + (self.masking.return_volume_to_default_storage_group. 
+ assert_called_once_with( + array, device_id, volume_name, extra_specs)) + + def test_get_target_wwns_from_masking_view(self): + target_wwns = self.common.get_target_wwns_from_masking_view( + self.data.test_volume, self.data.connector) + ref_wwns = [self.data.wwnn1] + self.assertEqual(ref_wwns, target_wwns) + + def test_get_target_wwns_from_masking_view_no_mv(self): + with mock.patch.object(self.common, 'get_masking_views_from_volume', + return_value=None): + target_wwns = self.common.get_target_wwns_from_masking_view( + self.data.test_volume, self.data.connector) + self.assertFalse(target_wwns) + + def test_get_port_group_from_masking_view(self): + array = self.data.array + maskingview_name = self.data.masking_view_name_f + with mock.patch.object(self.rest, + 'get_element_from_masking_view'): + self.common.get_port_group_from_masking_view( + array, maskingview_name) + self.rest.get_element_from_masking_view.assert_called_once_with( + array, maskingview_name, portgroup=True) + + def test_get_initiator_group_from_masking_view(self): + array = self.data.array + maskingview_name = self.data.masking_view_name_f + with mock.patch.object(self.rest, + 'get_element_from_masking_view'): + self.common.get_initiator_group_from_masking_view( + array, maskingview_name) + self.rest.get_element_from_masking_view.assert_called_once_with( + array, maskingview_name, host=True) + + def test_get_common_masking_views(self): + array = self.data.array + portgroup_name = self.data.port_group_name_f + initiator_group_name = self.data.initiatorgroup_name_f + with mock.patch.object(self.rest, 'get_common_masking_views'): + self.common.get_common_masking_views( + array, portgroup_name, initiator_group_name) + self.rest.get_common_masking_views.assert_called_once_with( + array, portgroup_name, initiator_group_name) + + def test_get_ip_and_iqn(self): + ref_ip_iqn = [{'iqn': self.data.initiator, + 'ip': self.data.ip}] + port = self.data.portgroup[1]['symmetrixPortKey'][0]['portId'] + 
ip_iqn_list = self.common._get_ip_and_iqn(self.data.array, port) + self.assertEqual(ref_ip_iqn, ip_iqn_list) + + def test_find_ip_and_iqns(self): + ref_ip_iqn = [{'iqn': self.data.initiator, + 'ip': self.data.ip}] + ip_iqn_list = self.common._find_ip_and_iqns( + self.data.array, self.data.port_group_name_i) + self.assertEqual(ref_ip_iqn, ip_iqn_list) + + def test_create_replica_snap_name(self): + array = self.data.array + clone_volume = self.data.test_clone_volume + source_device_id = self.data.volume_details[0]['volumeId'] + snap_name = self.data.snap_location['snap_name'] + ref_dict = self.data.provider_location + clone_dict = self.common._create_replica( + array, clone_volume, source_device_id, + self.data.extra_specs, snap_name) + self.assertEqual(ref_dict, clone_dict) + + def test_create_replica_no_snap_name(self): + array = self.data.array + clone_volume = self.data.test_clone_volume + source_device_id = self.data.volume_details[0]['volumeId'] + snap_name = "temp-" + source_device_id + clone_volume.id + ref_dict = self.data.provider_location + with mock.patch.object(self.utils, 'get_temp_snap_name', + return_value=snap_name): + clone_dict = self.common._create_replica( + array, clone_volume, source_device_id, + self.data.extra_specs) + self.assertEqual(ref_dict, clone_dict) + self.utils.get_temp_snap_name.assert_called_once_with( + ('OS-' + clone_volume.id), source_device_id) + + def test_create_replica_failed_cleanup_target(self): + array = self.data.array + clone_volume = self.data.test_clone_volume + device_id = self.data.volume_details[0]['volumeId'] + snap_name = self.data.failed_resource + clone_name = 'OS-' + clone_volume.id + extra_specs = self.data.extra_specs + with mock.patch.object(self.common, '_cleanup_target'): + self.assertRaises( + exception.VolumeBackendAPIException, + self.common._create_replica, array, clone_volume, + device_id, self.data.extra_specs, snap_name) + self.common._cleanup_target.assert_called_once_with( + array, device_id, 
device_id, clone_name, + snap_name, extra_specs) + + def test_create_replica_failed_no_target(self): + array = self.data.array + clone_volume = self.data.test_clone_volume + source_device_id = self.data.volume_details[0]['volumeId'] + snap_name = self.data.failed_resource + with mock.patch.object(self.common, '_create_volume', + return_value={'device_id': None}): + with mock.patch.object(self.common, '_cleanup_target'): + self.assertRaises( + exception.VolumeBackendAPIException, + self.common._create_replica, array, clone_volume, + source_device_id, self.data.extra_specs, snap_name) + self.common._cleanup_target.assert_not_called() + + @mock.patch.object( + masking.VMAXMasking, + 'remove_and_reset_members') + def test_cleanup_target_sync_present(self, mock_remove): + array = self.data.array + clone_volume = self.data.test_clone_volume + source_device_id = self.data.volume_details[0]['volumeId'] + target_device_id = self.data.volume_details[1]['volumeId'] + snap_name = self.data.failed_resource + clone_name = clone_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, '_get_sync_session', + return_value='session'): + with mock.patch.object(self.provision, + 'break_replication_relationship'): + self.common._cleanup_target( + array, target_device_id, source_device_id, + clone_name, snap_name, extra_specs) + (self.provision.break_replication_relationship. 
+ assert_called_once_with( + array, target_device_id, source_device_id, + snap_name, extra_specs)) + + def test_cleanup_target_no_sync(self): + array = self.data.array + clone_volume = self.data.test_clone_volume + source_device_id = self.data.volume_details[0]['volumeId'] + target_device_id = self.data.volume_details[1]['volumeId'] + snap_name = self.data.failed_resource + clone_name = clone_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, '_get_sync_session', + return_value=None): + with mock.patch.object(self.common, + '_delete_from_srp'): + self.common._cleanup_target( + array, target_device_id, source_device_id, + clone_name, snap_name, extra_specs) + self.common._delete_from_srp.assert_called_once_with( + array, target_device_id, clone_name, + extra_specs) + + @mock.patch.object( + provision.VMAXProvision, + 'delete_volume_snap') + @mock.patch.object( + provision.VMAXProvision, + 'break_replication_relationship') + def test_sync_check_temp_snap(self, mock_break, mock_delete): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + target = self.data.volume_details[1]['volumeId'] + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + snap_name = 'temp-1' + with mock.patch.object(self.rest, 'get_volume_snap', + return_value=snap_name): + self.common._sync_check(array, device_id, volume_name, + extra_specs) + mock_break.assert_called_with( + array, target, device_id, snap_name, + extra_specs, wait_for_sync=True) + mock_delete.assert_called_with( + array, snap_name, device_id) + + @mock.patch.object( + provision.VMAXProvision, + 'delete_volume_snap') + @mock.patch.object( + provision.VMAXProvision, + 'break_replication_relationship') + def test_sync_check_not_temp_snap(self, mock_break, mock_delete): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + target = self.data.volume_details[1]['volumeId'] + volume_name = self.data.test_volume.name + 
extra_specs = self.data.extra_specs + snap_name = 'OS-1' + sessions = [{'source_vol': device_id, + 'snap_name': snap_name, + 'target_vol_list': [target]}] + with mock.patch.object(self.rest, 'find_snap_vx_sessions', + return_value=sessions): + self.common._sync_check(array, device_id, volume_name, + extra_specs) + mock_break.assert_called_with( + array, target, device_id, snap_name, + extra_specs, wait_for_sync=True) + mock_delete.assert_not_called() + + @mock.patch.object( + provision.VMAXProvision, + 'break_replication_relationship') + def test_sync_check_no_sessions(self, mock_break): + array = self.data.array + device_id = self.data.volume_details[0]['volumeId'] + volume_name = self.data.test_volume.name + extra_specs = self.data.extra_specs + with mock.patch.object(self.rest, 'find_snap_vx_sessions', + return_value=None): + self.common._sync_check(array, device_id, volume_name, + extra_specs) + mock_break.assert_not_called() + + def test_manage_existing_success(self): + external_ref = {u'source-name': u'00002'} + volume_name = self.utils.get_volume_element_name( + self.data.test_volume.id) + provider_location = {'device_id': u'00002', 'array': u'000197800123'} + ref_update = {'provider_location': six.text_type(provider_location), + 'display_name': volume_name} + with mock.patch.object( + self.common, '_check_lun_valid_for_cinder_management'): + model_update = self.common.manage_existing( + self.data.test_volume, external_ref) + self.assertEqual(ref_update, model_update) + + @mock.patch.object( + rest.VMAXRest, 'get_masking_views_from_storage_group', + return_value=None) + def test_check_lun_valid_for_cinder_management(self, mock_mv): + external_ref = {u'source-name': u'00001'} + self.common._check_lun_valid_for_cinder_management( + self.data.array, '00001', + self.data.test_volume.id, external_ref) + + @mock.patch.object( + rest.VMAXRest, 'get_volume', + side_effect=[ + None, + VMAXCommonData.volume_details[0], + VMAXCommonData.volume_details[0], + 
VMAXCommonData.volume_details[1]]) + @mock.patch.object( + rest.VMAXRest, 'get_masking_views_from_storage_group', + side_effect=[VMAXCommonData.sg_details[1]['maskingview'], + None]) + @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume', + return_value=[VMAXCommonData.defaultstoragegroup_name]) + @mock.patch.object(rest.VMAXRest, 'is_vol_in_rep_session', + side_effect=[(True, False, []), (False, False, None)]) + def test_check_lun_valid_for_cinder_management_exception( + self, mock_rep, mock_sg, mock_mvs, mock_get_vol): + external_ref = {u'source-name': u'00001'} + for x in range(0, 3): + self.assertRaises( + exception.ManageExistingInvalidReference, + self.common._check_lun_valid_for_cinder_management, + self.data.array, '00001', + self.data.test_volume.id, external_ref) + self.assertRaises(exception.ManageExistingAlreadyManaged, + self.common._check_lun_valid_for_cinder_management, + self.data.array, '00001', + self.data.test_volume.id, external_ref) + + def test_manage_existing_get_size(self): + external_ref = {u'source-name': u'00001'} + size = self.common.manage_existing_get_size( + self.data.test_volume, external_ref) + self.assertEqual(2, size) + + def test_unmanage_success(self): + volume = self.data.test_volume + with mock.patch.object(self.rest, 'rename_volume'): + self.common.unmanage(volume) + self.rest.rename_volume.assert_called_once_with( + self.data.array, '00001', self.data.test_volume.id) + + def test_unmanage_device_not_found(self): + volume = self.data.test_volume + with mock.patch.object(self.common, '_find_device_on_array', + return_value=None): + with mock.patch.object(self.rest, 'rename_volume'): + self.common.unmanage(volume) + self.rest.rename_volume.assert_not_called() + + +class VMAXFCTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXFCTest, self).setUp() + config_group = 'FCTests' + self.fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_f) + 
self.configuration = FakeConfiguration(self.fake_xml, config_group) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = fc.VMAXFCDriver(configuration=self.configuration) + self.driver = driver + self.common = self.driver.common + self.masking = self.common.masking + self.utils = self.common.utils + self.utils.get_volumetype_extra_specs = ( + mock.Mock(return_value=self.data.vol_type_extra_specs)) + + def test_create_volume(self): + with mock.patch.object(self.common, 'create_volume'): + self.driver.create_volume(self.data.test_volume) + self.common.create_volume.assert_called_once_with( + self.data.test_volume) + + def test_create_volume_from_snapshot(self): + volume = self.data.test_clone_volume + snapshot = self.data.test_snapshot + with mock.patch.object(self.common, 'create_volume_from_snapshot'): + self.driver.create_volume_from_snapshot(volume, snapshot) + self.common.create_volume_from_snapshot.assert_called_once_with( + volume, snapshot) + + def test_create_cloned_volume(self): + volume = self.data.test_clone_volume + src_volume = self.data.test_volume + with mock.patch.object(self.common, 'create_cloned_volume'): + self.driver.create_cloned_volume(volume, src_volume) + self.common.create_cloned_volume.assert_called_once_with( + volume, src_volume) + + def test_delete_volume(self): + with mock.patch.object(self.common, 'delete_volume'): + self.driver.delete_volume(self.data.test_volume) + self.common.delete_volume.assert_called_once_with( + self.data.test_volume) + + def test_create_snapshot(self): + with mock.patch.object(self.common, 'create_snapshot'): + self.driver.create_snapshot(self.data.test_snapshot) + self.common.create_snapshot.assert_called_once_with( + self.data.test_snapshot, self.data.test_snapshot.volume) + + def test_delete_snapshot(self): + with mock.patch.object(self.common, 'delete_snapshot'): + self.driver.delete_snapshot(self.data.test_snapshot) + 
self.common.delete_snapshot.assert_called_once_with( + self.data.test_snapshot, self.data.test_snapshot.volume) + + def test_initialize_connection(self): + with mock.patch.object(self.common, 'initialize_connection', + return_value=self.data.fc_device_info): + with mock.patch.object(self.driver, 'populate_data'): + self.driver.initialize_connection(self.data.test_volume, + self.data.connector) + self.common.initialize_connection.assert_called_once_with( + self.data.test_volume, self.data.connector) + self.driver.populate_data.assert_called_once_with( + self.data.fc_device_info, self.data.test_volume, + self.data.connector) + + def test_populate_data(self): + with mock.patch.object(self.driver, '_build_initiator_target_map', + return_value=([], {})): + ref_data = { + 'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': self.data.fc_device_info['hostlunid'], + 'target_discovered': True, + 'target_wwn': [], + 'initiator_target_map': {}}} + data = self.driver.populate_data(self.data.fc_device_info, + self.data.test_volume, + self.data.connector) + self.assertEqual(ref_data, data) + self.driver._build_initiator_target_map.assert_called_once_with( + self.data.test_volume, self.data.connector) + + def test_terminate_connection(self): + with mock.patch.object(self.common, 'terminate_connection'): + self.driver.terminate_connection(self.data.test_volume, + self.data.connector) + self.common.terminate_connection.assert_called_once_with( + self.data.test_volume, self.data.connector) + + def test_terminate_connection_no_zoning_mappings(self): + with mock.patch.object(self.driver, '_get_zoning_mappings', + return_value=None): + with mock.patch.object(self.common, 'terminate_connection'): + self.driver.terminate_connection(self.data.test_volume, + self.data.connector) + self.common.terminate_connection.assert_not_called() + + def test_get_zoning_mappings(self): + ref_mappings = self.data.zoning_mappings + zoning_mappings = self.driver._get_zoning_mappings( + 
self.data.test_volume, self.data.connector) + self.assertEqual(ref_mappings, zoning_mappings) + + def test_get_zoning_mappings_no_mv(self): + ref_mappings = {'port_group': None, + 'initiator_group': None, + 'target_wwns': None, + 'init_targ_map': None, + 'array': None} + with mock.patch.object(self.common, 'get_masking_views_from_volume', + return_value=None): + zoning_mappings = self.driver._get_zoning_mappings( + self.data.test_volume, self.data.connector) + self.assertEqual(ref_mappings, zoning_mappings) + + def test_cleanup_zones_other_vols_mapped(self): + ref_data = {'driver_volume_type': 'fibre_channel', + 'data': {}} + data = self.driver._cleanup_zones(self.data.zoning_mappings) + self.assertEqual(ref_data, data) + + def test_cleanup_zones_no_vols_mapped(self): + zoning_mappings = self.data.zoning_mappings + ref_data = {'driver_volume_type': 'fibre_channel', + 'data': {'target_wwn': zoning_mappings['target_wwns'], + 'initiator_target_map': + zoning_mappings['init_targ_map']}} + with mock.patch.object(self.common, 'get_common_masking_views', + return_value=[]): + data = self.driver._cleanup_zones(self.data.zoning_mappings) + self.assertEqual(ref_data, data) + + def test_build_initiator_target_map(self): + ref_target_map = {'123456789012345': ['543210987654321'], + '123456789054321': ['123450987654321']} + with mock.patch.object(fczm_utils, 'create_lookup_service', + return_value=FakeLookupService()): + driver = fc.VMAXFCDriver(configuration=self.configuration) + with mock.patch.object(driver.common, + 'get_target_wwns_from_masking_view', + return_value=self.data.target_wwns): + targets, target_map = driver._build_initiator_target_map( + self.data.test_volume, self.data.connector) + self.assertEqual(ref_target_map, target_map) + + def test_extend_volume(self): + with mock.patch.object(self.common, 'extend_volume'): + self.driver.extend_volume(self.data.test_volume, '3') + self.common.extend_volume.assert_called_once_with( + self.data.test_volume, '3') + + def 
test_get_volume_stats(self): + with mock.patch.object(self.driver, 'update_volume_stats'): + # no refresh + self.driver.get_volume_stats() + self.driver.update_volume_stats.assert_not_called() + # with refresh + self.driver.get_volume_stats(True) + self.driver.update_volume_stats.assert_called_once_with() + + def test_update_volume_stats(self): + with mock.patch.object(self.common, 'update_volume_stats', + return_value={}): + self.driver.update_volume_stats() + self.common.update_volume_stats.assert_called_once_with() + + def test_check_for_setup_error(self): + self.driver.check_for_setup_error() + + def test_ensure_export(self): + self.driver.ensure_export('context', 'volume') + + def test_create_export(self): + self.driver.create_export('context', 'volume', 'connector') + + def test_remove_export(self): + self.driver.remove_export('context', 'volume') + + def test_check_for_export(self): + self.driver.check_for_export('context', 'volume_id') + + def test_manage_existing(self): + with mock.patch.object(self.common, 'manage_existing', + return_value={}): + external_ref = {u'source-name': u'00002'} + self.driver.manage_existing(self.data.test_volume, external_ref) + self.common.manage_existing.assert_called_once_with( + self.data.test_volume, external_ref) + + def test_manage_existing_get_size(self): + with mock.patch.object(self.common, 'manage_existing_get_size', + return_value='1'): + external_ref = {u'source-name': u'00002'} + self.driver.manage_existing_get_size( + self.data.test_volume, external_ref) + self.common.manage_existing_get_size.assert_called_once_with( + self.data.test_volume, external_ref) + + def test_unmanage_volume(self): + with mock.patch.object(self.common, 'unmanage', + return_value={}): + self.driver.unmanage(self.data.test_volume) + self.common.unmanage.assert_called_once_with( + self.data.test_volume) + + +class VMAXISCSITest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXISCSITest, self).setUp() + 
config_group = 'ISCSITests' + self.fake_xml = FakeXML().create_fake_config_file( + config_group, self.data.port_group_name_i) + configuration = FakeConfiguration(self.fake_xml, config_group) + rest.VMAXRest._establish_rest_session = mock.Mock( + return_value=FakeRequestsSession()) + driver = iscsi.VMAXISCSIDriver(configuration=configuration) + self.driver = driver + self.common = self.driver.common + self.masking = self.common.masking + self.utils = self.common.utils + self.utils.get_volumetype_extra_specs = ( + mock.Mock(return_value=self.data.vol_type_extra_specs)) + + def test_create_volume(self): + with mock.patch.object(self.common, 'create_volume'): + self.driver.create_volume(self.data.test_volume) + self.common.create_volume.assert_called_once_with( + self.data.test_volume) + + def test_create_volume_from_snapshot(self): + volume = self.data.test_clone_volume + snapshot = self.data.test_snapshot + with mock.patch.object(self.common, 'create_volume_from_snapshot'): + self.driver.create_volume_from_snapshot(volume, snapshot) + self.common.create_volume_from_snapshot.assert_called_once_with( + volume, snapshot) + + def test_create_cloned_volume(self): + volume = self.data.test_clone_volume + src_volume = self.data.test_volume + with mock.patch.object(self.common, 'create_cloned_volume'): + self.driver.create_cloned_volume(volume, src_volume) + self.common.create_cloned_volume.assert_called_once_with( + volume, src_volume) + + def test_delete_volume(self): + with mock.patch.object(self.common, 'delete_volume'): + self.driver.delete_volume(self.data.test_volume) + self.common.delete_volume.assert_called_once_with( + self.data.test_volume) + + def test_create_snapshot(self): + with mock.patch.object(self.common, 'create_snapshot'): + self.driver.create_snapshot(self.data.test_snapshot) + self.common.create_snapshot.assert_called_once_with( + self.data.test_snapshot, self.data.test_snapshot.volume) + + def test_delete_snapshot(self): + with 
mock.patch.object(self.common, 'delete_snapshot'): + self.driver.delete_snapshot(self.data.test_snapshot) + self.common.delete_snapshot.assert_called_once_with( + self.data.test_snapshot, self.data.test_snapshot.volume) + + def test_initialize_connection(self): + ref_dict = {'maskingview': self.data.masking_view_name_f, + 'array': self.data.array, + 'hostlunid': 3, + 'ip_and_iqn': [{'ip': self.data.ip, + 'iqn': self.data.initiator}], + 'is_multipath': False} + with mock.patch.object(self.driver, 'get_iscsi_dict'): + with mock.patch.object( + self.common, 'get_port_group_from_masking_view', + return_value=self.data.port_group_name_i): + self.driver.initialize_connection(self.data.test_volume, + self.data.connector) + self.driver.get_iscsi_dict.assert_called_once_with( + ref_dict, self.data.test_volume) + + def test_get_iscsi_dict_success(self): + ip_and_iqn = self.common._find_ip_and_iqns( + self.data.array, self.data.port_group_name_i) + host_lun_id = self.data.iscsi_device_info['hostlunid'] + volume = self.data.test_volume + device_info = self.data.iscsi_device_info + ref_data = {'driver_volume_type': 'iscsi', 'data': {}} + with mock.patch.object( + self.driver, 'vmax_get_iscsi_properties', return_value={}): + data = self.driver.get_iscsi_dict(device_info, volume) + self.assertEqual(ref_data, data) + self.driver.vmax_get_iscsi_properties.assert_called_once_with( + volume, ip_and_iqn, True, host_lun_id) + + def test_get_iscsi_dict_exception(self): + device_info = {'ip_and_iqn': ''} + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.get_iscsi_dict, + device_info, self.data.test_volume) + + def test_vmax_get_iscsi_properties_one_target_no_auth(self): + vol = deepcopy(self.data.test_volume) + ip_and_iqn = self.common._find_ip_and_iqns( + self.data.array, self.data.port_group_name_i) + host_lun_id = self.data.iscsi_device_info['hostlunid'] + ref_properties = { + 'target_discovered': True, + 'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0], + 
'target_portal': ip_and_iqn[0]['ip'] + ":3260", + 'target_lun': host_lun_id, + 'volume_id': self.data.test_volume.id} + iscsi_properties = self.driver.vmax_get_iscsi_properties( + vol, ip_and_iqn, True, host_lun_id) + self.assertEqual(type(ref_properties), type(iscsi_properties)) + self.assertEqual(ref_properties, iscsi_properties) + + def test_vmax_get_iscsi_properties_multiple_targets(self): + ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator}, + {'ip': self.data.ip, 'iqn': self.data.iqn}] + host_lun_id = self.data.iscsi_device_info['hostlunid'] + ref_properties = { + 'target_portals': ( + [t['ip'] + ":3260" for t in ip_and_iqn]), + 'target_iqns': ( + [t['iqn'].split(",")[0] for t in ip_and_iqn]), + 'target_luns': [host_lun_id] * len(ip_and_iqn), + 'target_discovered': True, + 'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0], + 'target_portal': ip_and_iqn[0]['ip'] + ":3260", + 'target_lun': host_lun_id, + 'volume_id': self.data.test_volume.id} + iscsi_properties = self.driver.vmax_get_iscsi_properties( + self.data.test_volume, ip_and_iqn, True, host_lun_id) + self.assertEqual(ref_properties, iscsi_properties) + + def test_vmax_get_iscsi_properties_auth(self): + vol = deepcopy(self.data.test_volume) + vol.provider_auth = "auth_method auth_username auth_secret" + ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator}, + {'ip': self.data.ip, 'iqn': self.data.iqn}] + host_lun_id = self.data.iscsi_device_info['hostlunid'] + ref_properties = { + 'target_portals': ( + [t['ip'] + ":3260" for t in ip_and_iqn]), + 'target_iqns': ( + [t['iqn'].split(",")[0] for t in ip_and_iqn]), + 'target_luns': [host_lun_id] * len(ip_and_iqn), + 'target_discovered': True, + 'target_iqn': ip_and_iqn[0]['iqn'].split(",")[0], + 'target_portal': ip_and_iqn[0]['ip'] + ":3260", + 'target_lun': host_lun_id, + 'volume_id': self.data.test_volume.id, + 'auth_method': 'auth_method', + 'auth_username': 'auth_username', + 'auth_password': 'auth_secret'} + iscsi_properties = 
self.driver.vmax_get_iscsi_properties( + vol, ip_and_iqn, True, host_lun_id) + self.assertEqual(ref_properties, iscsi_properties) + + def test_terminate_connection(self): + with mock.patch.object(self.common, 'terminate_connection'): + self.driver.terminate_connection(self.data.test_volume, + self.data.connector) + self.common.terminate_connection.assert_called_once_with( + self.data.test_volume, self.data.connector) + + def test_extend_volume(self): + with mock.patch.object(self.common, 'extend_volume'): + self.driver.extend_volume(self.data.test_volume, '3') + self.common.extend_volume.assert_called_once_with( + self.data.test_volume, '3') + + def test_get_volume_stats(self): + with mock.patch.object(self.driver, 'update_volume_stats'): + # no refresh + self.driver.get_volume_stats() + self.driver.update_volume_stats.assert_not_called() + # with refresh + self.driver.get_volume_stats(True) + self.driver.update_volume_stats.assert_called_once_with() + + def test_update_volume_stats(self): + with mock.patch.object(self.common, 'update_volume_stats', + return_value={}): + self.driver.update_volume_stats() + self.common.update_volume_stats.assert_called_once_with() + + def test_check_for_setup_error(self): + self.driver.check_for_setup_error() + + def test_ensure_export(self): + self.driver.ensure_export('context', 'volume') + + def test_create_export(self): + self.driver.create_export('context', 'volume', 'connector') + + def test_remove_export(self): + self.driver.remove_export('context', 'volume') + + def test_check_for_export(self): + self.driver.check_for_export('context', 'volume_id') + + def test_manage_existing(self): + with mock.patch.object(self.common, 'manage_existing', + return_value={}): + external_ref = {u'source-name': u'00002'} + self.driver.manage_existing(self.data.test_volume, external_ref) + self.common.manage_existing.assert_called_once_with( + self.data.test_volume, external_ref) + + def test_manage_existing_get_size(self): + with 
mock.patch.object(self.common, 'manage_existing_get_size', + return_value='1'): + external_ref = {u'source-name': u'00002'} + self.driver.manage_existing_get_size( + self.data.test_volume, external_ref) + self.common.manage_existing_get_size.assert_called_once_with( + self.data.test_volume, external_ref) + + def test_unmanage_volume(self): + with mock.patch.object(self.common, 'unmanage', + return_value={}): + self.driver.unmanage(self.data.test_volume) + self.common.unmanage.assert_called_once_with( + self.data.test_volume) + + +class VMAXMaskingTest(test.TestCase): + def setUp(self): + self.data = VMAXCommonData() + + super(VMAXMaskingTest, self).setUp() + + configuration = mock.Mock() + configuration.safe_get.return_value = 'MaskingTests' + configuration.config_group = 'MaskingTests' + self._gather_info = common.VMAXCommon._gather_info + common.VMAXCommon._gather_info = mock.Mock() + driver = common.VMAXCommon( + 'iSCSI', common.VMAXCommon.VERSION, configuration=configuration) + driver_fc = common.VMAXCommon( + 'FC', common.VMAXCommon.VERSION, configuration=configuration) + self.driver = driver + self.driver_fc = driver_fc + self.mask = self.driver.masking + self.extra_specs = self.data.extra_specs + self.extra_specs['port_group_name'] = self.data.port_group_name_i + self.maskingviewdict = self.driver._populate_masking_dict( + self.data.test_volume, self.data.connector, self.extra_specs) + self.maskingviewdict['extra_specs'] = self.extra_specs + self.device_id = self.data.volume_details[0]['volumeId'] + self.volume_name = self.data.volume_details[0]['volume_identifier'] + + def tearDown(self): + super(VMAXMaskingTest, self).tearDown() + common.VMAXCommon._gather_info = self._gather_info + + @mock.patch.object( + masking.VMAXMasking, + 'get_or_create_masking_view_and_map_lun') + def test_setup_masking_view(self, mock_get_or_create_mv): + self.driver.masking.setup_masking_view( + self.data.array, self.maskingviewdict, self.extra_specs) + 
mock_get_or_create_mv.assert_called_once() + + @mock.patch.object( + masking.VMAXMasking, + '_check_adding_volume_to_storage_group') + @mock.patch.object( + masking.VMAXMasking, + '_get_default_storagegroup_and_remove_vol', + return_value=VMAXCommonData.defaultstoragegroup_name) + @mock.patch.object( + masking.VMAXMasking, + '_get_or_create_masking_view', + side_effect=[None, "Error in masking view retrieval", + exception.VolumeBackendAPIException]) + @mock.patch.object( + rest.VMAXRest, + 'get_element_from_masking_view', + side_effect=[VMAXCommonData.port_group_name_i, Exception]) + def test_get_or_create_masking_view_and_map_lun( + self, mock_masking_view_element, mock_masking, mock_default_sg, + mock_add_volume): + rollback_dict = ( + self.driver.masking.get_or_create_masking_view_and_map_lun( + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, self.extra_specs)) + self.assertEqual(self.maskingviewdict, rollback_dict) + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver.masking.get_or_create_masking_view_and_map_lun, + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, self.extra_specs) + self.maskingviewdict['slo'] = None + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver.masking.get_or_create_masking_view_and_map_lun, + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, self.extra_specs) + + @mock.patch.object( + masking.VMAXMasking, + 'remove_volume_from_sg') + @mock.patch.object( + rest.VMAXRest, + 'is_volume_in_storagegroup', + side_effect=[True, False]) + def test_get_default_storagegroup_and_remove_vol( + self, mock_volume_in_sg, mock_remove_volume): + + self.driver.masking._get_default_storagegroup_and_remove_vol( + self.data.array, self.device_id, self.maskingviewdict, + self.volume_name, self.extra_specs) + mock_remove_volume.assert_called_once() + default_sg_name = ( + 
self.driver.masking._get_default_storagegroup_and_remove_vol( + self.data.array, self.device_id, self.maskingviewdict, + self.volume_name, self.extra_specs)) + self.assertEqual(self.data.defaultstoragegroup_name, default_sg_name) + + @mock.patch.object( + rest.VMAXRest, + 'get_masking_view', + side_effect=[VMAXCommonData.maskingview, + VMAXCommonData.maskingview, None]) + @mock.patch.object( + masking.VMAXMasking, + '_validate_existing_masking_view', + side_effect=[(VMAXCommonData.maskingview[1]['storageGroupId'], + None), (None, "Error Message")]) + @mock.patch.object( + masking.VMAXMasking, + '_create_new_masking_view', + return_value=None) + def test_get_or_create_masking_view( + self, mock_create_mv, mock_validate_mv, + mock_get_mv): + for x in range(0, 3): + self.driver.masking._get_or_create_masking_view( + self.data.array, self.maskingviewdict, self.extra_specs) + mock_create_mv.assert_called_once() + + @mock.patch.object( + masking.VMAXMasking, + '_get_or_create_storage_group', + side_effect=["Storage group not found", None, + "Storage group not found", None, None, None, + None, None, None, None, None]) + @mock.patch.object( + masking.VMAXMasking, + '_check_port_group', + side_effect=[(None, "Port group error"), (None, None), (None, None), + (None, None)]) + @mock.patch.object( + masking.VMAXMasking, + '_get_or_create_initiator_group', + side_effect=[(None, "Initiator group error"), (None, None), + (None, None)]) + @mock.patch.object( + masking.VMAXMasking, + '_check_adding_volume_to_storage_group', + side_effect=["Storage group error", None]) + @mock.patch.object( + masking.VMAXMasking, + 'create_masking_view', + return_value=None) + def test_create_new_masking_view( + self, mock_create_mv, mock_add_volume, mock_create_IG, + mock_check_PG, mock_create_SG): + for x in range(0, 6): + self.driver.masking._create_new_masking_view( + self.data.array, self.maskingviewdict, + self.maskingviewdict['maskingview_name'], self.extra_specs) + 
mock_create_mv.assert_called_once() + + @mock.patch.object( + masking.VMAXMasking, + '_check_existing_storage_group', + side_effect=[(VMAXCommonData.storagegroup_name_i, None), + (VMAXCommonData.storagegroup_name_i, None), + (None, "Error Checking existing storage group")]) + @mock.patch.object( + rest.VMAXRest, + 'get_element_from_masking_view', + return_value=VMAXCommonData.port_group_name_i) + @mock.patch.object( + masking.VMAXMasking, + '_check_port_group', + side_effect=[(None, None), (None, "Error checking pg")]) + @mock.patch.object( + masking.VMAXMasking, + '_check_existing_initiator_group', + return_value=(VMAXCommonData.initiatorgroup_name_i, None)) + def test_validate_existing_masking_view( + self, mock_check_ig, mock_check_pg, mock_get_mv_element, + mock_check_sg): + for x in range(0, 3): + self.driver.masking._validate_existing_masking_view( + self.data.array, self.maskingviewdict, + self.maskingviewdict['maskingview_name'], self.extra_specs) + self.assertEqual(3, mock_check_sg.call_count) + mock_get_mv_element.assert_called_with( + self.data.array, self.maskingviewdict['maskingview_name'], + portgroup=True) + mock_check_ig.assert_called_once() + + @mock.patch.object( + rest.VMAXRest, + 'get_storage_group', + side_effect=[VMAXCommonData.storagegroup_name_i, None, None]) + @mock.patch.object( + provision.VMAXProvision, + 'create_storage_group', + side_effect=[VMAXCommonData.storagegroup_name_i, None]) + def test_get_or_create_storage_group(self, mock_sg, mock_get_sg): + for x in range(0, 2): + self.driver.masking._get_or_create_storage_group( + self.data.array, self.maskingviewdict, + self.data.storagegroup_name_i, self.extra_specs) + self.driver.masking._get_or_create_storage_group( + self.data.array, self.maskingviewdict, + self.data.storagegroup_name_i, self.extra_specs, True) + self.assertEqual(3, mock_get_sg.call_count) + self.assertEqual(2, mock_sg.call_count) + + @mock.patch.object( + masking.VMAXMasking, + 
'_check_adding_volume_to_storage_group', + return_value=None) + @mock.patch.object( + masking.VMAXMasking, + '_get_or_create_storage_group', + return_value=None) + @mock.patch.object( + rest.VMAXRest, + 'get_element_from_masking_view', + return_value=VMAXCommonData.parent_sg_i) + @mock.patch.object( + rest.VMAXRest, + 'is_child_sg_in_parent_sg', + side_effect=[True, False]) + @mock.patch.object( + masking.VMAXMasking, + '_check_add_child_sg_to_parent_sg', + return_value=None) + def test_check_existing_storage_group_success( + self, mock_add_sg, mock_is_child, mock_get_mv_element, + mock_create_sg, mock_add): + masking_view_dict = deepcopy(self.data.masking_view_dict) + masking_view_dict['extra_specs'] = self.data.extra_specs + with mock.patch.object(self.driver.rest, 'get_storage_group', + side_effect=[ + VMAXCommonData.parent_sg_i, + VMAXCommonData.storagegroup_name_i]): + _, msg = ( + self.driver.masking._check_existing_storage_group( + self.data.array, self.maskingviewdict['maskingview_name'], + masking_view_dict)) + self.assertIsNone(msg) + mock_create_sg.assert_not_called() + with mock.patch.object(self.driver.rest, 'get_storage_group', + side_effect=[ + VMAXCommonData.parent_sg_i, None]): + _, msg = ( + self.driver.masking._check_existing_storage_group( + self.data.array, self.maskingviewdict['maskingview_name'], + masking_view_dict)) + self.assertIsNone(msg) + mock_create_sg.assert_called_once_with( + self.data.array, masking_view_dict, + VMAXCommonData.storagegroup_name_f, + self.data.extra_specs) + + @mock.patch.object( + masking.VMAXMasking, + '_check_adding_volume_to_storage_group', + side_effect=[None, "Error Message"]) + @mock.patch.object( + rest.VMAXRest, + 'is_child_sg_in_parent_sg', + side_effect=[True, False, False]) + @mock.patch.object( + rest.VMAXRest, + 'get_element_from_masking_view', + return_value=VMAXCommonData.parent_sg_i) + @mock.patch.object( + rest.VMAXRest, + 'get_storage_group', + side_effect=[None, VMAXCommonData.parent_sg_i, None, 
+ VMAXCommonData.parent_sg_i, None, + VMAXCommonData.parent_sg_i, None]) + def test_check_existing_storage_group_failed( + self, mock_get_sg, mock_get_mv_element, mock_child, mock_check): + masking_view_dict = deepcopy(self.data.masking_view_dict) + masking_view_dict['extra_specs'] = self.data.extra_specs + for x in range(0, 4): + _, msg = ( + self.driver.masking._check_existing_storage_group( + self.data.array, self.maskingviewdict['maskingview_name'], + masking_view_dict)) + self.assertIsNotNone(msg) + self.assertEqual(7, mock_get_sg.call_count) + self.assertEqual(1, mock_check.call_count) + + @mock.patch.object(rest.VMAXRest, 'get_portgroup', + side_effect=[VMAXCommonData.port_group_name_i, None]) + def test_check_port_group( + self, mock_get_pg): + for x in range(0, 2): + _, msg = self.driver.masking._check_port_group( + self.data.array, self.maskingviewdict['maskingview_name']) + self.assertIsNotNone(msg) + self.assertEqual(2, mock_get_pg.call_count) + + @mock.patch.object( + masking.VMAXMasking, '_find_initiator_group', + side_effect=[VMAXCommonData.initiatorgroup_name_i, None, None]) + @mock.patch.object(masking.VMAXMasking, '_create_initiator_group', + side_effect=[VMAXCommonData.initiatorgroup_name_i, None] + ) + def test_get_or_create_initiator_group(self, mock_create_ig, mock_find_ig): + self.driver.masking._get_or_create_initiator_group( + self.data.array, self.data.initiatorgroup_name_i, + self.data.connector, self.extra_specs) + mock_create_ig.assert_not_called() + found_init_group, msg = ( + self.driver.masking._get_or_create_initiator_group( + self.data.array, self.data.initiatorgroup_name_i, + self.data.connector, self.extra_specs)) + self.assertIsNone(msg) + found_init_group, msg = ( + self.driver.masking._get_or_create_initiator_group( + self.data.array, self.data.initiatorgroup_name_i, + self.data.connector, self.extra_specs)) + self.assertIsNotNone(msg) + + def test_check_existing_initiator_group(self): + with mock.patch.object( + 
rest.VMAXRest, 'get_element_from_masking_view', + return_value=VMAXCommonData.inititiatorgroup): + ig_from_mv, msg = ( + self.driver.masking._check_existing_initiator_group( + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, self.data.storagegroup_name_i, + self.data.port_group_name_i, self.extra_specs)) + self.assertEqual(self.data.inititiatorgroup, ig_from_mv) + + def test_check_adding_volume_to_storage_group(self): + with mock.patch.object( + masking.VMAXMasking, '_create_initiator_group'): + with mock.patch.object( + rest.VMAXRest, 'is_volume_in_storagegroup', + side_effect=[True, False]): + msg = ( + self.driver.masking._check_adding_volume_to_storage_group( + self.data.array, self.device_id, + self.data.storagegroup_name_i, + self.maskingviewdict[utils.VOL_NAME], + self.maskingviewdict[utils.EXTRA_SPECS])) + self.assertIsNone(msg) + msg = ( + self.driver.masking._check_adding_volume_to_storage_group( + self.data.array, self.device_id, + self.data.storagegroup_name_i, + self.maskingviewdict[utils.VOL_NAME], + self.maskingviewdict[utils.EXTRA_SPECS])) + + @mock.patch.object(rest.VMAXRest, 'add_vol_to_sg') + def test_add_volume_to_storage_group(self, mock_add_volume): + self.driver.masking.add_volume_to_storage_group( + self.data.array, self.device_id, self.data.storagegroup_name_i, + self.volume_name, self.extra_specs) + mock_add_volume.assert_called_once() + + @mock.patch.object(rest.VMAXRest, 'remove_vol_from_sg') + def test_remove_vol_from_storage_group(self, mock_remove_volume): + with mock.patch.object( + rest.VMAXRest, 'is_volume_in_storagegroup', + side_effect=[False, True]): + self.driver.masking._remove_vol_from_storage_group( + self.data.array, self.device_id, self.data.storagegroup_name_i, + self.volume_name, self.extra_specs) + mock_remove_volume.assert_called_once() + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver.masking._remove_vol_from_storage_group, + self.data.array, self.device_id, 
self.data.storagegroup_name_i, + self.volume_name, self.extra_specs) + + def test_find_initiator_names(self): + foundinitiatornames = self.driver.masking.find_initiator_names( + self.data.connector) + self.assertEqual(self.data.connector['initiator'], + foundinitiatornames[0]) + foundinitiatornames = self.driver_fc.masking.find_initiator_names( + self.data.connector) + self.assertEqual(self.data.connector['wwpns'][0], + foundinitiatornames[0]) + connector = {'ip': self.data.ip, 'initiator': None, 'host': 'HostX'} + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver.masking.find_initiator_names, connector) + self.assertRaises( + exception.VolumeBackendAPIException, + self.driver_fc.masking.find_initiator_names, connector) + + def test_find_initiator_group(self): + with mock.patch.object( + rest.VMAXRest, 'get_in_use_initiator_list_from_array', + return_value=self.data.initiator_list[2]['initiatorId']): + with mock.patch.object( + rest.VMAXRest, 'get_initiator_group_from_initiator', + return_value=self.data.initiator_list): + found_init_group_nam = ( + self.driver.masking._find_initiator_group( + self.data.array, ['FA-1D:4:123456789012345'])) + self.assertEqual(self.data.initiator_list, + found_init_group_nam) + found_init_group_nam = ( + self.driver.masking._find_initiator_group( + self.data.array, ['Error'])) + self.assertIsNone(found_init_group_nam) + + def test_create_masking_view(self): + with mock.patch.object(rest.VMAXRest, 'create_masking_view', + side_effect=[None, Exception]): + error_message = self.driver.masking.create_masking_view( + self.data.array, self.maskingviewdict['maskingview_name'], + self.data.storagegroup_name_i, self.data.port_group_name_i, + self.data.initiatorgroup_name_i, self.extra_specs) + self.assertIsNone(error_message) + error_message = self.driver.masking.create_masking_view( + self.data.array, self.maskingviewdict['maskingview_name'], + self.data.storagegroup_name_i, self.data.port_group_name_i, + 
self.data.initiatorgroup_name_i, self.extra_specs) + self.assertIsNotNone(error_message) + + @mock.patch.object(masking.VMAXMasking, '_check_ig_rollback') + def test_check_if_rollback_action_for_masking_required(self, + mock_check_ig): + with mock.patch.object(rest.VMAXRest, + 'get_storage_groups_from_volume', + side_effect=[ + exception.VolumeBackendAPIException, + self.data.defaultstoragegroup_name, + self.data.defaultstoragegroup_name, None, + None, ]): + self.assertRaises( + exception.VolumeBackendAPIException, + self.mask.check_if_rollback_action_for_masking_required, + self.data.array, self.device_id, self.maskingviewdict) + with mock.patch.object(masking.VMAXMasking, + 'remove_and_reset_members'): + self.maskingviewdict[ + 'default_sg_name'] = self.data.defaultstoragegroup_name + error_message = ( + self.mask.check_if_rollback_action_for_masking_required( + self.data.array, self.device_id, self.maskingviewdict)) + self.assertIsNone(error_message) + + @mock.patch.object(rest.VMAXRest, 'delete_masking_view') + @mock.patch.object(rest.VMAXRest, 'delete_initiator_group') + @mock.patch.object(rest.VMAXRest, 'get_initiator_group') + @mock.patch.object(masking.VMAXMasking, '_find_initiator_group', + return_value=VMAXCommonData.initiatorgroup_name_i) + def test_verify_initiator_group_from_masking_view( + self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv): + self.mask._verify_initiator_group_from_masking_view( + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, self.data.initiatorgroup_name_i, + self.data.storagegroup_name_i, self.data.port_group_name_i, + self.extra_specs) + mock_get_ig.assert_not_called() + mock_get_ig.return_value = False + self.mask._verify_initiator_group_from_masking_view( + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, 'OS-Wrong-Host-I-IG', + self.data.storagegroup_name_i, self.data.port_group_name_i, + self.extra_specs) + mock_get_ig.assert_called() + + 
@mock.patch.object(rest.VMAXRest, 'delete_masking_view') + @mock.patch.object(rest.VMAXRest, 'delete_initiator_group') + @mock.patch.object(rest.VMAXRest, 'get_initiator_group', + return_value=True) + @mock.patch.object(masking.VMAXMasking, '_find_initiator_group', + return_value=VMAXCommonData.initiatorgroup_name_i) + def test_verify_initiator_group_from_masking_view2( + self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv): + mock_delete_mv.side_effect = [None, Exception] + self.mask._verify_initiator_group_from_masking_view( + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, 'OS-Wrong-Host-I-IG', + self.data.storagegroup_name_i, self.data.port_group_name_i, + self.extra_specs) + mock_delete_mv.assert_called() + _, found_ig_from_connector = ( + self.mask._verify_initiator_group_from_masking_view( + self.data.array, self.maskingviewdict['maskingview_name'], + self.maskingviewdict, 'OS-Wrong-Host-I-IG', + self.data.storagegroup_name_i, self.data.port_group_name_i, + self.extra_specs)) + self.assertEqual(self.data.initiatorgroup_name_i, + found_ig_from_connector) + + @mock.patch.object(rest.VMAXRest, 'create_initiator_group') + def test_create_initiator_group(self, mock_create_ig): + initiator_names = self.mask.find_initiator_names(self.data.connector) + ret_init_group_name = self.mask._create_initiator_group( + self.data.array, self.data.initiatorgroup_name_i, initiator_names, + self.extra_specs) + self.assertEqual(self.data.initiatorgroup_name_i, ret_init_group_name) + + @mock.patch.object(masking.VMAXMasking, + '_last_volume_delete_initiator_group') + def test_check_ig_rollback(self, mock_last_volume): + with mock.patch.object(masking.VMAXMasking, '_find_initiator_group', + side_effect=[ + None, 'FAKE-I-IG', + self.data.initiatorgroup_name_i]): + for x in range(0, 2): + self.mask._check_ig_rollback(self.data.array, + self.data.initiatorgroup_name_i, + self.data.connector) + mock_last_volume.assert_not_called() + 
self.mask._check_ig_rollback( + self.data.array, self.data.initiatorgroup_name_i, + self.data.connector) + mock_last_volume.assert_called() + + @mock.patch.object(masking.VMAXMasking, '_cleanup_deletion') + @mock.patch.object(masking.VMAXMasking, + 'return_volume_to_default_storage_group') + def test_remove_and_reset_members(self, mock_ret_to_sg, mock_cleanup): + self.mask.remove_and_reset_members(self.data.array, self.device_id, + self.volume_name, self.extra_specs, + reset=False) + mock_ret_to_sg.assert_not_called() + self.mask.remove_and_reset_members(self.data.array, self.device_id, + self.volume_name, self.extra_specs) + mock_ret_to_sg.assert_called_once() + + @mock.patch.object(rest.VMAXRest, 'get_storage_groups_from_volume', + return_value=[VMAXCommonData.storagegroup_name_i]) + @mock.patch.object(masking.VMAXMasking, 'remove_volume_from_sg') + def test_cleanup_deletion(self, mock_remove_vol, mock_get_sg): + self.mask._cleanup_deletion(self.data.array, self.device_id, + self.volume_name, self.extra_specs) + mock_get_sg.assert_called_once() + + @mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg') + @mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg') + def test_remove_volume_from_sg(self, mock_multiple_vols, mock_last_vol): + with mock.patch.object( + rest.VMAXRest, 'get_masking_views_from_storage_group', + return_value=None): + with mock.patch.object( + rest.VMAXRest, 'get_num_vols_in_sg', + side_effect=[2, 1]): + self.mask.remove_volume_from_sg( + self.data.array, self.device_id, self.volume_name, + self.data.defaultstoragegroup_name, self.extra_specs) + mock_last_vol.assert_not_called() + self.mask.remove_volume_from_sg( + self.data.array, self.device_id, self.volume_name, + self.data.defaultstoragegroup_name, self.extra_specs) + mock_last_vol.assert_called() + + @mock.patch.object(masking.VMAXMasking, '_last_vol_in_sg') + @mock.patch.object(masking.VMAXMasking, '_multiple_vols_in_sg') + def test_remove_volume_from_sg_2(self, 
mock_multiple_vols, mock_last_vol): + with mock.patch.object( + rest.VMAXRest, 'is_volume_in_storagegroup', + return_value=True): + with mock.patch.object( + rest.VMAXRest, 'get_masking_views_from_storage_group', + return_value=[self.data.masking_view_name_i]): + with mock.patch.object( + rest.VMAXRest, 'get_num_vols_in_sg', + side_effect=[2, 1]): + self.mask.remove_volume_from_sg( + self.data.array, self.device_id, self.volume_name, + self.data.storagegroup_name_i, self.extra_specs) + mock_last_vol.assert_not_called() + self.mask.remove_volume_from_sg( + self.data.array, self.device_id, self.volume_name, + self.data.storagegroup_name_i, self.extra_specs) + mock_last_vol.assert_called() + + @mock.patch.object(masking.VMAXMasking, '_last_vol_masking_views', + return_value=True) + @mock.patch.object(masking.VMAXMasking, '_last_vol_no_masking_views', + return_value=True) + def test_last_vol_in_sg(self, mock_no_mv, mock_mv): + mv_list = [self.data.masking_view_name_i, + self.data.masking_view_name_f] + with mock.patch.object(rest.VMAXRest, + 'get_masking_views_from_storage_group', + side_effect=[mv_list, []]): + for x in range(0, 2): + self.mask._last_vol_in_sg( + self.data.array, self.device_id, self.volume_name, + self.data.storagegroup_name_i, self.extra_specs) + self.assertEqual(1, mock_mv.call_count) + self.assertEqual(1, mock_no_mv.call_count) + + @mock.patch.object(masking.VMAXMasking, '_remove_last_vol_and_delete_sg') + @mock.patch.object(masking.VMAXMasking, '_delete_cascaded_storage_groups') + @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg', + side_effect=[1, 3]) + @mock.patch.object(rest.VMAXRest, 'delete_storage_group') + @mock.patch.object(masking.VMAXMasking, 'get_parent_sg_from_child', + side_effect=[None, 'parent_sg_name', 'parent_sg_name']) + def test_last_vol_no_masking_views( + self, mock_get_parent, mock_delete, mock_num_vols, + mock_delete_casc, mock_remove): + for x in range(0, 3): + self.mask._last_vol_no_masking_views( + self.data.array, 
self.data.storagegroup_name_i, + self.device_id, self.volume_name, self.extra_specs) + self.assertEqual(1, mock_delete.call_count) + self.assertEqual(1, mock_delete_casc.call_count) + self.assertEqual(1, mock_remove.call_count) + + @mock.patch.object(masking.VMAXMasking, '_remove_last_vol_and_delete_sg') + @mock.patch.object(masking.VMAXMasking, '_delete_mv_ig_and_sg') + @mock.patch.object(masking.VMAXMasking, '_get_num_vols_from_mv', + side_effect=[(1, 'parent_name'), (3, 'parent_name')]) + def test_last_vol_masking_views( + self, mock_num_vols, mock_delete_all, mock_remove): + for x in range(0, 2): + self.mask._last_vol_masking_views( + self.data.array, self.data.storagegroup_name_i, + [self.data.masking_view_name_i], self.device_id, + self.volume_name, self.extra_specs) + self.assertEqual(1, mock_delete_all.call_count) + self.assertEqual(1, mock_remove.call_count) + + @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg') + @mock.patch.object(masking.VMAXMasking, '_remove_vol_from_storage_group') + def test_multiple_vols_in_sg(self, mock_remove_vol, mock_get_volumes): + self.mask._multiple_vols_in_sg( + self.data.array, self.device_id, self.data.storagegroup_name_i, + self.volume_name, self.extra_specs) + mock_get_volumes.assert_called_once() + + @mock.patch.object(rest.VMAXRest, 'get_element_from_masking_view') + @mock.patch.object(masking.VMAXMasking, '_last_volume_delete_masking_view') + @mock.patch.object(masking.VMAXMasking, + '_last_volume_delete_initiator_group') + @mock.patch.object(masking.VMAXMasking, '_delete_cascaded_storage_groups') + def test_delete_mv_ig_and_sg(self, mock_delete_sg, mock_delete_ig, + mock_delete_mv, mock_get_element): + self.mask._delete_mv_ig_and_sg( + self.data.array, self.data.masking_view_name_i, + self.data.storagegroup_name_i, self.data.parent_sg_i) + mock_delete_sg.assert_called_once() + + @mock.patch.object(rest.VMAXRest, 'delete_masking_view') + def test_last_volume_delete_masking_view(self, mock_delete_mv): + 
self.mask._last_volume_delete_masking_view( + self.data.array, self.data.masking_view_name_i) + mock_delete_mv.assert_called_once() + + @mock.patch.object(masking.VMAXMasking, + 'get_or_create_default_storage_group') + @mock.patch.object(masking.VMAXMasking, 'add_volume_to_storage_group') + def test_return_volume_to_default_storage_group(self, mock_add_sg, + mock_get_sg): + self.mask.return_volume_to_default_storage_group( + self.data.array, self.device_id, self.volume_name, + self.extra_specs) + mock_add_sg.assert_called_once() + + @mock.patch.object(provision.VMAXProvision, 'create_storage_group') + def test_get_or_create_default_storage_group(self, mock_create_sg): + with mock.patch.object( + rest.VMAXRest, 'get_vmax_default_storage_group', + return_value=(None, self.data.storagegroup_name_i)): + storage_group_name = self.mask.get_or_create_default_storage_group( + self.data.array, self.data.srp, self.data.slo, + self.data.workload, self.extra_specs) + self.assertEqual(self.data.storagegroup_name_i, storage_group_name) + with mock.patch.object( + rest.VMAXRest, 'get_vmax_default_storage_group', + return_value=("test_sg", self.data.storagegroup_name_i)): + with mock.patch.object( + rest.VMAXRest, 'get_masking_views_from_storage_group', + return_value=self.data.masking_view_name_i): + self.assertRaises( + exception.VolumeBackendAPIException, + self.mask.get_or_create_default_storage_group, + self.data.array, self.data.srp, self.data.slo, + self.data.workload, self.extra_specs) + + @mock.patch.object(rest.VMAXRest, 'delete_storage_group') + @mock.patch.object(masking.VMAXMasking, '_remove_vol_from_storage_group') + def test_remove_last_vol_and_delete_sg(self, mock_delete_sg, mock_vol_sg): + self.mask._remove_last_vol_and_delete_sg( + self.data.array, self.device_id, self.volume_name, + self.data.storagegroup_name_i, self.extra_specs) + mock_delete_sg.assert_called_once() + + @mock.patch.object(rest.VMAXRest, 'delete_initiator_group') + def 
test_last_volume_delete_initiator_group(self, mock_delete_ig): + self.mask._last_volume_delete_initiator_group( + self.data.array, self.data.initiatorgroup_name_f, 'Wrong_Host') + mock_delete_ig.assert_not_called() + mv_list = [self.data.masking_view_name_i, + self.data.masking_view_name_f] + with mock.patch.object(rest.VMAXRest, + 'get_masking_views_by_initiator_group', + side_effect=[mv_list, []]): + self.mask._last_volume_delete_initiator_group( + self.data.array, self.data.initiatorgroup_name_i, + self.data.connector['host']) + mock_delete_ig.assert_not_called() + self.mask._last_volume_delete_initiator_group( + self.data.array, self.data.initiatorgroup_name_i, + self.data.connector['host']) + mock_delete_ig.assert_called_once() + + def test_populate_masking_dict_init_check_false(self): + extra_specs = self.data.extra_specs + connector = self.data.connector + with mock.patch.object(self.driver, '_get_initiator_check_flag', + return_value=False): + masking_view_dict = self.driver._populate_masking_dict( + self.data.test_volume, connector, extra_specs) + self.assertFalse(masking_view_dict['initiator_check']) + + def test_populate_masking_dict_init_check_true(self): + extra_specs = self.data.extra_specs + connector = self.data.connector + with mock.patch.object(self.driver, '_get_initiator_check_flag', + return_value=True): + masking_view_dict = self.driver._populate_masking_dict( + self.data.test_volume, connector, extra_specs) + self.assertTrue(masking_view_dict['initiator_check']) + + def test_check_existing_initiator_group_verify_true(self): + mv_dict = deepcopy(self.data.masking_view_dict) + mv_dict['initiator_check'] = True + with mock.patch.object( + rest.VMAXRest, 'get_element_from_masking_view', + return_value=VMAXCommonData.initiatorgroup_name_f): + with mock.patch.object( + self.mask, '_verify_initiator_group_from_masking_view', + return_value=(True, self.data.initiatorgroup_name_f)): + self.mask._check_existing_initiator_group( + self.data.array, 
self.data.masking_view_name_f, + mv_dict, self.data.storagegroup_name_f, + self.data.port_group_name_f, self.data.extra_specs) + (self.mask._verify_initiator_group_from_masking_view. + assert_called_once_with( + self.data.array, self.data.masking_view_name_f, + mv_dict, self.data.initiatorgroup_name_f, + self.data.storagegroup_name_f, + self.data.port_group_name_f, self.data.extra_specs)) + + @mock.patch.object(masking.VMAXMasking, 'add_child_sg_to_parent_sg', + side_effect=[ + None, exception.VolumeBackendAPIException]) + @mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg', + side_effect=[True, False, False]) + def test_check_add_child_sg_to_parent_sg(self, mock_is_child, mock_add): + for x in range(0, 3): + message = self.mask._check_add_child_sg_to_parent_sg( + self.data.array, self.data.storagegroup_name_i, + self.data.parent_sg_i, self.data.extra_specs) + self.assertIsNotNone(message) + + @mock.patch.object(rest.VMAXRest, 'add_child_sg_to_parent_sg') + @mock.patch.object(rest.VMAXRest, 'is_child_sg_in_parent_sg', + side_effect=[True, False]) + def test_add_child_sg_to_parent_sg(self, mock_is_child, mock_add): + for x in range(0, 2): + self.mask.add_child_sg_to_parent_sg( + self.data.array, self.data.storagegroup_name_i, + self.data.parent_sg_i, self.data.extra_specs) + self.assertEqual(1, mock_add.call_count) + + def test_get_parent_sg_from_child(self): + with mock.patch.object(self.driver.rest, 'get_storage_group', + side_effect=[None, self.data.sg_details[1]]): + sg_name = self.mask.get_parent_sg_from_child( + self.data.array, self.data.storagegroup_name_i) + self.assertIsNone(sg_name) + sg_name2 = self.mask.get_parent_sg_from_child( + self.data.array, self.data.storagegroup_name_f) + self.assertEqual(self.data.parent_sg_f, sg_name2) + + @mock.patch.object(rest.VMAXRest, 'get_element_from_masking_view', + return_value='parent_sg') + @mock.patch.object(rest.VMAXRest, 'get_num_vols_in_sg', + return_value=2) + def test_get_num_vols_from_mv(self, 
mock_num, mock_element): + num_vols, sg = self.mask._get_num_vols_from_mv( + self.data.array, self.data.masking_view_name_f) + self.assertEqual(2, num_vols) + + @mock.patch.object(rest.VMAXRest, 'delete_storage_group') + def test_delete_cascaded(self, mock_delete): + self.mask._delete_cascaded_storage_groups( + self.data.array, self.data.masking_view_name_f, + self.data.parent_sg_f) + self.assertEqual(2, mock_delete.call_count) diff --git a/cinder/volume/drivers/dell_emc/vmax/common.py b/cinder/volume/drivers/dell_emc/vmax/common.py index 98cca8187bf..fda7e237331 100644 --- a/cinder/volume/drivers/dell_emc/vmax/common.py +++ b/cinder/volume/drivers/dell_emc/vmax/common.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. +# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,167 +15,107 @@ import ast import os.path +import sys from oslo_config import cfg from oslo_log import log as logging -from oslo_utils import units -import re import six -import uuid from cinder import exception from cinder.i18n import _ -import cinder.objects.consistencygroup as cg_obj -from cinder.objects import fields -import cinder.objects.group as group_obj -from cinder import utils as cinder_utils -from cinder.volume.drivers.dell_emc.vmax import fast -from cinder.volume.drivers.dell_emc.vmax import https from cinder.volume.drivers.dell_emc.vmax import masking from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import provision_v3 +from cinder.volume.drivers.dell_emc.vmax import rest from cinder.volume.drivers.dell_emc.vmax import utils -from cinder.volume import utils as volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF -try: - import pywbem - pywbemAvailable = True -except ImportError: - pywbemAvailable = False - -CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_emc_config.xml' -CINDER_EMC_CONFIG_FILE_PREFIX = 
'/etc/cinder/cinder_emc_config_' +CINDER_EMC_CONFIG_FILE = '/etc/cinder/cinder_dell_emc_config.xml' +CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_dell_emc_config_' CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' BACKENDNAME = 'volume_backend_name' -PREFIXBACKENDNAME = 'capabilities:volume_backend_name' -PORTGROUPNAME = 'portgroupname' -EMC_ROOT = 'root/emc' -POOL = 'storagetype:pool' -ARRAY = 'storagetype:array' -FASTPOLICY = 'storagetype:fastpolicy' -COMPOSITETYPE = 'storagetype:compositetype' -MULTI_POOL_SUPPORT = 'MultiPoolSupport' -STRIPECOUNT = 'storagetype:stripecount' -MEMBERCOUNT = 'storagetype:membercount' -STRIPED = 'striped' -CONCATENATED = 'concatenated' -SMI_VERSION_8 = 800 -# V3 -SLO = 'storagetype:slo' -WORKLOAD = 'storagetype:workload' -INTERVAL = 'storagetype:interval' -RETRIES = 'storagetype:retries' -ISV3 = 'isV3' -TRUNCATE_5 = 5 -TRUNCATE_27 = 27 -SNAPVX = 7 -DISSOLVE_SNAPVX = 9 -CREATE_NEW_TARGET = 2 -SNAPVX_REPLICATION_TYPE = 6 -# Replication -IS_RE = 'replication_enabled' -REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED -REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED -REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER -FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR -REPLICATION_ERROR = fields.ReplicationStatus.ERROR -SUSPEND_SRDF = 22 -DETACH_SRDF = 8 -MIRROR_SYNC_TYPE = 6 -emc_opts = [ - cfg.StrOpt('cinder_emc_config_file', +vmax_opts = [ + cfg.StrOpt('cinder_dell_emc_config_file', default=CINDER_EMC_CONFIG_FILE, help='Use this file for cinder emc plugin ' - 'config data'), - cfg.StrOpt('multi_pool_support', - default=False, + 'config data.'), + cfg.StrOpt('intervals', + default=3, help='Use this value to specify ' - 'multi-pool support for VMAX3'), + 'length of intervals in seconds.'), + cfg.StrOpt('retries', + default=200, + help='Use this value to specify ' + 'number of retries.'), cfg.StrOpt('initiator_check', default=False, help='Use this value to enable ' - 'the initiator_check')] + 'the 
initiator_check.')] -CONF.register_opts(emc_opts) +CONF.register_opts(vmax_opts) class VMAXCommon(object): - """Common class for SMI-S based EMC volume drivers. + """Common class for Rest based VMAX volume drivers. - This common class is for EMC volume drivers based on SMI-S. - It supports VNX and VMAX arrays. + This common class is for Dell EMC VMAX volume drivers + based on UniSphere Rest API. + It supports VMAX 3 and VMAX All Flash arrays. """ - VERSION = "2.0.0" + VERSION = "3.0.0" - stats = {'driver_version': '1.0', + stats = {'driver_version': '3.0', 'free_capacity_gb': 0, 'reserved_percentage': 0, 'storage_protocol': None, 'total_capacity_gb': 0, 'vendor_name': 'Dell EMC', - 'volume_backend_name': None, - 'replication_enabled': False, - 'replication_targets': None} + 'volume_backend_name': None} pool_info = {'backend_name': None, 'config_file': None, 'arrays_info': {}, 'max_over_subscription_ratio': None, - 'reserved_percentage': None, - 'replication_enabled': False - } + 'reserved_percentage': 0} - def __init__(self, prtcl, version, configuration=None, - active_backend_id=None): - - if not pywbemAvailable: - LOG.info("Module PyWBEM not installed. 
Install PyWBEM using the " - "python-pywbem package.") + def __init__(self, prtcl, version, configuration=None): self.protocol = prtcl self.configuration = configuration - self.configuration.append_config_values(emc_opts) - self.conn = None - self.url = None - self.user = None - self.passwd = None - self.masking = masking.VMAXMasking(prtcl) - self.utils = utils.VMAXUtils(prtcl) - self.fast = fast.VMAXFast(prtcl) - self.provision = provision.VMAXProvision(prtcl) - self.provisionv3 = provision_v3.VMAXProvisionV3(prtcl) + self.configuration.append_config_values(vmax_opts) + self.rest = rest.VMAXRest() + self.utils = utils.VMAXUtils() + self.masking = masking.VMAXMasking(prtcl, self.rest) + self.provision = provision.VMAXProvision(self.rest) self.version = version - # replication - self.replication_enabled = False - self.extendReplicatedVolume = False - self.active_backend_id = active_backend_id - self.failover = False - self._get_replication_info() - self.multiPoolSupportEnabled = False - self.initiatorCheck = False self._gather_info() def _gather_info(self): """Gather the relevant information for update_volume_stats.""" - if hasattr(self.configuration, 'cinder_emc_config_file'): + self._get_attributes_from_config() + array_info = self.utils.parse_file_to_get_array_map( + self.pool_info['config_file']) + self.rest.set_rest_credentials(array_info) + finalarrayinfolist = self._get_slo_workload_combinations( + array_info) + self.pool_info['arrays_info'] = finalarrayinfolist + + def _get_attributes_from_config(self): + """Get relevent details from configuration file.""" + if hasattr(self.configuration, 'cinder_dell_emc_config_file'): self.pool_info['config_file'] = ( - self.configuration.cinder_emc_config_file) + self.configuration.cinder_dell_emc_config_file) else: self.pool_info['config_file'] = ( - self.configuration.safe_get('cinder_emc_config_file')) - if hasattr(self.configuration, 'multi_pool_support'): - tempMultiPoolSupported = cinder_utils.get_bool_param( - 
'multi_pool_support', self.configuration) - if tempMultiPoolSupported: - self.multiPoolSupportEnabled = True + self.configuration.safe_get('cinder_dell_emc_config_file')) + self.intervals = self.configuration.safe_get('intervals') + self.retries = self.configuration.safe_get('retries') self.pool_info['backend_name'] = ( self.configuration.safe_get('volume_backend_name')) self.pool_info['max_over_subscription_ratio'] = ( @@ -188,319 +128,178 @@ class VMAXCommon(object): {'emcConfigFileName': self.pool_info['config_file'], 'backendName': self.pool_info['backend_name']}) - arrayInfoList = self.utils.parse_file_to_get_array_map( - self.pool_info['config_file']) - # Assuming that there is a single array info object always - # Check if Multi pool support is enabled - if self.multiPoolSupportEnabled is False: - self.pool_info['arrays_info'] = arrayInfoList - else: - finalArrayInfoList = self._get_slo_workload_combinations( - arrayInfoList) - self.pool_info['arrays_info'] = finalArrayInfoList + def _get_initiator_check_flag(self): + """Reads the configuration for initator_check flag. 
- def _get_replication_info(self): - """Gather replication information, if provided.""" - self.rep_config = None - self.replication_targets = None - if hasattr(self.configuration, 'replication_device'): - self.rep_devices = self.configuration.safe_get( - 'replication_device') - if self.rep_devices and len(self.rep_devices) == 1: - self.rep_config = self.utils.get_replication_config( - self.rep_devices) - if self.rep_config: - self.replication_targets = [self.rep_config['array']] - if self.active_backend_id == self.rep_config['array']: - self.failover = True - self.extendReplicatedVolume = self.rep_config['allow_extend'] - # use self.replication_enabled for update_volume_stats - self.replication_enabled = True - LOG.debug("The replication configuration is %(rep_config)s.", - {'rep_config': self.rep_config}) - elif self.rep_devices and len(self.rep_devices) > 1: - LOG.error("More than one replication target is configured. " - "EMC VMAX only suppports a single replication " - "target. Replication will not be enabled.") + :returns: flag + """ + conf_string = (self.configuration.safe_get('initiator_check')) + ret_val = False + string_true = "True" + if conf_string: + if conf_string.lower() == string_true.lower(): + ret_val = True + return ret_val - def _get_slo_workload_combinations(self, arrayInfoList): + def _get_slo_workload_combinations(self, array_info): """Method to query the array for SLO and Workloads. 
- Takes the arrayInfoList object and generates a set which has + Takes the arrayinfolist object and generates a set which has all available SLO & Workload combinations - - :param arrayInfoList: - :return: finalArrayInfoList - :raises Exception: + :param array_info: the array information + :returns: finalarrayinfolist + :raises VolumeBackendAPIException: """ try: - sloWorkloadSet = set() - # Pattern for extracting the SLO & Workload String - pattern = re.compile("^-S[A-Z]+") - for arrayInfo in arrayInfoList: - self._set_ecom_credentials(arrayInfo) - isV3 = self.utils.isArrayV3(self.conn, - arrayInfo['SerialNumber']) - # Only if the array is VMAX3 - if isV3: - poolInstanceName, storageSystemStr = ( - self._find_pool_in_array(arrayInfo['SerialNumber'], - arrayInfo['PoolName'], isV3)) - # Get the pool capability - storagePoolCapability = ( - self.provisionv3.get_storage_pool_capability( - self.conn, poolInstanceName)) - # Get the pool settings - storagePoolSettings = self.conn.AssociatorNames( - storagePoolCapability, - ResultClass='CIM_storageSetting') - for storagePoolSetting in storagePoolSettings: - settingInstanceID = storagePoolSetting['InstanceID'] - settingInstanceDetails = settingInstanceID.split('+') - sloWorkloadString = settingInstanceDetails[2] - if pattern.match(sloWorkloadString): - length = len(sloWorkloadString) - tempSloWorkloadString = ( - sloWorkloadString[2:length - 1]) - sloWorkloadSet.add(tempSloWorkloadString) - # Assuming that there is always a single arrayInfo object - finalArrayInfoList = [] - for sloWorkload in sloWorkloadSet: + array = array_info['SerialNumber'] + # Get the srp slo & workload settings + slo_settings = self.rest.get_slo_list(array) + # Remove 'None' from the list (so a 'None' slo is not combined + # with a workload, which is not permitted) + slo_settings = [x for x in slo_settings + if x.lower() not in ['none']] + workload_settings = self.rest.get_workload_settings(array) + workload_settings.append("None") + slo_workload_set 
= set( + ['%(slo)s:%(workload)s' % {'slo': slo, 'workload': workload} + for slo in slo_settings for workload in workload_settings]) + # Add back in in the only allowed 'None' slo/ workload combination + slo_workload_set.add('None:None') + + finalarrayinfolist = [] + for sloWorkload in slo_workload_set: # Doing a shallow copy will work as we are modifying # only strings - temparrayInfo = arrayInfoList[0].copy() + temparray_info = array_info.copy() slo, workload = sloWorkload.split(':') - # Check if we got SLO and workload from the set (from array) - # The previous check was done by mistake against the value - # from XML file - if slo: - temparrayInfo['SLO'] = slo - if workload: - temparrayInfo['Workload'] = workload - finalArrayInfoList.append(temparrayInfo) - except Exception: - exceptionMessage = (_( - "Unable to get the SLO/Workload combinations from the array")) - LOG.exception(exceptionMessage) + temparray_info['SLO'] = slo + temparray_info['Workload'] = workload + finalarrayinfolist.append(temparray_info) + except Exception as e: + exception_message = (_( + "Unable to get the SLO/Workload combinations from the array. " + "Exception received was %(e)s") % {'e': six.text_type(e)}) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( - data=exceptionMessage) - return finalArrayInfoList + data=exception_message) + return finalarrayinfolist def create_volume(self, volume): - """Creates a EMC(VMAX) volume from a pre-existing storage pool. + """Creates a EMC(VMAX) volume from a storage group. - For a concatenated compositeType: - If the volume size is over 240GB then a composite is created - EMCNumberOfMembers > 1, otherwise it defaults to a non composite - - For a striped compositeType: - The user must supply an extra spec to determine how many metas - will make up the striped volume. If the meta size is greater - than 240GB an error is returned to the user. Otherwise the - EMCNumberOfMembers is what the user specifies. 
- - :param volume: volume Object - :returns: model_update, dict + :param volume: volume object + :returns: model_update - dict """ model_update = {} - volumeSize = int(self.utils.convert_gb_to_bits(volume['size'])) - volumeId = volume['id'] - extraSpecs = self._initial_setup(volume) - self.conn = self._get_ecom_connection() + volume_id = volume.id + extra_specs = self._initial_setup(volume) - # VolumeName naming convention is 'OS-UUID'. - volumeName = self.utils.get_volume_element_name(volumeId) + # Volume_name naming convention is 'OS-UUID'. + volume_name = self.utils.get_volume_element_name(volume_id) + volume_size = volume.size - if extraSpecs[ISV3]: - rc, volumeDict, storageSystemName = ( - self._create_v3_volume(volume, volumeName, volumeSize, - extraSpecs)) - else: - rc, volumeDict, storageSystemName = ( - self._create_composite_volume(volume, volumeName, volumeSize, - extraSpecs)) - - # set-up volume replication, if enabled (V3 only) - if self.utils.is_replication_enabled(extraSpecs): - try: - replication_status, replication_driver_data = ( - self.setup_volume_replication( - self.conn, volume, volumeDict, extraSpecs)) - except Exception: - self._cleanup_replication_source(self.conn, volumeName, - volumeDict, extraSpecs) - raise - model_update.update( - {'replication_status': replication_status, - 'replication_driver_data': six.text_type( - replication_driver_data)}) - - # If volume is created as part of a consistency group. 
- if 'consistencygroup_id' in volume and volume['consistencygroup_id']: - volumeInstance = self.utils.find_volume_instance( - self.conn, volumeDict, volumeName) - replicationService = ( - self.utils.find_replication_service(self.conn, - storageSystemName)) - cgInstanceName, cgName = ( - self._find_consistency_group( - replicationService, - six.text_type(volume['consistencygroup_id']))) - self.provision.add_volume_to_cg(self.conn, - replicationService, - cgInstanceName, - volumeInstance.path, - cgName, - volumeName, - extraSpecs) - - LOG.info("Leaving create_volume: %(volumeName)s " - "Return code: %(rc)lu " - "volume dict: %(name)s.", - {'volumeName': volumeName, - 'rc': rc, - 'name': volumeDict}) - # Adding version information - volumeDict['version'] = self.version + volume_dict = (self._create_volume( + volume_name, volume_size, extra_specs)) + LOG.info("Leaving create_volume: %(name)s. Volume dict: %(dict)s.", + {'name': volume_name, 'dict': volume_dict}) model_update.update( - {'provider_location': six.text_type(volumeDict)}) - + {'provider_location': six.text_type(volume_dict)}) return model_update def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. - For VMAX, replace snapshot with clone. 
- - :param volume: volume Object + :param volume: volume object :param snapshot: snapshot object - :returns: model_update, dict + :returns: model_update :raises VolumeBackendAPIException: """ LOG.debug("Entering create_volume_from_snapshot.") - extraSpecs = self._initial_setup(snapshot, host=volume['host']) model_update = {} - self.conn = self._get_ecom_connection() - snapshotInstance = self._find_lun(snapshot) + extra_specs = self._initial_setup(snapshot) - self._sync_check(snapshotInstance, snapshot['name'], extraSpecs) + clone_dict = self._create_cloned_volume( + volume, snapshot, extra_specs, is_snapshot=False, + from_snapvx=True) - cloneDict = self._create_cloned_volume(volume, snapshot, - extraSpecs, False) - # set-up volume replication, if enabled - if self.utils.is_replication_enabled(extraSpecs): - try: - replication_status, replication_driver_data = ( - self.setup_volume_replication( - self.conn, volume, cloneDict, extraSpecs)) - except Exception: - self._cleanup_replication_source(self.conn, snapshot['name'], - cloneDict, extraSpecs) - raise - model_update.update( - {'replication_status': replication_status, - 'replication_driver_data': six.text_type( - replication_driver_data)}) - - cloneDict['version'] = self.version model_update.update( - {'provider_location': six.text_type(cloneDict)}) - + {'provider_location': six.text_type(clone_dict)}) return model_update - def create_cloned_volume(self, cloneVolume, sourceVolume): + def create_cloned_volume(self, clone_volume, source_volume): """Creates a clone of the specified volume. 
- :param cloneVolume: clone volume Object - :param sourceVolume: volume object + :param clone_volume: clone volume Object + :param source_volume: volume object :returns: model_update, dict """ model_update = {} - extraSpecs = self._initial_setup(sourceVolume) - cloneDict = self._create_cloned_volume(cloneVolume, sourceVolume, - extraSpecs, False) + extra_specs = self._initial_setup(source_volume) + clone_dict = self._create_cloned_volume(clone_volume, source_volume, + extra_specs) - # set-up volume replication, if enabled - if self.utils.is_replication_enabled(extraSpecs): - try: - replication_status, replication_driver_data = ( - self.setup_volume_replication( - self.conn, cloneVolume, cloneDict, extraSpecs)) - except Exception: - self._cleanup_replication_source( - self.conn, cloneVolume['name'], cloneDict, extraSpecs) - raise - model_update.update( - {'replication_status': replication_status, - 'replication_driver_data': six.text_type( - replication_driver_data)}) - - cloneDict['version'] = self.version model_update.update( - {'provider_location': six.text_type(cloneDict)}) - + {'provider_location': six.text_type(clone_dict)}) return model_update def delete_volume(self, volume): """Deletes a EMC(VMAX) volume. - :param volume: volume Object + :param volume: volume object """ LOG.info("Deleting Volume: %(volume)s", - {'volume': volume['name']}) - - rc, volumeName = self._delete_volume(volume) - LOG.info("Leaving delete_volume: %(volumename)s Return code: " - "%(rc)lu.", - {'volumename': volumeName, - 'rc': rc}) + {'volume': volume.name}) + volume_name = self._delete_volume(volume) + LOG.info("Leaving delete_volume: %(volume_name)s.", + {'volume_name': volume_name}) def create_snapshot(self, snapshot, volume): """Creates a snapshot. - For VMAX, replace snapshot with clone. 
- :param snapshot: snapshot object :param volume: volume Object to create snapshot from :returns: dict -- the cloned volume dictionary """ - extraSpecs = self._initial_setup(volume) - return self._create_cloned_volume(snapshot, volume, extraSpecs, True) + extra_specs = self._initial_setup(volume) + snapshot_dict = self._create_cloned_volume( + snapshot, volume, extra_specs, is_snapshot=True) + model_update = {'provider_location': six.text_type(snapshot_dict)} + return model_update def delete_snapshot(self, snapshot, volume): """Deletes a snapshot. :param snapshot: snapshot object - :param volume: volume Object to create snapshot from + :param volume: source volume """ LOG.info("Delete Snapshot: %(snapshotName)s.", - {'snapshotName': snapshot['name']}) - self._delete_snapshot(snapshot, volume['host']) + {'snapshotName': snapshot.name}) + extra_specs = self._initial_setup(volume) + sourcedevice_id, snap_name = self._parse_snap_info( + extra_specs[utils.ARRAY], snapshot) + if not sourcedevice_id or not snap_name: + LOG.info("No snapshot found on the array") + else: + self.provision.delete_volume_snap_check_for_links( + extra_specs[utils.ARRAY], snap_name, + sourcedevice_id, extra_specs) + LOG.info("Leaving delete_snapshot: %(ssname)s.", + {'ssname': snap_name}) - def _remove_members(self, controllerConfigService, - volumeInstance, connector, extraSpecs): + def _remove_members(self, array, volume, device_id, extra_specs): """This method unmaps a volume from a host. - Removes volume from the Device Masking Group that belongs to - a Masking View. - Check if fast policy is in the extra specs. If it isn't we do - not need to do any thing for FAST. - Assume that isTieringPolicySupported is False unless the FAST - policy is in the extra specs and tiering is enabled on the array. 
- - :param controllerConfigService: instance name of - ControllerConfigurationService - :param volumeInstance: volume Object - :param connector: the connector object - :param extraSpecs: extra specifications - :returns: storageGroupInstanceName + Removes volume from the storage group that belongs to a masking view. + :param array: the array serial number + :param volume: volume object + :param device_id: the VMAX volume device id + :param extra_specs: extra specifications """ - volumeName = volumeInstance['ElementName'] - LOG.debug("Detaching volume %s.", volumeName) + volume_name = volume.name + LOG.debug("Detaching volume %s.", volume_name) return self.masking.remove_and_reset_members( - self.conn, controllerConfigService, volumeInstance, - volumeName, extraSpecs, connector) + array, device_id, volume_name, extra_specs, True) def _unmap_lun(self, volume, connector): """Unmaps a volume from the host. @@ -509,60 +308,22 @@ class VMAXCommon(object): :param connector: the connector Object :raises VolumeBackendAPIException: """ - extraSpecs = self._initial_setup(volume) - if self.utils.is_volume_failed_over(volume): - extraSpecs = self._get_replication_extraSpecs( - extraSpecs, self.rep_config) - volumename = volume['name'] + device_info = {} + extra_specs = self._initial_setup(volume) + volume_name = volume.name LOG.info("Unmap volume: %(volume)s.", - {'volume': volumename}) - - device_info, __, __ = self.find_device_number( - volume, connector['host']) + {'volume': volume_name}) + if connector is not None: + device_info = self.find_host_lun_id( + volume, connector['host'], extra_specs) if 'hostlunid' not in device_info: LOG.info("Volume %s is not mapped. 
No volume to unmap.", - volumename) + volume_name) return - vol_instance = self._find_lun(volume) - storage_system = vol_instance['SystemName'] - - livemigrationrecord = self.utils.get_live_migration_record(volume) - if livemigrationrecord: - self.utils.delete_live_migration_record(volume) - - if livemigrationrecord and self._is_volume_multiple_masking_views( - vol_instance): - return - - configservice = self.utils.find_controller_configuration_service( - self.conn, storage_system) - if configservice is None: - exception_message = (_("Cannot find Controller Configuration " - "Service for storage system " - "%(storage_system)s.") - % {'storage_system': storage_system}) - raise exception.VolumeBackendAPIException(data=exception_message) - - self._remove_members(configservice, vol_instance, connector, - extraSpecs) - - def _is_volume_multiple_masking_views(self, vol_instance): - """Check if volume is in more than one MV. - - :param vol_instance: the volume instance - :returns: boolean - """ - storageGroupInstanceNames = ( - self.masking.get_associated_masking_groups_from_device( - self.conn, vol_instance.path)) - - for storageGroupInstanceName in storageGroupInstanceNames: - mvInstanceNames = self.masking.get_masking_view_from_storage_group( - self.conn, storageGroupInstanceName) - if len(mvInstanceNames) > 1: - return True - return False + device_id = self._find_device_on_array(volume, extra_specs) + array = extra_specs[utils.ARRAY] + self._remove_members(array, volume, device_id, extra_specs) def initialize_connection(self, volume, connector): """Initializes the connection and returns device and connection info. @@ -577,471 +338,245 @@ class VMAXCommon(object): .. 
code-block:: none - initiatorGroupName = OS---IG + initiator_group_name = OS---IG e.g OS-myShortHost-I-IG - storageGroupName = OS----SG - e.g OS-myShortHost-SATA_BRONZ1-I-SG - portGroupName = OS--PG The portGroupName will come from + storage_group_name = OS----SG + e.g OS-myShortHost-SRP_1-I-SG + port_group_name = OS--PG The port_group_name will come from the EMC configuration xml file. These are precreated. If the portGroup does not exist then an error will be returned to the user - maskingView = OS----MV - e.g OS-myShortHost-SATA_BRONZ1-I-MV + maskingview_name = OS----MV + e.g OS-myShortHost-SRP_1-I-MV :param volume: volume Object :param connector: the connector Object - :returns: dict -- deviceInfoDict - device information dict + :returns: dict -- device_info_dict - device information dict :raises VolumeBackendAPIException: """ - portGroupName = None - extraSpecs = self._initial_setup(volume) + extra_specs = self._initial_setup(volume) is_multipath = connector.get('multipath', False) - volumeName = volume['name'] + volume_name = volume.name LOG.info("Initialize connection: %(volume)s.", - {'volume': volumeName}) - self.conn = self._get_ecom_connection() + {'volume': volume_name}) + device_info_dict = self.find_host_lun_id( + volume, connector['host'], extra_specs) + masking_view_dict = self._populate_masking_dict( + volume, connector, extra_specs) - if self.utils.is_volume_failed_over(volume): - extraSpecs = self._get_replication_extraSpecs( - extraSpecs, self.rep_config) - deviceInfoDict, isLiveMigration, sourceInfoDict = ( - self._wrap_find_device_number( - volume, connector['host'])) - maskingViewDict = self._populate_masking_dict( - volume, connector, extraSpecs) - - if ('hostlunid' in deviceInfoDict and - deviceInfoDict['hostlunid'] is not None): - deviceNumber = deviceInfoDict['hostlunid'] + if ('hostlunid' in device_info_dict and + device_info_dict['hostlunid'] is not None): + hostlunid = device_info_dict['hostlunid'] LOG.info("Volume %(volume)s is 
already mapped. " - "The device number is %(deviceNumber)s.", - {'volume': volumeName, - 'deviceNumber': deviceNumber}) - self.utils.insert_live_migration_record(volume) - # Special case, we still need to get the iscsi ip address. - portGroupName = ( - self._get_correct_port_group( - deviceInfoDict, maskingViewDict['storageSystemName'])) + "The hostlunid is %(hostlunid)s.", + {'volume': volume_name, + 'hostlunid': hostlunid}) + port_group_name = ( + self.get_port_group_from_masking_view( + extra_specs[utils.ARRAY], + device_info_dict['maskingview'])) + else: - if isLiveMigration: - self.utils.insert_live_migration_record(volume) - maskingViewDict['storageGroupInstanceName'] = ( - self._get_storage_group_from_source(sourceInfoDict)) - maskingViewDict['portGroupInstanceName'] = ( - self._get_port_group_from_source(sourceInfoDict)) - deviceInfoDict, portGroupName = self._attach_volume( - volume, connector, extraSpecs, maskingViewDict, True) - else: - deviceInfoDict, portGroupName = ( - self._attach_volume( - volume, connector, extraSpecs, maskingViewDict)) - + device_info_dict, port_group_name = ( + self._attach_volume( + volume, connector, extra_specs, masking_view_dict)) if self.protocol.lower() == 'iscsi': - deviceInfoDict['ip_and_iqn'] = ( - self._find_ip_protocol_endpoints( - self.conn, deviceInfoDict['storagesystem'], - portGroupName)) - deviceInfoDict['is_multipath'] = is_multipath + device_info_dict['ip_and_iqn'] = ( + self._find_ip_and_iqns( + extra_specs[utils.ARRAY], port_group_name)) + device_info_dict['is_multipath'] = is_multipath + return device_info_dict - return deviceInfoDict - - def _attach_volume(self, volume, connector, extraSpecs, - maskingViewDict, isLiveMigration=False): + def _attach_volume(self, volume, connector, extra_specs, + masking_view_dict): """Attach a volume to a host. - If live migration is being undertaken then the volume - remains attached to the source host. 
- - :params volume: the volume object - :params connector: the connector object - :param extraSpecs: extra specifications - :param maskingViewDict: masking view information - :param isLiveMigration: boolean, can be None - :returns: dict -- deviceInfoDict + :param volume: the volume object + :param connector: the connector object + :param extra_specs: extra specifications + :param masking_view_dict: masking view information + :returns: dict -- device_info_dict String -- port group name - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException """ - volumeName = volume['name'] - if isLiveMigration: - maskingViewDict['isLiveMigration'] = True - else: - maskingViewDict['isLiveMigration'] = False + volume_name = volume.name - rollbackDict = self.masking.setup_masking_view( - self.conn, maskingViewDict, extraSpecs) + rollback_dict = self.masking.setup_masking_view( + masking_view_dict[utils.ARRAY], + masking_view_dict, extra_specs) # Find host lun id again after the volume is exported to the host. - deviceInfoDict, __, __ = self.find_device_number( - volume, connector['host']) - if 'hostlunid' not in deviceInfoDict: + device_info_dict = self.find_host_lun_id(volume, connector['host'], + extra_specs) + if 'hostlunid' not in device_info_dict: # Did not successfully attach to host, # so a rollback for FAST is required. - LOG.error("Error Attaching volume %(vol)s.", - {'vol': volumeName}) - if ((rollbackDict['fastPolicyName'] is not None) or - (rollbackDict['isV3'] is not None)): - (self.masking._check_if_rollback_action_for_masking_required( - self.conn, rollbackDict)) - livemigrationrecord = self.utils.get_live_migration_record(volume) - if livemigrationrecord: - self.utils.delete_live_migration_record(volume) + LOG.error("Error Attaching volume %(vol)s. " + "Cannot retrieve hostlunid. 
", + {'vol': volume_name}) + self.masking.check_if_rollback_action_for_masking_required( + masking_view_dict[utils.ARRAY], + masking_view_dict[utils.DEVICE_ID], + rollback_dict) exception_message = (_("Error Attaching volume %(vol)s.") - % {'vol': volumeName}) + % {'vol': volume_name}) raise exception.VolumeBackendAPIException( data=exception_message) - return deviceInfoDict, rollbackDict['pgGroupName'] - - def _is_same_host(self, connector, deviceInfoDict): - """Check if the host is the same. - - Check if the host to attach to is the same host - that is already attached. This is necessary for - live migration. - - :params connector: the connector object - :params deviceInfoDict: the device information dictionary - :returns: boolean -- True if the host is the same, False otherwise. - """ - if 'host' in connector: - currentHost = connector['host'] - if ('maskingview' in deviceInfoDict and - deviceInfoDict['maskingview'] is not None): - if currentHost in deviceInfoDict['maskingview']: - return True - return False - - def _get_correct_port_group(self, deviceInfoDict, storageSystemName): - """Get the portgroup name from the existing masking view. 
- - :params deviceInfoDict: the device info dictionary - :params storageSystemName: storage system name - :returns: String port group name - """ - if ('controller' in deviceInfoDict and - deviceInfoDict['controller'] is not None): - maskingViewInstanceName = deviceInfoDict['controller'] - try: - maskingViewInstance = ( - self.conn.GetInstance(maskingViewInstanceName)) - except Exception: - exception_message = (_("Unable to get the name of " - "the masking view.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - - # Get the portgroup from masking view - portGroupInstanceName = ( - self.masking._get_port_group_from_masking_view( - self.conn, - maskingViewInstance['ElementName'], - storageSystemName)) - try: - portGroupInstance = ( - self.conn.GetInstance(portGroupInstanceName)) - portGroupName = ( - portGroupInstance['ElementName']) - except Exception: - exception_message = (_("Unable to get the name of " - "the portgroup.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - else: - exception_message = (_("Cannot get the portgroup from " - "the masking view.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - return portGroupName - - def _get_storage_group_from_source(self, deviceInfoDict): - """Get the storage group from the existing masking view. 
- - :params deviceInfoDict: the device info dictionary - :returns: storage group instance - """ - storageGroupInstanceName = None - if ('controller' in deviceInfoDict and - deviceInfoDict['controller'] is not None): - maskingViewInstanceName = deviceInfoDict['controller'] - - # Get the storage group from masking view - storageGroupInstanceName = ( - self.masking._get_storage_group_from_masking_view_instance( - self.conn, - maskingViewInstanceName)) - else: - exception_message = (_("Cannot get the storage group from " - "the masking view.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - return storageGroupInstanceName - - def _get_port_group_from_source(self, deviceInfoDict): - """Get the port group from the existing masking view. - - :params deviceInfoDict: the device info dictionary - :returns: port group instance - """ - portGroupInstanceName = None - if ('controller' in deviceInfoDict and - deviceInfoDict['controller'] is not None): - maskingViewInstanceName = deviceInfoDict['controller'] - - # Get the port group from masking view - portGroupInstanceName = ( - self.masking.get_port_group_from_masking_view_instance( - self.conn, - maskingViewInstanceName)) - else: - exception_message = (_("Cannot get the port group from " - "the masking view.")) - raise exception.VolumeBackendAPIException( - data=exception_message) - return portGroupInstanceName - - def check_ig_instance_name(self, initiatorGroupInstanceName): - """Check if an initiator group instance is on the array. - - :param initiatorGroupInstanceName: initiator group instance name - :returns: initiator group name, or None if deleted - """ - return self.utils.check_ig_instance_name( - self.conn, initiatorGroupInstanceName) + return device_info_dict, rollback_dict['port_group_name'] def terminate_connection(self, volume, connector): """Disallow connection from connector. 
- :params volume: the volume Object - :params connector: the connector Object + :param volume: the volume Object + :param connector: the connector Object """ - volumename = volume['name'] + volume_name = volume.name LOG.info("Terminate connection: %(volume)s.", - {'volume': volumename}) - + {'volume': volume_name}) self._unmap_lun(volume, connector) - def extend_volume(self, volume, newSize): - """Extends an existing volume. - - Prequisites: - 1. The volume must be composite e.g StorageVolume.EMCIsComposite=True - 2. The volume can only be concatenated - e.g StorageExtent.IsConcatenated=True - - :params volume: the volume Object - :params newSize: the new size to increase the volume to - :returns: dict -- modifiedVolumeDict - the extended volume Object - :raises VolumeBackendAPIException: - """ - originalVolumeSize = volume['size'] - volumeName = volume['name'] - extraSpecs = self._initial_setup(volume) - self.conn = self._get_ecom_connection() - volumeInstance = self._find_lun(volume) - if volumeInstance is None: - exceptionMessage = (_("Cannot find Volume: %(volumename)s. " - "Extend operation. Exiting....") - % {'volumename': volumeName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - return self._extend_volume( - volume, volumeInstance, volumeName, newSize, - originalVolumeSize, extraSpecs) - - def _extend_volume( - self, volume, volumeInstance, volumeName, newSize, - originalVolumeSize, extraSpecs): + def extend_volume(self, volume, new_size): """Extends an existing volume. 
:param volume: the volume Object - :param volumeInstance: the volume instance - :param volumeName: the volume name - :param newSize: the new size to increase the volume to - :param originalVolumeSize: - :param extraSpecs: extra specifications - :return: dict -- modifiedVolumeDict - the extended volume Object + :param new_size: the new size to increase the volume to + :returns: dict -- modifiedVolumeDict - the extended volume Object :raises VolumeBackendAPIException: """ - if int(originalVolumeSize) > int(newSize): - exceptionMessage = (_( - "Your original size: %(originalVolumeSize)s GB is greater " - "than: %(newSize)s GB. Only Extend is supported. Exiting...") - % {'originalVolumeSize': originalVolumeSize, - 'newSize': newSize}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) + original_vol_size = volume.size + volume_name = volume.name + extra_specs = self._initial_setup(volume) + device_id = self._find_device_on_array(volume, extra_specs) + array = extra_specs[utils.ARRAY] + # check if volume is part of an on-going clone operation + self._sync_check(array, device_id, volume_name, extra_specs) + if device_id is None: + exception_message = (_("Cannot find Volume: %(volume_name)s. " + "Extend operation. Exiting....") + % {'volume_name': volume_name}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + __, snapvx_src, __ = self.rest.is_vol_in_rep_session(array, device_id) + if snapvx_src: + exception_message = ( + _("The volume: %(volume)s is a snapshot source. Extending a " + "volume with snapVx snapshots is not supported. 
Exiting...") + % {'volume': volume_name}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) - additionalVolumeSize = six.text_type( - int(newSize) - int(originalVolumeSize)) - additionalVolumeSize = self.utils.convert_gb_to_bits( - additionalVolumeSize) + if int(original_vol_size) > int(new_size): + exception_message = (_( + "Your original size: %(original_vol_size)s GB is greater " + "than: %(new_size)s GB. Only Extend is supported. Exiting...") + % {'original_vol_size': original_vol_size, + 'new_size': new_size}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + LOG.info("Extending volume %(volume)s to %(new_size)d GBs", + {'volume': volume_name, + 'new_size': int(new_size)}) + self.provision.extend_volume(array, device_id, new_size, extra_specs) - if extraSpecs[ISV3]: - if self.utils.is_replication_enabled(extraSpecs): - # extra logic required if volume is replicated - rc, modifiedVolumeDict = self.extend_volume_is_replicated( - volume, volumeInstance, volumeName, newSize, - extraSpecs) - else: - rc, modifiedVolumeDict = self._extend_v3_volume( - volumeInstance, volumeName, newSize, extraSpecs) - else: - # This is V2. - rc, modifiedVolumeDict = self._extend_composite_volume( - volumeInstance, volumeName, newSize, additionalVolumeSize, - extraSpecs) - # Check the occupied space of the new extended volume. - extendedVolumeInstance = self.utils.find_volume_instance( - self.conn, modifiedVolumeDict, volumeName) - extendedVolumeSize = self.utils.get_volume_size( - self.conn, extendedVolumeInstance) - LOG.debug( - "The actual volume size of the extended volume: %(volumeName)s " - "is %(volumeSize)s.", - {'volumeName': volumeName, - 'volumeSize': extendedVolumeSize}) - - # If the requested size and the actual size don't - # tally throw an exception. 
- newSizeBits = self.utils.convert_gb_to_bits(newSize) - diffVolumeSize = self.utils.compare_size( - newSizeBits, extendedVolumeSize) - if diffVolumeSize != 0: - exceptionMessage = (_( - "The requested size : %(requestedSize)s is not the same as " - "resulting size: %(resultSize)s.") - % {'requestedSize': newSizeBits, - 'resultSize': extendedVolumeSize}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - LOG.debug( - "Leaving extend_volume: %(volumeName)s. " - "Return code: %(rc)lu, " - "volume dict: %(name)s.", - {'volumeName': volumeName, - 'rc': rc, - 'name': modifiedVolumeDict}) - - return modifiedVolumeDict + LOG.debug("Leaving extend_volume: %(volume_name)s. ", + {'volume_name': volume_name}) def update_volume_stats(self): """Retrieve stats info.""" pools = [] - # Dictionary to hold the VMAX3 arrays for which the SRP details - # have already been queried + # Dictionary to hold the arrays for which the SRP details + # have already been queried. 
# This only applies to the arrays for which WLP is not enabled arrays = {} - backendName = self.pool_info['backend_name'] + wlp_enabled = False + total_capacity_gb = 0 + free_capacity_gb = 0 + provisioned_capacity_gb = 0 + location_info = None + backend_name = self.pool_info['backend_name'] max_oversubscription_ratio = ( self.pool_info['max_over_subscription_ratio']) - reservedPercentage = self.pool_info['reserved_percentage'] + reserved_percentage = self.pool_info['reserved_percentage'] array_max_over_subscription = None array_reserve_percent = None - for arrayInfo in self.pool_info['arrays_info']: - alreadyQueried = False - self._set_ecom_credentials(arrayInfo) - # Check what type of array it is - isV3 = self.utils.isArrayV3(self.conn, - arrayInfo['SerialNumber']) - if isV3: - if self.failover: - arrayInfo = self.get_secondary_stats_info( - self.rep_config, arrayInfo) - # Report only the SLO name in the pool name for - # backward compatibility - if self.multiPoolSupportEnabled is False: - (location_info, total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, - array_reserve_percent, - wlpEnabled) = self._update_srp_stats(arrayInfo) - poolName = ("%(slo)s+%(poolName)s+%(array)s" - % {'slo': arrayInfo['SLO'], - 'poolName': arrayInfo['PoolName'], - 'array': arrayInfo['SerialNumber']}) - else: - # Add both SLO & Workload name in the pool name - # Query the SRP only once if WLP is not enabled - # Only insert the array details in the dict once - if arrayInfo['SerialNumber'] not in arrays: - (location_info, total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, - array_reserve_percent, - wlpEnabled) = self._update_srp_stats(arrayInfo) - else: - alreadyQueried = True - poolName = ("%(slo)s+%(workload)s+%(poolName)s+%(array)s" - % {'slo': arrayInfo['SLO'], - 'workload': arrayInfo['Workload'], - 'poolName': arrayInfo['PoolName'], - 'array': arrayInfo['SerialNumber']}) - if wlpEnabled is False: - arrays[arrayInfo['SerialNumber']] = ( - [total_capacity_gb, 
free_capacity_gb, - provisioned_capacity_gb, array_reserve_percent]) - else: - # This is V2 + array_info_list = self.pool_info['arrays_info'] + already_queried = False + for array_info in array_info_list: + # Add both SLO & Workload name in the pool name + # Query the SRP only once if WLP is not enabled + # Only insert the array details in the dict once + self.rest.set_rest_credentials(array_info) + if array_info['SerialNumber'] not in arrays: (location_info, total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, array_max_over_subscription) = ( - self._update_pool_stats(backendName, arrayInfo)) - poolName = ("%(poolName)s+%(array)s" - % {'poolName': arrayInfo['PoolName'], - 'array': arrayInfo['SerialNumber']}) + provisioned_capacity_gb, + array_reserve_percent, + wlp_enabled) = self._update_srp_stats(array_info) + else: + already_queried = True + pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s" + % {'slo': array_info['SLO'], + 'workload': array_info['Workload'], + 'srpName': array_info['srpName'], + 'array': array_info['SerialNumber']}) + if wlp_enabled is False: + arrays[array_info['SerialNumber']] = ( + [total_capacity_gb, free_capacity_gb, + provisioned_capacity_gb, array_reserve_percent]) - if alreadyQueried and self.multiPoolSupportEnabled: - # The dictionary will only have one key per VMAX3 + if already_queried: + # The dictionary will only have one key per VMAX # Construct the location info temp_location_info = ( - ("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s" - % {'arrayName': arrayInfo['SerialNumber'], - 'poolName': arrayInfo['PoolName'], - 'slo': arrayInfo['SLO'], - 'workload': arrayInfo['Workload']})) - pool = {'pool_name': poolName, + ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" + % {'arrayName': array_info['SerialNumber'], + 'srpName': array_info['srpName'], + 'slo': array_info['SLO'], + 'workload': array_info['Workload']})) + pool = {'pool_name': pool_name, 'total_capacity_gb': - arrays[arrayInfo['SerialNumber']][0], + 
arrays[array_info['SerialNumber']][0], 'free_capacity_gb': - arrays[arrayInfo['SerialNumber']][1], + arrays[array_info['SerialNumber']][1], 'provisioned_capacity_gb': - arrays[arrayInfo['SerialNumber']][2], - 'QoS_support': True, + arrays[array_info['SerialNumber']][2], + 'QoS_support': False, 'location_info': temp_location_info, - 'consistencygroup_support': True, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'max_over_subscription_ratio': max_oversubscription_ratio, - 'replication_enabled': self.replication_enabled - } - if ( - arrays[arrayInfo['SerialNumber']][3] and - (arrays[arrayInfo['SerialNumber']][3] > - reservedPercentage)): - pool['reserved_percentage'] = ( - arrays[arrayInfo['SerialNumber']][3]) - else: - pool['reserved_percentage'] = reservedPercentage + 'reserved_percentage': reserved_percentage} + if arrays[array_info['SerialNumber']][3]: + if reserved_percentage: + if (arrays[array_info['SerialNumber']][3] > + reserved_percentage): + pool['reserved_percentage'] = ( + arrays[array_info['SerialNumber']][3]) + else: + pool['reserved_percentage'] = ( + arrays[array_info['SerialNumber']][3]) else: - pool = {'pool_name': poolName, + pool = {'pool_name': pool_name, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, 'QoS_support': False, 'location_info': location_info, - 'consistencygroup_support': True, + 'consistencygroup_support': False, 'thin_provisioning_support': True, 'thick_provisioning_support': False, + 'consistent_group_snapshot_enabled': False, 'max_over_subscription_ratio': max_oversubscription_ratio, - 'replication_enabled': self.replication_enabled - } - if ( - array_reserve_percent and - (array_reserve_percent > reservedPercentage)): - pool['reserved_percentage'] = array_reserve_percent - else: - pool['reserved_percentage'] = reservedPercentage + 'reserved_percentage': reserved_percentage} + if array_reserve_percent: + if 
isinstance(reserved_percentage, int): + if array_reserve_percent > reserved_percentage: + pool['reserved_percentage'] = array_reserve_percent + else: + pool['reserved_percentage'] = array_reserve_percent if array_max_over_subscription: pool['max_over_subscription_ratio'] = ( @@ -1053,23 +588,21 @@ class VMAXCommon(object): data = {'vendor_name': "Dell EMC", 'driver_version': self.version, 'storage_protocol': 'unknown', - 'volume_backend_name': self.pool_info['backend_name'] or + 'volume_backend_name': backend_name or self.__class__.__name__, # Use zero capacities here so we always use a pool. 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, - 'replication_enabled': self.replication_enabled, - 'replication_targets': self.replication_targets, 'pools': pools} return data - def _update_srp_stats(self, arrayInfo): + def _update_srp_stats(self, array_info): """Update SRP stats. - :param arrayInfo: array information + :param array_info: array information :returns: location_info :returns: totalManagedSpaceGbs :returns: remainingManagedSpaceGbs @@ -1077,606 +610,34 @@ class VMAXCommon(object): :returns: array_reserve_percent :returns: wlpEnabled """ - (totalManagedSpaceGbs, remainingManagedSpaceGbs, - provisionedManagedSpaceGbs, array_reserve_percent, wlpEnabled) = ( - self.provisionv3.get_srp_pool_stats(self.conn, arrayInfo)) + provisionedManagedSpaceGbs, array_reserve_percent, + wlpEnabled) = ( + self.provision.get_srp_pool_stats( + array_info['SerialNumber'], array_info)) - LOG.info( - "Capacity stats for SRP pool %(poolName)s on array " - "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu, " - "provisioned_capacity_gb=%(provisioned_capacity_gb)lu", - {'poolName': arrayInfo['PoolName'], - 'arrayName': arrayInfo['SerialNumber'], - 'total_capacity_gb': totalManagedSpaceGbs, - 'free_capacity_gb': remainingManagedSpaceGbs, - 'provisioned_capacity_gb': 
provisionedManagedSpaceGbs}) + LOG.info("Capacity stats for SRP pool %(srpName)s on array " + "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " + "free_capacity_gb=%(free_capacity_gb)lu, " + "provisioned_capacity_gb=%(provisioned_capacity_gb)lu", + {'srpName': array_info['srpName'], + 'arrayName': array_info['SerialNumber'], + 'total_capacity_gb': totalManagedSpaceGbs, + 'free_capacity_gb': remainingManagedSpaceGbs, + 'provisioned_capacity_gb': provisionedManagedSpaceGbs}) - location_info = ("%(arrayName)s#%(poolName)s#%(slo)s#%(workload)s" - % {'arrayName': arrayInfo['SerialNumber'], - 'poolName': arrayInfo['PoolName'], - 'slo': arrayInfo['SLO'], - 'workload': arrayInfo['Workload']}) + location_info = ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" + % {'arrayName': array_info['SerialNumber'], + 'srpName': array_info['srpName'], + 'slo': array_info['SLO'], + 'workload': array_info['Workload']}) return (location_info, totalManagedSpaceGbs, remainingManagedSpaceGbs, provisionedManagedSpaceGbs, array_reserve_percent, wlpEnabled) - def retype(self, ctxt, volume, new_type, diff, host): - """Migrate volume to another host using retype. - - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param new_type: the new volume type. - :param diff: Unused parameter. - :param host: The host dict holding the relevant target(destination) - information - :returns: boolean -- True if retype succeeded, False if error - """ - - volumeName = volume['name'] - volumeStatus = volume['status'] - LOG.info("Migrating using retype Volume: %(volume)s.", - {'volume': volumeName}) - - extraSpecs = self._initial_setup(volume) - self.conn = self._get_ecom_connection() - - volumeInstance = self._find_lun(volume) - if volumeInstance is None: - LOG.error("Volume %(name)s not found on the array. 
" - "No volume to migrate using retype.", - {'name': volumeName}) - return False - - if extraSpecs[ISV3]: - if self.utils.is_replication_enabled(extraSpecs): - LOG.error("Volume %(name)s is replicated - " - "Replicated volumes are not eligible for " - "storage assisted retype. Host assisted " - "retype is supported.", - {'name': volumeName}) - return False - - return self._slo_workload_migration(volumeInstance, volume, host, - volumeName, volumeStatus, - new_type, extraSpecs) - else: - return self._pool_migration(volumeInstance, volume, host, - volumeName, volumeStatus, - extraSpecs[FASTPOLICY], - new_type, extraSpecs) - - def migrate_volume(self, ctxt, volume, host, new_type=None): - """Migrate volume to another host. - - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param host: the host dict holding the relevant target(destination) - information - :param new_type: None - :returns: boolean -- Always returns True - :returns: dict -- Empty dict {} - """ - LOG.warning("The VMAX plugin only supports Retype. " - "If a pool based migration is necessary " - "this will happen on a Retype " - "From the command line: " - "cinder --os-volume-api-version 2 retype " - " --migration-policy on-demand") - return True, {} - - def _migrate_volume( - self, volume, volumeInstance, targetPoolName, - targetFastPolicyName, sourceFastPolicyName, extraSpecs, - new_type=None): - """Migrate volume to another host. 
- - :param volume: the volume object including the volume_type_id - :param volumeInstance: the volume instance - :param targetPoolName: the target poolName - :param targetFastPolicyName: the target FAST policy name, can be None - :param sourceFastPolicyName: the source FAST policy name, can be None - :param extraSpecs: extra specifications - :param new_type: None - :returns: boolean -- True/False - :returns: list -- empty list - """ - volumeName = volume['name'] - storageSystemName = volumeInstance['SystemName'] - - sourcePoolInstanceName = self.utils.get_assoc_pool_from_volume( - self.conn, volumeInstance.path) - - moved, rc = self._migrate_volume_from( - volume, volumeInstance, targetPoolName, sourceFastPolicyName, - extraSpecs) - - if moved is False and sourceFastPolicyName is not None: - # Return the volume to the default source fast policy storage - # group because the migrate was unsuccessful. - LOG.warning( - "Failed to migrate: %(volumeName)s from " - "default source storage group " - "for FAST policy: %(sourceFastPolicyName)s. " - "Attempting cleanup... ", - {'volumeName': volumeName, - 'sourceFastPolicyName': sourceFastPolicyName}) - if sourcePoolInstanceName == self.utils.get_assoc_pool_from_volume( - self.conn, volumeInstance.path): - self._migrate_cleanup(self.conn, volumeInstance, - storageSystemName, sourceFastPolicyName, - volumeName, extraSpecs) - else: - # Migrate was successful but still issues. 
- self._migrate_rollback( - self.conn, volumeInstance, storageSystemName, - sourceFastPolicyName, volumeName, sourcePoolInstanceName, - extraSpecs) - - return moved - - if targetFastPolicyName == 'None': - targetFastPolicyName = None - - if moved is True and targetFastPolicyName is not None: - if not self._migrate_volume_fast_target( - volumeInstance, storageSystemName, - targetFastPolicyName, volumeName, extraSpecs): - LOG.warning( - "Attempting a rollback of: %(volumeName)s to " - "original pool %(sourcePoolInstanceName)s.", - {'volumeName': volumeName, - 'sourcePoolInstanceName': sourcePoolInstanceName}) - self._migrate_rollback( - self.conn, volumeInstance, storageSystemName, - sourceFastPolicyName, volumeName, sourcePoolInstanceName, - extraSpecs) - - if rc == 0: - moved = True - - return moved - - def _migrate_rollback(self, conn, volumeInstance, - storageSystemName, sourceFastPolicyName, - volumeName, sourcePoolInstanceName, extraSpecs): - """Full rollback. - - Failed on final step on adding migrated volume to new target - default storage group for the target FAST policy. - - :param conn: connection info to ECOM - :param volumeInstance: the volume instance - :param storageSystemName: the storage system name - :param sourceFastPolicyName: the source FAST policy name - :param volumeName: the volume Name - :param sourcePoolInstanceName: the instance name of the source pool - :param extraSpecs: extra specifications - """ - - LOG.warning("_migrate_rollback on : %(volumeName)s.", - {'volumeName': volumeName}) - - storageRelocationService = self.utils.find_storage_relocation_service( - conn, storageSystemName) - - try: - self.provision.migrate_volume_to_storage_pool( - conn, storageRelocationService, volumeInstance.path, - sourcePoolInstanceName, extraSpecs) - except Exception: - LOG.error( - "Failed to return volume %(volumeName)s to " - "original storage pool. 
Please contact your system " - "administrator to return it to the correct location.", - {'volumeName': volumeName}) - - if sourceFastPolicyName is not None: - self.add_to_default_SG( - conn, volumeInstance, storageSystemName, sourceFastPolicyName, - volumeName, extraSpecs) - - def _migrate_cleanup(self, conn, volumeInstance, - storageSystemName, sourceFastPolicyName, - volumeName, extraSpecs): - """If the migrate fails, put volume back to source FAST SG. - - :param conn: connection info to ECOM - :param volumeInstance: the volume instance - :param storageSystemName: the storage system name - :param sourceFastPolicyName: the source FAST policy name - :param volumeName: the volume Name - :param extraSpecs: extra specifications - :returns: boolean -- True/False - """ - - LOG.warning("_migrate_cleanup on : %(volumeName)s.", - {'volumeName': volumeName}) - return_to_default = True - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - conn, storageSystemName)) - - # Check to see what SG it is in. - assocStorageGroupInstanceNames = ( - self.utils.get_storage_groups_from_volume(conn, - volumeInstance.path)) - # This is the SG it should be in. - defaultStorageGroupInstanceName = ( - self.fast.get_policy_default_storage_group( - conn, controllerConfigurationService, sourceFastPolicyName)) - - for assocStorageGroupInstanceName in assocStorageGroupInstanceNames: - # It is in the incorrect storage group. - if (assocStorageGroupInstanceName != - defaultStorageGroupInstanceName): - self.provision.remove_device_from_storage_group( - conn, controllerConfigurationService, - assocStorageGroupInstanceName, - volumeInstance.path, volumeName, extraSpecs) - else: - # The volume is already in the default. 
- return_to_default = False - if return_to_default: - self.add_to_default_SG( - conn, volumeInstance, storageSystemName, sourceFastPolicyName, - volumeName, extraSpecs) - return return_to_default - - def _migrate_volume_fast_target( - self, volumeInstance, storageSystemName, - targetFastPolicyName, volumeName, extraSpecs): - """If the target host is FAST enabled. - - If the target host is FAST enabled then we need to add it to the - default storage group for that policy. - - :param volumeInstance: the volume instance - :param storageSystemName: the storage system name - :param targetFastPolicyName: the target fast policy name - :param volumeName: the volume name - :param extraSpecs: extra specifications - :returns: boolean -- True/False - """ - falseRet = False - LOG.info( - "Adding volume: %(volumeName)s to default storage group " - "for FAST policy: %(fastPolicyName)s.", - {'volumeName': volumeName, - 'fastPolicyName': targetFastPolicyName}) - - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - - defaultStorageGroupInstanceName = ( - self.fast.get_or_create_default_storage_group( - self.conn, controllerConfigurationService, - targetFastPolicyName, volumeInstance, extraSpecs)) - if defaultStorageGroupInstanceName is None: - LOG.error( - "Unable to create or get default storage group for FAST policy" - ": %(fastPolicyName)s.", - {'fastPolicyName': targetFastPolicyName}) - - return falseRet - - defaultStorageGroupInstanceName = ( - self.fast.add_volume_to_default_storage_group_for_fast_policy( - self.conn, controllerConfigurationService, volumeInstance, - volumeName, targetFastPolicyName, extraSpecs)) - if defaultStorageGroupInstanceName is None: - LOG.error( - "Failed to verify that volume was added to storage group for " - "FAST policy: %(fastPolicyName)s.", - {'fastPolicyName': targetFastPolicyName}) - return falseRet - - return True - - def _migrate_volume_from(self, volume, volumeInstance, 
- targetPoolName, sourceFastPolicyName, - extraSpecs): - """Check FAST policies and migrate from source pool. - - :param volume: the volume object including the volume_type_id - :param volumeInstance: the volume instance - :param targetPoolName: the target poolName - :param sourceFastPolicyName: the source FAST policy name, can be None - :param extraSpecs: extra specifications - :returns: boolean -- True/False - :returns: int -- the return code from migrate operation - """ - falseRet = (False, -1) - volumeName = volume['name'] - storageSystemName = volumeInstance['SystemName'] - - LOG.debug("sourceFastPolicyName is : %(sourceFastPolicyName)s.", - {'sourceFastPolicyName': sourceFastPolicyName}) - - # If the source volume is FAST enabled it must first be removed - # from the default storage group for that policy. - if sourceFastPolicyName is not None: - self.remove_from_default_SG( - self.conn, volumeInstance, storageSystemName, - sourceFastPolicyName, volumeName, extraSpecs) - - # Migrate from one pool to another. - storageRelocationService = self.utils.find_storage_relocation_service( - self.conn, storageSystemName) - - targetPoolInstanceName = self.utils.get_pool_by_name( - self.conn, targetPoolName, storageSystemName) - if targetPoolInstanceName is None: - LOG.error( - "Error finding target pool instance name for pool: " - "%(targetPoolName)s.", - {'targetPoolName': targetPoolName}) - return falseRet - try: - rc = self.provision.migrate_volume_to_storage_pool( - self.conn, storageRelocationService, volumeInstance.path, - targetPoolInstanceName, extraSpecs) - except Exception: - # Rollback by deleting the volume if adding the volume to the - # default storage group were to fail. - LOG.exception( - "Error migrating volume: %(volumename)s. 
" - "to target pool %(targetPoolName)s.", - {'volumename': volumeName, - 'targetPoolName': targetPoolName}) - return falseRet - - # Check that the volume is now migrated to the correct storage pool, - # if it is terminate the migrate session. - foundPoolInstanceName = self.utils.get_assoc_pool_from_volume( - self.conn, volumeInstance.path) - - if (foundPoolInstanceName is None or - (foundPoolInstanceName['InstanceID'] != - targetPoolInstanceName['InstanceID'])): - LOG.error( - "Volume : %(volumeName)s. was not successfully migrated to " - "target pool %(targetPoolName)s.", - {'volumeName': volumeName, - 'targetPoolName': targetPoolName}) - return falseRet - - else: - LOG.debug("Terminating migration session on: %(volumeName)s.", - {'volumeName': volumeName}) - self.provision._terminate_migrate_session( - self.conn, volumeInstance.path, extraSpecs) - - if rc == 0: - moved = True - - return moved, rc - - def remove_from_default_SG( - self, conn, volumeInstance, storageSystemName, - sourceFastPolicyName, volumeName, extraSpecs): - """For FAST, remove volume from default storage group. - - :param conn: connection info to ECOM - :param volumeInstance: the volume instance - :param storageSystemName: the storage system name - :param sourceFastPolicyName: the source FAST policy name - :param volumeName: the volume Name - :param extraSpecs: extra specifications - :raises VolumeBackendAPIException: - """ - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - conn, storageSystemName)) - try: - defaultStorageGroupInstanceName = ( - self.masking.remove_device_from_default_storage_group( - conn, controllerConfigurationService, - volumeInstance.path, volumeName, sourceFastPolicyName, - extraSpecs)) - except Exception: - exceptionMessage = (_( - "Failed to remove: %(volumename)s. 
" - "from the default storage group for " - "FAST policy %(fastPolicyName)s.") - % {'volumename': volumeName, - 'fastPolicyName': sourceFastPolicyName}) - - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - if defaultStorageGroupInstanceName is None: - LOG.warning( - "The volume: %(volumename)s " - "was not first part of the default storage " - "group for FAST policy %(fastPolicyName)s.", - {'volumename': volumeName, - 'fastPolicyName': sourceFastPolicyName}) - - def add_to_default_SG( - self, conn, volumeInstance, storageSystemName, - targetFastPolicyName, volumeName, extraSpecs): - """For FAST, add volume to default storage group. - - :param conn: connection info to ECOM - :param volumeInstance: the volume instance - :param storageSystemName: the storage system name - :param targetFastPolicyName: the target FAST policy name - :param volumeName: the volume Name - :param extraSpecs: extra specifications - """ - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - conn, storageSystemName)) - assocDefaultStorageGroupName = ( - self.fast - .add_volume_to_default_storage_group_for_fast_policy( - conn, controllerConfigurationService, volumeInstance, - volumeName, targetFastPolicyName, extraSpecs)) - if assocDefaultStorageGroupName is None: - LOG.error( - "Failed to add %(volumeName)s " - "to default storage group for fast policy " - "%(fastPolicyName)s.", - {'volumeName': volumeName, - 'fastPolicyName': targetFastPolicyName}) - - def _is_valid_for_storage_assisted_migration_v3( - self, volumeInstanceName, host, sourceArraySerialNumber, - sourcePoolName, volumeName, volumeStatus, sgName, - doChangeCompression): - """Check if volume is suitable for storage assisted (pool) migration. 
- - :param volumeInstanceName: the volume instance id - :param host: the host object - :param sourceArraySerialNumber: the array serial number of - the original volume - :param sourcePoolName: the pool name of the original volume - :param volumeName: the name of the volume to be migrated - :param volumeStatus: the status of the volume - :param sgName: storage group name - :param doChangeCompression: do change compression - :returns: boolean -- True/False - :returns: string -- targetSlo - :returns: string -- targetWorkload - """ - falseRet = (False, None, None) - if 'location_info' not in host['capabilities']: - LOG.error('Error getting array, pool, SLO and workload.') - return falseRet - info = host['capabilities']['location_info'] - - LOG.debug("Location info is : %(info)s.", - {'info': info}) - try: - infoDetail = info.split('#') - targetArraySerialNumber = infoDetail[0] - targetPoolName = infoDetail[1] - targetSlo = infoDetail[2] - targetWorkload = infoDetail[3] - except KeyError: - LOG.error("Error parsing array, pool, SLO and workload.") - - if targetArraySerialNumber not in sourceArraySerialNumber: - LOG.error( - "The source array : %(sourceArraySerialNumber)s does not " - "match the target array: %(targetArraySerialNumber)s " - "skipping storage-assisted migration.", - {'sourceArraySerialNumber': sourceArraySerialNumber, - 'targetArraySerialNumber': targetArraySerialNumber}) - return falseRet - - if targetPoolName not in sourcePoolName: - LOG.error( - "Only SLO/workload migration within the same SRP Pool " - "is supported in this version " - "The source pool : %(sourcePoolName)s does not " - "match the target array: %(targetPoolName)s. 
" - "Skipping storage-assisted migration.", - {'sourcePoolName': sourcePoolName, - 'targetPoolName': targetPoolName}) - return falseRet - - foundStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume( - self.conn, volumeInstanceName, sgName)) - if foundStorageGroupInstanceName is None: - LOG.warning( - "Volume: %(volumeName)s is not currently " - "belonging to any storage group.", - {'volumeName': volumeName}) - - else: - storageGroupInstance = self.conn.GetInstance( - foundStorageGroupInstanceName) - emcFastSetting = self.utils._get_fast_settings_from_storage_group( - storageGroupInstance) - targetCombination = ("%(targetSlo)s+%(targetWorkload)s" - % {'targetSlo': targetSlo, - 'targetWorkload': targetWorkload}) - if targetCombination in emcFastSetting: - # Check if migration is from compression to non compression - # of vice versa - if not doChangeCompression: - LOG.error( - "No action required. Volume: %(volumeName)s is " - "already part of slo/workload combination: " - "%(targetCombination)s.", - {'volumeName': volumeName, - 'targetCombination': targetCombination}) - return falseRet - - return (True, targetSlo, targetWorkload) - - def _is_valid_for_storage_assisted_migration( - self, volumeInstanceName, host, sourceArraySerialNumber, - volumeName, volumeStatus): - """Check if volume is suitable for storage assisted (pool) migration. 
- - :param volumeInstanceName: the volume instance id - :param host: the host object - :param sourceArraySerialNumber: the array serial number of - the original volume - :param volumeName: the name of the volume to be migrated - :param volumeStatus: the status of the volume e.g - :returns: boolean -- True/False - :returns: string -- targetPool - :returns: string -- targetFastPolicy - """ - falseRet = (False, None, None) - if 'location_info' not in host['capabilities']: - LOG.error("Error getting target pool name and array.") - return falseRet - info = host['capabilities']['location_info'] - - LOG.debug("Location info is : %(info)s.", - {'info': info}) - try: - infoDetail = info.split('#') - targetArraySerialNumber = infoDetail[0] - targetPoolName = infoDetail[1] - targetFastPolicy = infoDetail[2] - except KeyError: - LOG.error( - "Error parsing target pool name, array, and fast policy.") - - if targetArraySerialNumber not in sourceArraySerialNumber: - LOG.error( - "The source array : %(sourceArraySerialNumber)s does not " - "match the target array: %(targetArraySerialNumber)s, " - "skipping storage-assisted migration.", - {'sourceArraySerialNumber': sourceArraySerialNumber, - 'targetArraySerialNumber': targetArraySerialNumber}) - return falseRet - - # Get the pool from the source array and check that is different - # to the pool in the target array. - assocPoolInstanceName = self.utils.get_assoc_pool_from_volume( - self.conn, volumeInstanceName) - assocPoolInstance = self.conn.GetInstance( - assocPoolInstanceName) - if assocPoolInstance['ElementName'] == targetPoolName: - LOG.error( - "No action required. 
Volume: %(volumeName)s is " - "already part of pool: %(pool)s.", - {'volumeName': volumeName, - 'pool': targetPoolName}) - return falseRet - - LOG.info("Volume status is: %s.", volumeStatus) - if (host['capabilities']['storage_protocol'] != self.protocol and - (volumeStatus != 'available' and volumeStatus != 'retyping')): - LOG.error( - "Only available volumes can be migrated between " - "different protocols.") - return falseRet - - return (True, targetPoolName, targetFastPolicy) - - def _set_config_file_and_get_extra_specs(self, volume, volumeTypeId=None): + def _set_config_file_and_get_extra_specs(self, volume, + volume_type_id=None): """Given the volume object get the associated volumetype. Given the volume object get the associated volumetype and the @@ -1684,3025 +645,849 @@ class VMAXCommon(object): Based on the name of the config group, register the config file :param volume: the volume object including the volume_type_id - :param volumeTypeId: Optional override of volume['volume_type_id'] + :param volume_type_id: Optional override of volume.volume_type_id :returns: dict -- the extra specs dict :returns: string -- configuration file """ - extraSpecs = self.utils.get_volumetype_extraspecs( - volume, volumeTypeId) - qosSpecs = self.utils.get_volumetype_qosspecs(volume, volumeTypeId) - configGroup = None + extra_specs = self.utils.get_volumetype_extra_specs( + volume, volume_type_id) + config_group = None # If there are no extra specs then the default case is assumed. 
- if extraSpecs: - configGroup = self.configuration.config_group - configurationFile = self._register_config_file_from_config_group( - configGroup) - self.multiPoolSupportEnabled = ( - self._get_multi_pool_support_enabled_flag()) - extraSpecs[MULTI_POOL_SUPPORT] = self.multiPoolSupportEnabled - if extraSpecs.get('replication_enabled') == ' True': - extraSpecs[IS_RE] = True - return extraSpecs, configurationFile, qosSpecs + if extra_specs: + config_group = self.configuration.config_group + config_file = self._register_config_file_from_config_group( + config_group) + return extra_specs, config_file - def _get_multi_pool_support_enabled_flag(self): - """Reads the configuration for multi pool support flag. - - :returns: MultiPoolSupportEnabled flag - """ - - confString = ( - self.configuration.safe_get('multi_pool_support')) - retVal = False - stringTrue = "True" - if confString: - if confString.lower() == stringTrue.lower(): - retVal = True - return retVal - - def _get_initiator_check_flag(self): - """Reads the configuration for initator_check flag. - - :returns: flag - """ - - confString = ( - self.configuration.safe_get('initiator_check')) - retVal = False - stringTrue = "True" - if confString: - if confString.lower() == stringTrue.lower(): - retVal = True - return retVal - - def _get_ecom_connection(self): - """Get the ecom connection. 
- - :returns: pywbem.WBEMConnection -- conn, the ecom connection - :raises VolumeBackendAPIException: - """ - ecomx509 = None - if self.ecomUseSSL: - if (self.configuration.safe_get('driver_client_cert_key') and - self.configuration.safe_get('driver_client_cert')): - ecomx509 = {"key_file": - self.configuration.safe_get( - 'driver_client_cert_key'), - "cert_file": - self.configuration.safe_get( - 'driver_client_cert')} - pywbem.cim_http.wbem_request = https.wbem_request - conn = pywbem.WBEMConnection( - self.url, - (self.user, self.passwd), - default_namespace='root/emc', - x509=ecomx509, - ca_certs=self.configuration.safe_get('driver_ssl_cert_path'), - no_verification=not self.configuration.safe_get( - 'driver_ssl_cert_verify')) - - else: - conn = pywbem.WBEMConnection( - self.url, - (self.user, self.passwd), - default_namespace='root/emc') - - if conn is None: - exception_message = (_("Cannot connect to ECOM server.")) - raise exception.VolumeBackendAPIException(data=exception_message) - - return conn - - def _find_pool_in_array(self, arrayStr, poolNameInStr, isV3): - """Find a pool based on the pool name on a given array. 
- - :param arrayStr: the array Serial number (String) - :param poolNameInStr: the name of the poolname (String) - :param isv3: True/False - :returns: foundPoolInstanceName - the CIM Instance Name of the Pool - :returns: string -- systemNameStr - :raises VolumeBackendAPIException: - """ - foundPoolInstanceName = None - systemNameStr = None - - storageSystemInstanceName = self.utils.find_storageSystem( - self.conn, arrayStr) - - if isV3: - foundPoolInstanceName, systemNameStr = ( - self.utils.get_pool_and_system_name_v3( - self.conn, storageSystemInstanceName, poolNameInStr)) - else: - foundPoolInstanceName, systemNameStr = ( - self.utils.get_pool_and_system_name_v2( - self.conn, storageSystemInstanceName, poolNameInStr)) - - if foundPoolInstanceName is None: - exceptionMessage = (_("Pool %(poolNameInStr)s is not found.") - % {'poolNameInStr': poolNameInStr}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - if systemNameStr is None: - exception_message = (_("Storage system not found for pool " - "%(poolNameInStr)s.") - % {'poolNameInStr': poolNameInStr}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Pool: %(pool)s SystemName: %(systemname)s.", - {'pool': foundPoolInstanceName, - 'systemname': systemNameStr}) - return foundPoolInstanceName, systemNameStr - - def _find_lun(self, volume): - """Given the volume get the instance from it. + def _find_device_on_array(self, volume, extra_specs): + """Given the volume get the VMAX device Id. 
:param volume: volume object - :returns: foundVolumeinstance + :param extra_specs: the extra Specs + :returns: array, device_id """ - foundVolumeinstance = None - targetVolName = None - volumename = volume['id'] + founddevice_id = None + volume_name = volume.id - loc = volume['provider_location'] - if self.conn is None: - self.conn = self._get_ecom_connection() + loc = volume.provider_location if isinstance(loc, six.string_types): name = ast.literal_eval(loc) - keys = name['keybindings'] - systemName = keys['SystemName'] - admin_metadata = {} - if 'admin_metadata' in volume: - admin_metadata = volume.admin_metadata - if 'targetVolumeName' in admin_metadata: - targetVolName = admin_metadata['targetVolumeName'] - prefix1 = 'SYMMETRIX+' - prefix2 = 'SYMMETRIX-+-' - smiversion = self.utils.get_smi_version(self.conn) - if smiversion > SMI_VERSION_8 and prefix1 in systemName: - keys['SystemName'] = systemName.replace(prefix1, prefix2) - name['keybindings'] = keys + array = extra_specs[utils.ARRAY] + device_id = name['device_id'] + element_name = self.utils.get_volume_element_name( + volume_name) + founddevice_id = self.rest.find_volume_device_id( + array, element_name) - instancename = self.utils.get_instance_name( - name['classname'], name['keybindings']) - LOG.debug("Volume instance name: %(in)s", - {'in': instancename}) # Allow for an external app to delete the volume. - try: - foundVolumeinstance = self.conn.GetInstance(instancename) - volumeElementName = (self.utils. 
- get_volume_element_name(volumename)) - if not (volumeElementName == - foundVolumeinstance['ElementName']): - # Check if it is a vol created as part of a clone group - if not (targetVolName == - foundVolumeinstance['ElementName']): - foundVolumeinstance = None - except Exception as e: - LOG.info("Exception in retrieving volume: %(e)s.", - {'e': e}) - foundVolumeinstance = None + if device_id and device_id != founddevice_id: + founddevice_id = None - if foundVolumeinstance is None: - LOG.debug("Volume %(volumename)s not found on the array.", - {'volumename': volumename}) + if founddevice_id is None: + LOG.debug("Volume %(volume_name)s not found on the array.", + {'volume_name': volume_name}) else: - LOG.debug("Volume name: %(volumename)s Volume instance: " - "%(foundVolumeinstance)s.", - {'volumename': volumename, - 'foundVolumeinstance': foundVolumeinstance}) + LOG.debug("Volume name: %(volume_name)s Volume device id: " + "%(founddevice_id)s.", + {'volume_name': volume_name, + 'founddevice_id': founddevice_id}) - return foundVolumeinstance + return founddevice_id - def _find_storage_sync_sv_sv(self, snapshot, volume, extraSpecs, - waitforsync=True): - """Find the storage synchronized name. - - :param snapshot: snapshot object - :param volume: volume object - :param extraSpecs: extra specifications - :param waitforsync: boolean -- Wait for Solutions Enabler sync. 
- :returns: string -- foundsyncname - :returns: string -- storage_system - """ - snapshotname = snapshot['name'] - volumename = volume['name'] - LOG.debug("Source: %(volumename)s Target: %(snapshotname)s.", - {'volumename': volumename, 'snapshotname': snapshotname}) - - snapshot_instance = self._find_lun(snapshot) - volume_instance = self._find_lun(volume) - storage_system = volume_instance['SystemName'] - classname = 'SE_StorageSynchronized_SV_SV' - bindings = {'SyncedElement': snapshot_instance.path, - 'SystemElement': volume_instance.path} - foundsyncname = self.utils.get_instance_name(classname, bindings) - - if foundsyncname is None: - LOG.debug( - "Source: %(volumename)s Target: %(snapshotname)s. " - "Storage Synchronized not found.", - {'volumename': volumename, - 'snapshotname': snapshotname}) - else: - LOG.debug("Storage system: %(storage_system)s. " - "Storage Synchronized instance: %(sync)s.", - {'storage_system': storage_system, - 'sync': foundsyncname}) - # Wait for SE_StorageSynchronized_SV_SV to be fully synced. 
- if waitforsync: - self.utils.wait_for_sync(self.conn, foundsyncname, - extraSpecs) - - return foundsyncname, storage_system - - def _find_initiator_names(self, connector): - foundinitiatornames = [] - iscsi = 'iscsi' - fc = 'fc' - name = 'initiator name' - if self.protocol.lower() == iscsi and connector['initiator']: - foundinitiatornames.append(connector['initiator']) - elif self.protocol.lower() == fc and connector['wwpns']: - for wwn in connector['wwpns']: - foundinitiatornames.append(wwn) - name = 'world wide port names' - - if foundinitiatornames is None or len(foundinitiatornames) == 0: - msg = (_("Error finding %s.") % name) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - LOG.debug("Found %(name)s: %(initiator)s.", - {'name': name, - 'initiator': foundinitiatornames}) - return foundinitiatornames - - def _wrap_find_device_number(self, volume, host): - return self.find_device_number(volume, host) - - def find_device_number(self, volume, host): - """Given the volume dict find a device number. - - Find a device number that a host can see - for a volume. + def find_host_lun_id(self, volume, host, extra_specs): + """Given the volume dict find the host lun id for a volume. 
:param volume: the volume dict :param host: host from connector + :param extra_specs: the extra specs :returns: dict -- the data dict """ - maskedvols = [] - data = {} - isLiveMigration = False - source_data = {} - foundController = None - foundNumDeviceNumber = None - foundMaskingViewName = None - volumeName = volume['name'] - volumeInstance = self._find_lun(volume) - storageSystemName = volumeInstance['SystemName'] - if not volumeInstance: - return data, isLiveMigration, source_data - - unitnames = self.conn.ReferenceNames( - volumeInstance.path, - ResultClass='CIM_ProtocolControllerForUnit') - - for unitname in unitnames: - controller = unitname['Antecedent'] - classname = controller['CreationClassName'] - index = classname.find('Symm_LunMaskingView') - if index > -1: - unitinstance = self.conn.GetInstance(unitname, - LocalOnly=False) - if unitinstance['DeviceNumber']: - numDeviceNumber = int(unitinstance['DeviceNumber'], 16) - else: - LOG.debug( - "Device number not found for volume " - "%(volumeName)s %(volumeInstance)s.", - {'volumeName': volumeName, - 'volumeInstance': volumeInstance.path}) - break - foundNumDeviceNumber = numDeviceNumber - foundController = controller - controllerInstance = self.conn.GetInstance(controller, - LocalOnly=False) - propertiesList = controllerInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'ElementName': - cimProperties = properties[1] - foundMaskingViewName = cimProperties.value - - devicedict = {'hostlunid': foundNumDeviceNumber, - 'storagesystem': storageSystemName, - 'maskingview': foundMaskingViewName, - 'controller': foundController} - maskedvols.append(devicedict) - - if not maskedvols: - LOG.debug( - "Device number not found for volume " - "%(volumeName)s %(volumeInstance)s.", - {'volumeName': volumeName, - 'volumeInstance': volumeInstance.path}) - else: + maskedvols = {} + volume_name = volume.name + device_id = self._find_device_on_array(volume, extra_specs) + if device_id: + array 
= extra_specs[utils.ARRAY] host = self.utils.get_host_short_name(host) - hoststr = ("-%(host)s-" - % {'host': host}) - for maskedvol in maskedvols: - if hoststr.lower() in maskedvol['maskingview'].lower(): - data = maskedvol - if not data: - if len(maskedvols) > 0: - source_data = maskedvols[0] - LOG.warning( - "Volume is masked but not to host %(host)s as is " - "expected. Assuming live migration.", - {'host': hoststr}) - isLiveMigration = True + # return only masking views for this host + maskingviews = self.get_masking_views_from_volume( + array, device_id, host) - LOG.debug("Device info: %(data)s.", {'data': data}) - return data, isLiveMigration, source_data + for maskingview in maskingviews: + host_lun_id = self.rest.find_mv_connections_for_vol( + array, maskingview, device_id) + if host_lun_id is not None: + devicedict = {'hostlunid': host_lun_id, + 'maskingview': maskingview, + 'array': array} + maskedvols = devicedict + if not maskedvols: + LOG.debug( + "Host lun id not found for volume: %(volume_name)s " + "with the device id: %(device_id)s.", + {'volume_name': volume_name, + 'device_id': device_id}) + else: + LOG.debug("Device info: %(maskedvols)s.", + {'maskedvols': maskedvols}) + else: + exception_message = (_("Cannot retrieve volume %(vol)s " + "from the array.") % {'vol': volume_name}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(exception_message) - def get_target_wwns_list(self, storage_system, volume, connector): - """Find target WWN list. + return maskedvols - :param storage_system: the storage system name - :param connector: the connector dict - :returns: list -- targetWwns, the target WWN list - :raises: VolumeBackendAPIException + def get_masking_views_from_volume(self, array, device_id, host): + """Retrieve masking view list for a volume. 
+ + :param array: array serial number + :param device_id: the volume device id + :param host: the host + :return: masking view list """ - targetWwns = set() - try: - fc_targets = self.get_target_wwns_from_masking_view( - storage_system, volume, connector) - except Exception: - exception_message = _("Unable to get fc targets.") - raise exception.VolumeBackendAPIException( - data=exception_message) + LOG.debug("Getting masking views from volume") + maskingview_list = [] + short_host = self.utils.get_host_short_name(host) + storagegrouplist = self.rest.get_storage_groups_from_volume( + array, device_id) + for sg in storagegrouplist: + mvs = self.rest.get_masking_views_from_storage_group( + array, sg) + for mv in mvs: + if short_host.lower() in mv.lower(): + maskingview_list.append(mv) + return maskingview_list - LOG.debug("There are %(len)lu endpoints.", {'len': len(fc_targets)}) - for fc_target in fc_targets: - wwn = fc_target - # Add target wwn to the list if it is not already there. - targetWwns.add(wwn) - - if not targetWwns: - exception_message = (_( - "Unable to get target endpoints.")) - raise exception.VolumeBackendAPIException(data=exception_message) - - LOG.debug("Target WWNs: %(targetWwns)s.", - {'targetWwns': targetWwns}) - - return list(targetWwns) - - def _find_storage_hardwareids( - self, connector, hardwareIdManagementService): - """Find the storage hardware ID instances. 
- - :param connector: the connector dict - :param hardwareIdManagementService: the storage Hardware - management service - :returns: list -- the list of storage hardware ID instances - """ - foundHardwareIdList = [] - wwpns = self._find_initiator_names(connector) - - hardwareIdInstances = ( - self.utils.get_hardware_id_instances_from_array( - self.conn, hardwareIdManagementService)) - for hardwareIdInstance in hardwareIdInstances: - storageId = hardwareIdInstance['StorageID'] - for wwpn in wwpns: - if wwpn.lower() == storageId.lower(): - # Check that the found hardwareId has not been - # deleted. If it has, we don't want to add it to the list. - instance = self.utils.get_existing_instance( - self.conn, hardwareIdInstance.path) - if instance is None: - # HardwareId doesn't exist any more. Skip it. - break - foundHardwareIdList.append(hardwareIdInstance.path) - break - - LOG.debug("Storage Hardware IDs for %(wwpns)s is " - "%(foundInstances)s.", - {'wwpns': wwpns, - 'foundInstances': foundHardwareIdList}) - - return foundHardwareIdList - - def _register_config_file_from_config_group(self, configGroupName): + def _register_config_file_from_config_group(self, config_group_name): """Given the config group name register the file. 
- :param configGroupName: the config group name + :param config_group_name: the config group name :returns: string -- configurationFile - name of the configuration file + :raises VolumeBackendAPIException: """ - if configGroupName is None: + if config_group_name is None: return CINDER_EMC_CONFIG_FILE - if hasattr(self.configuration, 'cinder_emc_config_file'): - configurationFile = self.configuration.cinder_emc_config_file + if hasattr(self.configuration, 'cinder_dell_emc_config_file'): + config_file = self.configuration.cinder_dell_emc_config_file else: - configurationFile = ( + config_file = ( ("%(prefix)s%(configGroupName)s%(postfix)s" % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX, - 'configGroupName': configGroupName, + 'configGroupName': config_group_name, 'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX})) # The file saved in self.configuration may not be the correct one, # double check. - if configGroupName not in configurationFile: - configurationFile = ( + if config_group_name not in config_file: + config_file = ( ("%(prefix)s%(configGroupName)s%(postfix)s" % {'prefix': CINDER_EMC_CONFIG_FILE_PREFIX, - 'configGroupName': configGroupName, + 'configGroupName': config_group_name, 'postfix': CINDER_EMC_CONFIG_FILE_POSTFIX})) - if os.path.isfile(configurationFile): + if os.path.isfile(config_file): LOG.debug("Configuration file : %(configurationFile)s exists.", - {'configurationFile': configurationFile}) + {'configurationFile': config_file}) else: - exceptionMessage = (_( + exception_message = (_( "Configuration file %(configurationFile)s does not exist.") - % {'configurationFile': configurationFile}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) + % {'configurationFile': config_file}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) - return configurationFile + return config_file - def _set_ecom_credentials(self, arrayInfo): - """Given the array record set the ecom 
credentials. - - :param arrayInfo: record - :raises VolumeBackendAPIException: - """ - ip = arrayInfo['EcomServerIp'] - port = arrayInfo['EcomServerPort'] - self.user = arrayInfo['EcomUserName'] - self.passwd = arrayInfo['EcomPassword'] - self.ecomUseSSL = self.configuration.safe_get('driver_use_ssl') - ip_port = ("%(ip)s:%(port)s" - % {'ip': ip, - 'port': port}) - if self.ecomUseSSL: - self.url = ("https://%(ip_port)s" - % {'ip_port': ip_port}) - else: - self.url = ("http://%(ip_port)s" - % {'ip_port': ip_port}) - self.conn = self._get_ecom_connection() - - def _initial_setup(self, volume, volumeTypeId=None, host=None): + def _initial_setup(self, volume, volume_type_id=None): """Necessary setup to accumulate the relevant information. The volume object has a host in which we can parse the config group name. The config group name is the key to our EMC - configuration file. The emc configuration file contains pool name + configuration file. The emc configuration file contains srp name and array name which are mandatory fields. - FastPolicy is optional. - StripedMetaCount is an extra spec that determines whether - the composite volume should be concatenated or striped. 
- - :param volume: the volume Object - :param volumeTypeId: Optional override of volume['volume_type_id'] + :param volume: the volume object + :param volume_type_id: optional override of volume.volume_type_id :returns: dict -- extra spec dict :raises VolumeBackendAPIException: """ try: - extraSpecs, configurationFile, qosSpecs = ( + extra_specs, config_file = ( self._set_config_file_and_get_extra_specs( - volume, volumeTypeId)) - pool = self._validate_pool(volume, extraSpecs=extraSpecs, - host=host) - LOG.debug("Pool returned is %(pool)s.", - {'pool': pool}) - arrayInfo = self.utils.parse_file_to_get_array_map( - configurationFile) - if arrayInfo is not None: - if extraSpecs['MultiPoolSupport'] is True: - poolRecord = arrayInfo[0] - elif len(arrayInfo) == 1: - poolRecord = arrayInfo[0] - else: - poolRecord = self.utils.extract_record(arrayInfo, pool) - - if not poolRecord: - exceptionMessage = (_( - "Unable to get corresponding record for pool.")) + volume, volume_type_id)) + array_info = self.utils.parse_file_to_get_array_map( + config_file) + if not array_info: + exception_message = (_( + "Unable to get corresponding record for srp.")) raise exception.VolumeBackendAPIException( - data=exceptionMessage) + data=exception_message) - self._set_ecom_credentials(poolRecord) - isV3 = self.utils.isArrayV3( - self.conn, poolRecord['SerialNumber']) + self.rest.set_rest_credentials(array_info) - if isV3: - extraSpecs = self._set_v3_extra_specs(extraSpecs, poolRecord) - else: - # V2 extra specs - extraSpecs = self._set_v2_extra_specs(extraSpecs, poolRecord) - if (qosSpecs.get('qos_specs') - and qosSpecs['qos_specs']['consumer'] != "front-end"): - extraSpecs['qos'] = qosSpecs['qos_specs']['specs'] + extra_specs = self._set_vmax_extra_specs(extra_specs, array_info) except Exception: - import sys - exceptionMessage = (_( + exception_message = (_( "Unable to get configuration information necessary to " "create a volume: %(errorMessage)s.") % {'errorMessage': sys.exc_info()[1]}) 
- raise exception.VolumeBackendAPIException(data=exceptionMessage) + raise exception.VolumeBackendAPIException(data=exception_message) + return extra_specs - return extraSpecs - - def _get_pool_and_storage_system(self, extraSpecs): - """Given the extra specs get the pool and storage system name. - - :param extraSpecs: extra specifications - :returns: poolInstanceName The pool instance name - :returns: string -- the storage system name - :raises VolumeBackendAPIException: - """ - - try: - array = extraSpecs[ARRAY] - poolInstanceName, storageSystemStr = self._find_pool_in_array( - array, extraSpecs[POOL], extraSpecs[ISV3]) - except Exception: - exceptionMessage = (_( - "You must supply an array in your EMC configuration file.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - if poolInstanceName is None or storageSystemStr is None: - exceptionMessage = (_( - "Cannot get necessary pool or storage system information.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return poolInstanceName, storageSystemStr - - def _populate_masking_dict(self, volume, connector, extraSpecs): - """Get all the names of the maskingView and subComponents. + def _populate_masking_dict(self, volume, connector, extra_specs): + """Get all the names of the maskingview and sub-components. 
:param volume: the volume object :param connector: the connector object - :param extraSpecs: extra specifications + :param extra_specs: extra specifications :returns: dict -- a dictionary with masking view information """ - maskingViewDict = {} - hostName = connector['host'] - uniqueName = self.utils.generate_unique_trunc_pool(extraSpecs[POOL]) - isV3 = extraSpecs[ISV3] - maskingViewDict['isV3'] = isV3 + masking_view_dict = {} + host_name = connector['host'] + unique_name = self.utils.truncate_string(extra_specs[utils.SRP], 12) protocol = self.utils.get_short_protocol_type(self.protocol) - shortHostName = self.utils.get_host_short_name(hostName) - if isV3: - maskingViewDict['isCompressionDisabled'] = False - maskingViewDict['replication_enabled'] = False - slo = extraSpecs[SLO] - workload = extraSpecs[WORKLOAD] - rep_enabled = self.utils.is_replication_enabled(extraSpecs) - maskingViewDict['slo'] = slo - maskingViewDict['workload'] = workload - maskingViewDict['pool'] = uniqueName - if slo: - prefix = ( - ("OS-%(shortHostName)s-%(poolName)s-%(slo)s-" - "%(workload)s-%(protocol)s" - % {'shortHostName': shortHostName, - 'poolName': uniqueName, - 'slo': slo, - 'workload': workload, - 'protocol': protocol})) - doDisableCompression = self.utils.is_compression_disabled( - extraSpecs) - if doDisableCompression: - prefix = ("%(prefix)s-CD" - % {'prefix': prefix}) - maskingViewDict['isCompressionDisabled'] = True - else: - prefix = ( - ("OS-%(shortHostName)s-No_SLO-%(protocol)s" - % {'shortHostName': shortHostName, - 'protocol': protocol})) - if rep_enabled: - prefix += "-RE" - maskingViewDict['replication_enabled'] = True - else: - maskingViewDict['fastPolicy'] = extraSpecs[FASTPOLICY] - if maskingViewDict['fastPolicy']: - uniqueName = self.utils.generate_unique_trunc_fastpolicy( - maskingViewDict['fastPolicy']) + '-FP' - prefix = ( - ("OS-%(shortHostName)s-%(poolName)s-%(protocol)s" - % {'shortHostName': shortHostName, - 'poolName': uniqueName, - 'protocol': protocol})) - 
- maskingViewDict['sgGroupName'] = ("%(prefix)s-SG" - % {'prefix': prefix}) - - maskingViewDict['maskingViewName'] = ("%(prefix)s-MV" - % {'prefix': prefix}) - - maskingViewDict['maskingViewNameLM'] = ("%(prefix)s-%(volid)s-MV" - % {'prefix': prefix, - 'volid': volume['id'][:8]}) - volumeName = volume['name'] - volumeInstance = self._find_lun(volume) - storageSystemName = volumeInstance['SystemName'] - - maskingViewDict['controllerConfigService'] = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - # The portGroup is gotten from emc xml config file. - maskingViewDict['pgGroupName'] = extraSpecs[PORTGROUPNAME] - - maskingViewDict['igGroupName'] = ( - ("OS-%(shortHostName)s-%(protocol)s-IG" - % {'shortHostName': shortHostName, - 'protocol': protocol})) - maskingViewDict['connector'] = connector - maskingViewDict['volumeInstance'] = volumeInstance - maskingViewDict['volumeName'] = volumeName - maskingViewDict['storageSystemName'] = storageSystemName + short_host_name = self.utils.get_host_short_name(host_name) + slo = extra_specs[utils.SLO] + workload = extra_specs[utils.WORKLOAD] + short_pg_name = self.utils.get_pg_short_name( + extra_specs[utils.PORTGROUPNAME]) + masking_view_dict[utils.SLO] = slo + masking_view_dict[utils.WORKLOAD] = workload + masking_view_dict[utils.SRP] = unique_name + masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY] + masking_view_dict[utils.PORTGROUPNAME] = ( + extra_specs[utils.PORTGROUPNAME]) if self._get_initiator_check_flag(): - maskingViewDict['initiatorCheck'] = True + masking_view_dict[utils.INITIATOR_CHECK] = True else: - maskingViewDict['initiatorCheck'] = False + masking_view_dict[utils.INITIATOR_CHECK] = False - return maskingViewDict - - def _add_volume_to_default_storage_group_on_create( - self, volumeDict, volumeName, storageConfigService, - storageSystemName, fastPolicyName, extraSpecs): - """Add the volume to the default storage group for that policy. 
- - On a create when fast policy is enable add the volume to the default - storage group for that policy. If it fails do the necessary rollback. - - :param volumeDict: the volume dictionary - :param volumeName: the volume name (String) - :param storageConfigService: the storage configuration service - :param storageSystemName: the storage system name (String) - :param fastPolicyName: the fast policy name (String) - :param extraSpecs: extra specifications - :returns: dict -- maskingViewDict with masking view information - :raises VolumeBackendAPIException: - """ - try: - volumeInstance = self.utils.find_volume_instance( - self.conn, volumeDict, volumeName) - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - defaultSgName = self.fast.format_default_sg_string(fastPolicyName) - - self.fast.add_volume_to_default_storage_group_for_fast_policy( - self.conn, controllerConfigurationService, volumeInstance, - volumeName, fastPolicyName, extraSpecs) - foundStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume( - self.conn, volumeInstance.path, defaultSgName)) - - if foundStorageGroupInstanceName is None: - exceptionMessage = (_( - "Error adding Volume: %(volumeName)s " - "with instance path: %(volumeInstancePath)s.") - % {'volumeName': volumeName, - 'volumeInstancePath': volumeInstance.path}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - except Exception: - # Rollback by deleting the volume if adding the volume to the - # default storage group were to fail. 
- errorMessage = (_( - "Rolling back %(volumeName)s by deleting it.") - % {'volumeName': volumeName}) - LOG.exception(errorMessage) - self.provision.delete_volume_from_pool( - self.conn, storageConfigService, volumeInstance.path, - volumeName, extraSpecs) - raise exception.VolumeBackendAPIException(data=errorMessage) - - def _create_and_get_unbound_volume( - self, conn, storageConfigService, compositeVolumeInstanceName, - additionalSize, extraSpecs): - """Create an unbound volume. - - Create an unbound volume so it is in the correct state to add to a - composite volume. - - :param conn: the connection information to the ecom server - :param storageConfigService: the storage config service instance name - :param compositeVolumeInstanceName: the composite volume instance name - :param additionalSize: the size you want to increase the volume by - :param extraSpecs: extra specifications - :returns: volume instance modifiedCompositeVolumeInstance - """ - assocPoolInstanceName = self.utils.get_assoc_pool_from_volume( - conn, compositeVolumeInstanceName) - appendVolumeInstance = self._create_and_get_volume_instance( - conn, storageConfigService, assocPoolInstanceName, 'appendVolume', - additionalSize, extraSpecs) - isVolumeBound = self.utils.is_volume_bound_to_pool( - conn, appendVolumeInstance) - - if 'True' in isVolumeBound: - appendVolumeInstance = ( - self._unbind_and_get_volume_from_storage_pool( - conn, storageConfigService, - appendVolumeInstance.path, 'appendVolume', extraSpecs)) - - return appendVolumeInstance - - def _create_and_get_volume_instance( - self, conn, storageConfigService, poolInstanceName, - volumeName, volumeSize, extraSpecs): - """Create and get a new volume. 
- - :param conn: the connection information to the ecom server - :param storageConfigService: the storage config service instance name - :param poolInstanceName: the pool instance name - :param volumeName: the volume name - :param volumeSize: the size to create the volume - :param extraSpecs: extra specifications - :returns: volumeInstance -- the volume instance - """ - volumeDict, _rc = ( - self.provision.create_volume_from_pool( - self.conn, storageConfigService, volumeName, poolInstanceName, - volumeSize, extraSpecs)) - volumeInstance = self.utils.find_volume_instance( - self.conn, volumeDict, volumeName) - return volumeInstance - - def _unbind_and_get_volume_from_storage_pool( - self, conn, storageConfigService, - volumeInstanceName, volumeName, extraSpecs): - """Unbind a volume from a pool and return the unbound volume. - - :param conn: the connection information to the ecom server - :param storageConfigService: the storage config service instance name - :param volumeInstanceName: the volume instance name - :param volumeName: string the volumeName - :param extraSpecs: extra specifications - :returns: unboundVolumeInstance -- the unbound volume instance - """ - _rc, _job = ( - self.provision.unbind_volume_from_storage_pool( - conn, storageConfigService, volumeInstanceName, - volumeName, extraSpecs)) - # Check that the volume in unbound - volumeInstance = conn.GetInstance(volumeInstanceName) - isVolumeBound = self.utils.is_volume_bound_to_pool( - conn, volumeInstance) - if 'False' not in isVolumeBound: - exceptionMessage = (_( - "Failed to unbind volume %(volume)s") - % {'volume': volumeInstanceName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return volumeInstance - - def _modify_and_get_composite_volume_instance( - self, conn, elementCompositionServiceInstanceName, volumeInstance, - appendVolumeInstanceName, volumeName, compositeType, extraSpecs): - """Given an existing composite volume add a new 
composite volume to it. - - :param conn: the connection information to the ecom server - :param elementCompositionServiceInstanceName: the storage element - composition service instance name - :param volumeInstance: the volume instance - :param appendVolumeInstanceName: the appended volume instance name - :param volumeName: the volume name - :param compositeType: concatenated - :param extraSpecs: extra specifications - :returns: int -- the return code - :returns: dict -- modifiedVolumeDict - the modified volume dict - """ - isComposite = self.utils.check_if_volume_is_composite( - self.conn, volumeInstance) - if 'True' in isComposite: - rc, job = self.provision.modify_composite_volume( - conn, elementCompositionServiceInstanceName, - volumeInstance.path, appendVolumeInstanceName, extraSpecs) - elif 'False' in isComposite: - rc, job = self.provision.create_new_composite_volume( - conn, elementCompositionServiceInstanceName, - volumeInstance.path, appendVolumeInstanceName, compositeType, - extraSpecs) + if slo: + slo_wl_combo = self.utils.truncate_string(slo + workload, 10) + child_sg_name = ( + "OS-%(shortHostName)s-%(srpName)s-%(combo)s-%(pg)s" + % {'shortHostName': short_host_name, + 'srpName': unique_name, + 'combo': slo_wl_combo, + 'pg': short_pg_name}) else: - LOG.error( - "Unable to determine whether %(volumeName)s is " - "composite or not.", - {'volumeName': volumeName}) - raise + child_sg_name = ( + "OS-%(shortHostName)s-No_SLO-%(pg)s" + % {'shortHostName': short_host_name, + 'pg': short_pg_name}) - modifiedVolumeDict = self.provision.get_volume_dict_from_job( - conn, job['Job']) + mv_prefix = ( + "OS-%(shortHostName)s-%(protocol)s-%(pg)s" + % {'shortHostName': short_host_name, + 'protocol': protocol, 'pg': short_pg_name}) - return rc, modifiedVolumeDict + masking_view_dict[utils.SG_NAME] = child_sg_name - def _get_or_create_default_storage_group( - self, conn, storageSystemName, volumeDict, volumeName, - fastPolicyName, extraSpecs): - """Get or create a 
default storage group for a fast policy. + masking_view_dict[utils.MV_NAME] = ("%(prefix)s-MV" + % {'prefix': mv_prefix}) - :param conn: the connection information to the ecom server - :param storageSystemName: the storage system name - :param volumeDict: the volume dictionary - :param volumeName: the volume name - :param fastPolicyName: the fast policy name - :param extraSpecs: extra specifications - :returns: defaultStorageGroupInstanceName - """ - controllerConfigService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) + masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG" + % {'prefix': mv_prefix}) + volume_name = volume.name + device_id = self._find_device_on_array(volume, extra_specs) + if not device_id: + exception_message = (_("Cannot retrieve volume %(vol)s " + "from the array. ") % {'vol': volume_name}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(exception_message) - volumeInstance = self.utils.find_volume_instance( - self.conn, volumeDict, volumeName) - defaultStorageGroupInstanceName = ( - self.fast.get_or_create_default_storage_group( - self.conn, controllerConfigService, fastPolicyName, - volumeInstance, extraSpecs)) - return defaultStorageGroupInstanceName + masking_view_dict[utils.IG_NAME] = ( + ("OS-%(shortHostName)s-%(protocol)s-IG" + % {'shortHostName': short_host_name, + 'protocol': protocol})) + masking_view_dict[utils.CONNECTOR] = connector + masking_view_dict[utils.DEVICE_ID] = device_id + masking_view_dict[utils.VOL_NAME] = volume_name + + return masking_view_dict def _create_cloned_volume( - self, cloneVolume, sourceVolume, extraSpecs, isSnapshot=False): + self, volume, source_volume, extra_specs, is_snapshot=False, + from_snapvx=False): """Create a clone volume from the source volume. 
- :param cloneVolume: clone volume - :param sourceVolume: source of the clone volume - :param extraSpecs: extra specs - :param isSnapshot: boolean -- Defaults to False + :param volume: clone volume + :param source_volume: source of the clone volume + :param extra_specs: extra specs + :param is_snapshot: boolean -- Defaults to False + :param from_snapvx: bool -- Defaults to False :returns: dict -- cloneDict the cloned volume dictionary :raises VolumeBackendAPIException: """ - sourceName = sourceVolume['name'] - cloneName = cloneVolume['name'] + clone_name = volume.name + snap_name = None + LOG.info("Create a replica from Volume: Clone Volume: %(clone_name)s " + "from Source Volume: %(source_name)s.", + {'clone_name': clone_name, + 'source_name': source_volume.name}) - LOG.info( - "Create a replica from Volume: Clone Volume: %(cloneName)s " - "Source Volume: %(sourceName)s.", - {'cloneName': cloneName, - 'sourceName': sourceName}) - - self.conn = self._get_ecom_connection() - sourceInstance = self._find_lun(sourceVolume) - storageSystem = sourceInstance['SystemName'] - repServCapabilityInstanceName = ( - self.utils.find_replication_service_capabilities(self.conn, - storageSystem)) - is_clone_license = self.utils.is_clone_licensed( - self.conn, repServCapabilityInstanceName, extraSpecs[ISV3]) - - if is_clone_license is False: - exceptionMessage = (_( - "Clone feature is not licensed on %(storageSystem)s.") - % {'storageSystem': storageSystem}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - repServiceInstanceName = self.utils.find_replication_service( - self.conn, storageSystem) - - LOG.debug("Create volume replica: Volume: %(cloneName)s " - "Source Volume: %(sourceName)s " - "Method: CreateElementReplica " - "ReplicationService: %(service)s ElementName: " - "%(elementname)s SyncType: 8 SourceElement: " - "%(sourceelement)s.", - {'cloneName': cloneName, - 'sourceName': sourceName, - 'service': 
repServiceInstanceName, - 'elementname': cloneName, - 'sourceelement': sourceInstance.path}) - - if extraSpecs[ISV3]: - rc, cloneDict = self._create_replica_v3(repServiceInstanceName, - cloneVolume, - sourceVolume, - sourceInstance, - isSnapshot, - extraSpecs) + array = extra_specs[utils.ARRAY] + is_clone_license = self.rest.is_snapvx_licensed(array) + if from_snapvx: + source_device_id, snap_name = self._parse_snap_info( + array, source_volume) else: - rc, cloneDict = self._create_clone_v2(repServiceInstanceName, - cloneVolume, - sourceVolume, - sourceInstance, - isSnapshot, - extraSpecs) + source_device_id = self._find_device_on_array( + source_volume, extra_specs) - if not isSnapshot: - old_size_gbs = self.utils.convert_bits_to_gbs( - self.utils.get_volume_size( - self.conn, sourceInstance)) + if not is_clone_license: + exception_message = (_( + "SnapVx feature is not licensed on %(array)s.") + % {'array': array}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) - if cloneVolume['size'] != old_size_gbs: - LOG.info("Extending clone %(cloneName)s to " - "%(newSize)d GBs", - {'cloneName': cloneName, - 'newSize': cloneVolume['size']}) - cloneInstance = self.utils.find_volume_instance( - self.conn, cloneDict, cloneName) - self._extend_volume( - cloneVolume, cloneInstance, cloneName, - cloneVolume['size'], old_size_gbs, extraSpecs) + # Check if source is currently a snap target. Wait for sync if true. 
+ self._sync_check(array, source_device_id, source_volume.name, + extra_specs, tgt_only=True) + + if not is_snapshot: + clone_dict = self._create_replica( + array, volume, source_device_id, extra_specs, + snap_name=snap_name) + else: + clone_dict = self._create_snapshot( + array, volume, source_device_id, extra_specs) LOG.debug("Leaving _create_cloned_volume: Volume: " - "%(cloneName)s Source Volume: %(sourceName)s " - "Return code: %(rc)lu.", - {'cloneName': cloneName, - 'sourceName': sourceName, - 'rc': rc}) - # Adding version information - cloneDict['version'] = self.version + "%(clone_name)s Source Device Id: %(source_name)s ", + {'clone_name': clone_name, + 'source_name': source_device_id}) - return cloneDict + return clone_dict - def _add_clone_to_default_storage_group( - self, fastPolicyName, storageSystemName, cloneDict, cloneName, - extraSpecs): - """Helper function to add clone to the default storage group. + def _parse_snap_info(self, array, snapshot): + """Given a snapshot object, parse the provider_location. - :param fastPolicyName: the fast policy name - :param storageSystemName: the storage system name - :param cloneDict: clone dictionary - :param cloneName: clone name - :param extraSpecs: extra specifications - :raises VolumeBackendAPIException: + :param array: the array serial number + :param snapshot: the snapshot object + :return: sourcedevice_id, foundsnap_name """ - # Check if the clone/snapshot volume already part of the default sg. - cloneInstance = self.utils.find_volume_instance( - self.conn, cloneDict, cloneName) - if self.fast.is_volume_in_default_SG(self.conn, cloneInstance.path): - return + foundsnap_name = None + sourcedevice_id = None + volume_name = snapshot.id - # If FAST enabled place clone volume or volume from snapshot to - # default storage group. 
- LOG.debug("Adding volume: %(cloneName)s to default storage group " - "for FAST policy: %(fastPolicyName)s.", - {'cloneName': cloneName, - 'fastPolicyName': fastPolicyName}) + loc = snapshot.provider_location - storageConfigService = ( - self.utils.find_storage_configuration_service( - self.conn, storageSystemName)) + if isinstance(loc, six.string_types): + name = ast.literal_eval(loc) + sourcedevice_id = name['source_id'] + snap_name = name['snap_name'] + # Ensure snapvx is on the array. + try: + snap_details = self.rest.get_volume_snap( + array, sourcedevice_id, snap_name) + if snap_details: + foundsnap_name = snap_name + except Exception as e: + LOG.info("Exception in retrieving snapshot: %(e)s.", + {'e': e}) + foundsnap_name = None - defaultStorageGroupInstanceName = ( - self._get_or_create_default_storage_group( - self.conn, storageSystemName, cloneDict, cloneName, - fastPolicyName, extraSpecs)) - if defaultStorageGroupInstanceName is None: - exceptionMessage = (_( - "Unable to create or get default storage group for FAST " - "policy: %(fastPolicyName)s.") - % {'fastPolicyName': fastPolicyName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + if foundsnap_name is None or sourcedevice_id is None: + exception_message = (_("Error retrieving snapshot details. " + "Snapshot name: %(snap)s") % + {'snap': volume_name}) + LOG.error(exception_message) - self._add_volume_to_default_storage_group_on_create( - cloneDict, cloneName, storageConfigService, storageSystemName, - fastPolicyName, extraSpecs) + else: + LOG.debug("Source volume: %(volume_name)s Snap name: " + "%(foundsnap_name)s.", + {'volume_name': sourcedevice_id, + 'foundsnap_name': foundsnap_name}) - def _delete_volume(self, volume, isSnapshot=False, host=None): + return sourcedevice_id, foundsnap_name + + def _create_snapshot(self, array, snapshot, + source_device_id, extra_specs): + """Create a snap Vx of a volume. 
+ + :param array: the array serial number + :param snapshot: the snapshot object + :param source_device_id: the source device id + :param extra_specs: the extra specifications + :return: snap_dict + """ + clone_name = self.utils.get_volume_element_name(snapshot.id) + snap_name = self.utils.truncate_string(clone_name, 19) + try: + self.provision.create_volume_snapvx(array, source_device_id, + snap_name, extra_specs) + except Exception as e: + exception_message = (_("Error creating snap Vx of %(vol)s. " + "Exception received: %(e)s.") + % {'vol': source_device_id, + 'e': six.text_type(e)}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + snap_dict = {'snap_name': snap_name, 'source_id': source_device_id} + return snap_dict + + def _delete_volume(self, volume): """Helper function to delete the specified volume. + Pass in host if is snapshot :param volume: volume object to be deleted - :returns: tuple -- rc (int return code), volumeName (string vol name) + :returns: volume_name (string vol name) """ - - volumeName = volume['name'] - rc = -1 - errorRet = (rc, volumeName) - - extraSpecs = self._initial_setup(volume, host=host) - self.conn = self._get_ecom_connection() - - volumeInstance = self._find_lun(volume) - if volumeInstance is None: - LOG.error( - "Volume %(name)s not found on the array. 
" - "No volume to delete.", - {'name': volumeName}) - return errorRet - - self._sync_check(volumeInstance, volumeName, extraSpecs) - - storageConfigService = self.utils.find_storage_configuration_service( - self.conn, volumeInstance['SystemName']) - - deviceId = volumeInstance['DeviceID'] - - if extraSpecs[ISV3]: - if isSnapshot: - rc = self._delete_from_pool_v3( - storageConfigService, volumeInstance, volumeName, - deviceId, extraSpecs) - else: - rc = self._delete_from_pool_v3( - storageConfigService, volumeInstance, volumeName, - deviceId, extraSpecs, volume) - else: - rc = self._delete_from_pool(storageConfigService, volumeInstance, - volumeName, deviceId, - extraSpecs[FASTPOLICY], - extraSpecs) - return (rc, volumeName) - - def _remove_device_from_storage_group( - self, controllerConfigurationService, volumeInstanceName, - volumeName, extraSpecs): - """Check if volume is part of a storage group prior to delete. - - Log a warning if volume is part of storage group. - - :param controllerConfigurationService: controller configuration service - :param volumeInstanceName: volume instance name - :param volumeName: volume name (string) - :param extraSpecs: extra specifications - """ - storageGroupInstanceNames = ( - self.masking.get_associated_masking_groups_from_device( - self.conn, volumeInstanceName)) - if storageGroupInstanceNames: - LOG.warning( - "Pre check for deletion. " - "Volume: %(volumeName)s is part of a storage group. 
" - "Attempting removal from %(storageGroupInstanceNames)s.", - {'volumeName': volumeName, - 'storageGroupInstanceNames': storageGroupInstanceNames}) - for storageGroupInstanceName in storageGroupInstanceNames: - storageGroupInstance = self.conn.GetInstance( - storageGroupInstanceName) - self.masking.remove_device_from_storage_group( - self.conn, controllerConfigurationService, - storageGroupInstanceName, volumeInstanceName, - volumeName, storageGroupInstance['ElementName'], - extraSpecs) - - def _find_lunmasking_scsi_protocol_controller(self, storageSystemName, - connector): - """Find LunMaskingSCSIProtocolController for the local host. - - Find out how many volumes are mapped to a host - associated to the LunMaskingSCSIProtocolController. - - :param storageSystemName: the storage system name - :param connector: volume object to be deleted - :returns: foundControllerInstanceName - """ - - foundControllerInstanceName = None - initiators = self._find_initiator_names(connector) - - storageSystemInstanceName = self.utils.find_storageSystem( - self.conn, storageSystemName) - controllerInstanceNames = self.conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='EMC_LunMaskingSCSIProtocolController') - - for controllerInstanceName in controllerInstanceNames: - try: - # This is a check to see if the controller has - # been deleted. - self.conn.GetInstance(controllerInstanceName) - storageHardwareIdInstances = self.conn.Associators( - controllerInstanceName, - ResultClass='EMC_StorageHardwareID') - for storageHardwareIdInstance in storageHardwareIdInstances: - # If EMC_StorageHardwareID matches the initiator, we - # found the existing EMC_LunMaskingSCSIProtocolController. - hardwareid = storageHardwareIdInstance['StorageID'] - for initiator in initiators: - if hardwareid.lower() == initiator.lower(): - # This is a check to see if the controller - # has been deleted. 
- instance = self.utils.get_existing_instance( - self.conn, controllerInstanceName) - if instance is None: - # Skip this controller as it doesn't exist - # any more. - pass - else: - foundControllerInstanceName = ( - controllerInstanceName) - break - - if foundControllerInstanceName is not None: - break - except pywbem.cim_operations.CIMError as arg: - instance = self.utils.process_exception_args( - arg, controllerInstanceName) - if instance is None: - # Skip this controller as it doesn't exist any more. - pass - - if foundControllerInstanceName is not None: - break - - LOG.debug("LunMaskingSCSIProtocolController for storage system " - "%(storage_system)s and initiator %(initiator)s is " - "%(ctrl)s.", - {'storage_system': storageSystemName, - 'initiator': initiators, - 'ctrl': foundControllerInstanceName}) - return foundControllerInstanceName - - def get_num_volumes_mapped(self, volume, connector): - """Returns how many volumes are in the same zone as the connector. - - Find out how many volumes are mapped to a host - associated to the LunMaskingSCSIProtocolController. - - :param volume: volume object to be deleted - :param connector: volume object to be deleted - :returns: int -- numVolumesMapped - :raises VolumeBackendAPIException: - """ - - volumename = volume['name'] - vol_instance = self._find_lun(volume) - if vol_instance is None: - msg = (_("Volume %(name)s not found on the array. " - "Cannot determine if there are volumes mapped.") - % {'name': volumename}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - storage_system = vol_instance['SystemName'] - - ctrl = self._find_lunmasking_scsi_protocol_controller( - storage_system, - connector) - - LOG.debug("LunMaskingSCSIProtocolController for storage system " - "%(storage)s and %(connector)s is %(ctrl)s.", - {'storage': storage_system, - 'connector': connector, - 'ctrl': ctrl}) - - # Return 0 if masking view does not exist. 
- if ctrl is None: - return 0 - - associators = self.conn.Associators( - ctrl, - ResultClass='EMC_StorageVolume') - - numVolumesMapped = len(associators) - - LOG.debug("Found %(numVolumesMapped)d volumes on storage system " - "%(storage)s mapped to %(connector)s.", - {'numVolumesMapped': numVolumesMapped, - 'storage': storage_system, - 'connector': connector}) - - return numVolumesMapped - - def _delete_snapshot(self, snapshot, host=None): - """Helper function to delete the specified snapshot. - - :param snapshot: snapshot object to be deleted - :raises VolumeBackendAPIException: - """ - LOG.debug("Entering _delete_snapshot.") - - self.conn = self._get_ecom_connection() - - # Delete the target device. - rc, snapshotname = self._delete_volume(snapshot, True, host) - LOG.info("Leaving delete_snapshot: %(ssname)s Return code: " - "%(rc)lu.", - {'ssname': snapshotname, - 'rc': rc}) - - def create_consistencygroup(self, context, group): - """Creates a consistency group. - - :param context: the context - :param group: the group object to be created - :returns: dict -- modelUpdate = {'status': 'available'} - :raises VolumeBackendAPIException: - """ - LOG.info("Create Consistency Group: %(group)s.", - {'group': group['id']}) - - modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - cgName = self._update_consistency_group_name(group) - - self.conn = self._get_ecom_connection() - - # Find storage system. 
- try: - replicationService, storageSystem, __, __ = ( - self._get_consistency_group_utils(self.conn, group)) - interval_retries_dict = self.utils.get_default_intervals_retries() - self.provision.create_consistency_group( - self.conn, replicationService, cgName, interval_retries_dict) - except Exception: - exceptionMessage = (_("Failed to create consistency group:" - " %(cgName)s.") - % {'cgName': cgName}) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return modelUpdate - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group. - - :param context: the context - :param group: the group object to be deleted - :param volumes: the list of volumes in the consisgroup to be deleted - :returns: dict -- modelUpdate - :returns: list -- list of volume objects - :raises VolumeBackendAPIException: - """ - LOG.info("Delete Consistency Group: %(group)s.", - {'group': group['id']}) - - modelUpdate = {} - volumes_model_update = {} - if not self.conn: - self.conn = self._get_ecom_connection() - - try: - replicationService, storageSystem, __, isV3 = ( - self._get_consistency_group_utils(self.conn, group)) - - storageConfigservice = ( - self.utils.find_storage_configuration_service( - self.conn, storageSystem)) - cgInstanceName, cgName = self._find_consistency_group( - replicationService, six.text_type(group['id'])) - if cgInstanceName is None: - LOG.error("Cannot find CG group %(cgName)s.", - {'cgName': six.text_type(group['id'])}) - modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED} - volumes_model_update = self.utils.get_volume_model_updates( - volumes, group.id, - status='deleted') - return modelUpdate, volumes_model_update - - memberInstanceNames = self._get_members_of_replication_group( - cgInstanceName) - interval_retries_dict = self.utils.get_default_intervals_retries() - self.provision.delete_consistency_group(self.conn, - replicationService, - cgInstanceName, 
cgName, - interval_retries_dict) - - # Do a bulk delete, a lot faster than single deletes. - if memberInstanceNames: - volumes_model_update, modelUpdate = self._do_bulk_delete( - storageSystem, memberInstanceNames, storageConfigservice, - volumes, group, isV3, interval_retries_dict) - - except Exception: - exceptionMessage = (_( - "Failed to delete consistency group: %(cgName)s.") - % {'cgName': six.text_type(group['id'])}) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return modelUpdate, volumes_model_update - - def _do_bulk_delete(self, storageSystem, memberInstanceNames, - storageConfigservice, volumes, group, isV3, - extraSpecs): - """Do a bulk delete. - - :param storageSystem: storage system name - :param memberInstanceNames: volume Instance names - :param storageConfigservice: storage config service - :param volumes: volume objects - :param modelUpdate: dict - :param isV3: boolean - :param extraSpecs: extra specifications - :returns: list -- list of volume objects - :returns: dict -- modelUpdate - """ - try: - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystem)) - for memberInstanceName in memberInstanceNames: - self._remove_device_from_storage_group( - controllerConfigurationService, memberInstanceName, - 'Member Volume', extraSpecs) - if isV3: - self.provisionv3.delete_volume_from_pool( - self.conn, storageConfigservice, - memberInstanceNames, None, extraSpecs) - else: - self.provision.delete_volume_from_pool( - self.conn, storageConfigservice, - memberInstanceNames, None, extraSpecs) - modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED} - except Exception: - modelUpdate = { - 'status': fields.ConsistencyGroupStatus.ERROR_DELETING} - finally: - volumes_model_update = self.utils.get_volume_model_updates( - volumes, group['id'], status=modelUpdate['status']) - - return volumes_model_update, modelUpdate - - def 
create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot. - - :param context: the context - :param cgsnapshot: the consistency group snapshot to be created - :param snapshots: snapshots - :returns: dict -- modelUpdate - :returns: list -- list of snapshots - :raises VolumeBackendAPIException: - """ - consistencyGroup = cgsnapshot.get('consistencygroup') - - snapshots_model_update = [] - - LOG.info( - "Create snapshot for Consistency Group %(cgId)s " - "cgsnapshotID: %(cgsnapshot)s.", - {'cgsnapshot': cgsnapshot['id'], - 'cgId': cgsnapshot['consistencygroup_id']}) - - self.conn = self._get_ecom_connection() - - try: - replicationService, storageSystem, extraSpecsDictList, isV3 = ( - self._get_consistency_group_utils(self.conn, consistencyGroup)) - - cgInstanceName, cgName = ( - self._find_consistency_group( - replicationService, six.text_type( - cgsnapshot['consistencygroup_id']))) - if cgInstanceName is None: - exception_message = (_( - "Cannot find CG group %s.") % six.text_type( - cgsnapshot['consistencygroup_id'])) - raise exception.VolumeBackendAPIException( - data=exception_message) - - # Create the target consistency group. 
- targetCgName = self._update_consistency_group_name(cgsnapshot) - interval_retries_dict = self.utils.get_default_intervals_retries() - self.provision.create_consistency_group( - self.conn, replicationService, targetCgName, - interval_retries_dict) - targetCgInstanceName, targetCgName = self._find_consistency_group( - replicationService, cgsnapshot['id']) - LOG.info("Create target consistency group %(targetCg)s.", - {'targetCg': targetCgInstanceName}) - - for snapshot in snapshots: - volume = snapshot['volume'] - for extraSpecsDict in extraSpecsDictList: - if volume['volume_type_id'] in extraSpecsDict.values(): - extraSpecs = extraSpecsDict.get('extraSpecs') - if 'pool_name' in extraSpecs: - extraSpecs = self.utils.update_extra_specs( - extraSpecs) - if 'size' in volume: - volumeSizeInbits = int(self.utils.convert_gb_to_bits( - volume['size'])) - else: - volumeSizeInbits = int(self.utils.convert_gb_to_bits( - volume['volume_size'])) - targetVolumeName = 'targetVol' - - if isV3: - _rc, volumeDict, _storageSystemName = ( - self._create_v3_volume( - volume, targetVolumeName, volumeSizeInbits, - extraSpecs)) - else: - _rc, volumeDict, _storageSystemName = ( - self._create_composite_volume( - volume, targetVolumeName, volumeSizeInbits, - extraSpecs)) - targetVolumeInstance = self.utils.find_volume_instance( - self.conn, volumeDict, targetVolumeName) - LOG.debug("Create target volume for member volume " - "Source volume: %(memberVol)s " - "Target volume %(targetVol)s.", - {'memberVol': volume['id'], - 'targetVol': targetVolumeInstance.path}) - self.provision.add_volume_to_cg(self.conn, - replicationService, - targetCgInstanceName, - targetVolumeInstance.path, - targetCgName, - targetVolumeName, - extraSpecs) - - self._create_group_and_break_relationship( - isV3, cgsnapshot['id'], replicationService, cgInstanceName, - targetCgInstanceName, storageSystem, interval_retries_dict) - - except Exception: - exceptionMessage = (_("Failed to create snapshot for cg:" - " 
%(cgName)s.") - % {'cgName': cgsnapshot['consistencygroup_id']} - ) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot['id'], - 'status': fields.SnapshotStatus.AVAILABLE}) - modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - - return modelUpdate, snapshots_model_update - - def _create_group_and_break_relationship( - self, isV3, cgsnapshotId, replicationService, cgInstanceName, - targetCgInstanceName, storageSystem, interval_retries_dict): - """Creates a cg group and deletes the relationship. - - :param isV3: the context - :param cgsnapshotId: the consistency group snapshot id - :param replicationService: replication service - :param cgInstanceName: cg instance name - :param targetCgInstanceName: target cg instance name - :param storageSystem: storage system - :param interval_retries_dict: - """ - # Less than 5 characters relationship name. - relationName = self.utils.truncate_string(cgsnapshotId, 5) - if isV3: - self.provisionv3.create_group_replica( - self.conn, replicationService, cgInstanceName, - targetCgInstanceName, relationName, interval_retries_dict) - else: - self.provision.create_group_replica( - self.conn, replicationService, cgInstanceName, - targetCgInstanceName, relationName, interval_retries_dict) - # Break the replica group relationship. - rgSyncInstanceName = self.utils.find_group_sync_rg_by_target( - self.conn, storageSystem, targetCgInstanceName, - interval_retries_dict, True) - if rgSyncInstanceName is not None: - repservice = self.utils.find_replication_service( - self.conn, storageSystem) - if repservice is None: - exception_message = (_( - "Cannot find Replication service on system %s.") % - storageSystem) - raise exception.VolumeBackendAPIException( - data=exception_message) - if isV3: - # Operation 7: dissolve for snapVx. 
- operation = self.utils.get_num(9, '16') - self.provisionv3.break_replication_relationship( - self.conn, repservice, rgSyncInstanceName, operation, - interval_retries_dict) - else: - self.provision.delete_clone_relationship(self.conn, repservice, - rgSyncInstanceName, - interval_retries_dict) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Delete a cgsnapshot. - - :param context: the context - :param cgsnapshot: the consistency group snapshot to be created - :param snapshots: snapshots - :returns: dict -- modelUpdate - :returns: list -- list of snapshots - :raises VolumeBackendAPIException: - """ - consistencyGroup = cgsnapshot.get('consistencygroup') - model_update = {} - snapshots_model_update = [] - LOG.info( - "Delete snapshot for source CG %(cgId)s " - "cgsnapshotID: %(cgsnapshot)s.", - {'cgsnapshot': cgsnapshot['id'], - 'cgId': cgsnapshot['consistencygroup_id']}) - - model_update['status'] = cgsnapshot['status'] - - self.conn = self._get_ecom_connection() - - try: - replicationService, storageSystem, __, isV3 = ( - self._get_consistency_group_utils(self.conn, consistencyGroup)) - interval_retries_dict = self.utils.get_default_intervals_retries() - model_update, snapshots = self._delete_cg_and_members( - storageSystem, cgsnapshot, model_update, - snapshots, isV3, interval_retries_dict) - for snapshot in snapshots: - snapshots_model_update.append( - {'id': snapshot['id'], - 'status': fields.SnapshotStatus.DELETED}) - except Exception: - exceptionMessage = (_("Failed to delete snapshot for cg: " - "%(cgId)s.") - % {'cgId': cgsnapshot['consistencygroup_id']}) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return model_update, snapshots_model_update - - def _find_consistency_group(self, replicationService, cgId): - """Finds a CG given its id. 
- - :param replicationService: the replication service - :param cgId: the consistency group id - :returns: foundCgInstanceName,cg_name - """ - foundCgInstanceName = None - cg_name = None - cgInstanceNames = ( - self.conn.AssociatorNames(replicationService, - ResultClass='CIM_ReplicationGroup')) - - for cgInstanceName in cgInstanceNames: - instance = self.conn.GetInstance(cgInstanceName, LocalOnly=False) - if cgId in instance['ElementName']: - foundCgInstanceName = cgInstanceName - cg_name = instance['ElementName'] - break - - return foundCgInstanceName, cg_name - - def _get_members_of_replication_group(self, cgInstanceName): - """Get the members of consistency group. - - :param cgInstanceName: the CG instance name - :returns: list -- memberInstanceNames - """ - memberInstanceNames = self.conn.AssociatorNames( - cgInstanceName, - AssocClass='CIM_OrderedMemberOfCollection') - - return memberInstanceNames - - def _create_composite_volume( - self, volume, volumeName, volumeSize, extraSpecs, - memberCount=None): - """Create a composite volume (V2). - - :param volume: the volume object - :param volumeName: the name of the volume - :param volumeSize: the size of the volume - :param extraSpecs: extra specifications - :param memberCount: the number of meta members in a composite volume + volume_name = volume.name + extra_specs = self._initial_setup(volume) + + device_id = self._find_device_on_array(volume, extra_specs) + if device_id is None: + LOG.error("Volume %(name)s not found on the array. " + "No volume to delete.", + {'name': volume_name}) + return volume_name + + array = extra_specs[utils.ARRAY] + # check if volume is snap source + self._sync_check(array, device_id, volume_name, extra_specs) + self._delete_from_srp( + array, device_id, volume_name, extra_specs) + return volume_name + + def _create_volume( + self, volume_name, volume_size, extra_specs): + """Create a volume. 
+ + :param volume_name: the volume name + :param volume_size: the volume size + :param extra_specs: extra specifications :returns: int -- return code - :returns: dict -- volumeDict - :returns: string -- storageSystemName + :returns: dict -- volume_dict :raises VolumeBackendAPIException: """ - if not memberCount: - memberCount, errorDesc = self.utils.determine_member_count( - volume['size'], extraSpecs[MEMBERCOUNT], - extraSpecs[COMPOSITETYPE]) - if errorDesc is not None: - exceptionMessage = (_("The striped meta count of " - "%(memberCount)s is too small for " - "volume: %(volumeName)s, " - "with size %(volumeSize)s.") - % {'memberCount': memberCount, - 'volumeName': volumeName, - 'volumeSize': volume['size']}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + array = extra_specs[utils.ARRAY] + is_valid_slo, is_valid_workload = self.provision.verify_slo_workload( + array, extra_specs[utils.SLO], + extra_specs[utils.WORKLOAD], extra_specs[utils.SRP]) - poolInstanceName, storageSystemName = ( - self._get_pool_and_storage_system(extraSpecs)) - - LOG.debug("Create Volume: %(volume)s Pool: %(pool)s " - "Storage System: %(storageSystem)s " - "Size: %(size)lu MemberCount: %(memberCount)s.", - {'volume': volumeName, - 'pool': poolInstanceName, - 'storageSystem': storageSystemName, - 'size': volumeSize, - 'memberCount': memberCount}) - - elementCompositionService = ( - self.utils.find_element_composition_service(self.conn, - storageSystemName)) - - storageConfigService = self.utils.find_storage_configuration_service( - self.conn, storageSystemName) - - # If FAST is intended to be used we must first check that the pool - # is associated with the correct storage tier. 
- if extraSpecs[FASTPOLICY] is not None: - foundPoolInstanceName = self.fast.get_pool_associated_to_policy( - self.conn, extraSpecs[FASTPOLICY], extraSpecs[ARRAY], - storageConfigService, poolInstanceName) - if foundPoolInstanceName is None: - exceptionMessage = (_("Pool: %(poolName)s. " - "is not associated to storage tier for " - "fast policy %(fastPolicy)s.") - % {'poolName': extraSpecs[POOL], - 'fastPolicy': - extraSpecs[FASTPOLICY]}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - compositeType = self.utils.get_composite_type( - extraSpecs[COMPOSITETYPE]) - - volumeDict, rc = self.provision.create_composite_volume( - self.conn, elementCompositionService, volumeSize, volumeName, - poolInstanceName, compositeType, memberCount, extraSpecs) - - # Now that we have already checked that the pool is associated with - # the correct storage tier and the volume was successfully created - # add the volume to the default storage group created for - # volumes in pools associated with this fast policy. 
- if extraSpecs[FASTPOLICY]: - LOG.info( - "Adding volume: %(volumeName)s to default storage group" - " for FAST policy: %(fastPolicyName)s.", - {'volumeName': volumeName, - 'fastPolicyName': extraSpecs[FASTPOLICY]}) - defaultStorageGroupInstanceName = ( - self._get_or_create_default_storage_group( - self.conn, storageSystemName, volumeDict, - volumeName, extraSpecs[FASTPOLICY], extraSpecs)) - if not defaultStorageGroupInstanceName: - exceptionMessage = (_( - "Unable to create or get default storage group for " - "FAST policy: %(fastPolicyName)s.") - % {'fastPolicyName': extraSpecs[FASTPOLICY]}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - # If qos exists, update storage group to reflect qos parameters - if 'qos' in extraSpecs: - self.utils.update_storagegroup_qos( - self.conn, defaultStorageGroupInstanceName, extraSpecs) - - self._add_volume_to_default_storage_group_on_create( - volumeDict, volumeName, storageConfigService, - storageSystemName, extraSpecs[FASTPOLICY], extraSpecs) - return rc, volumeDict, storageSystemName - - def _create_v3_volume( - self, volume, volumeName, volumeSize, extraSpecs): - """Create a volume (V3). - - :param volume: the volume object - :param volumeName: the volume name - :param volumeSize: the volume size - :param extraSpecs: extra specifications - :returns: int -- return code - :returns: dict -- volumeDict - :returns: string -- storageSystemName - :raises VolumeBackendAPIException: - """ - rc = -1 - volumeDict = {} - isValidSLO, isValidWorkload = self.utils.verify_slo_workload( - extraSpecs[SLO], extraSpecs[WORKLOAD]) - - if not isValidSLO or not isValidWorkload: - exceptionMessage = (_( + if not is_valid_slo or not is_valid_workload: + exception_message = (_( "Either SLO: %(slo)s or workload %(workload)s is invalid. 
" "Examine previous error statement for valid values.") - % {'slo': extraSpecs[SLO], - 'workload': extraSpecs[WORKLOAD]}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) + % {'slo': extra_specs[utils.SLO], + 'workload': extra_specs[utils.WORKLOAD]}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) - poolInstanceName, storageSystemName = ( - self._get_pool_and_storage_system(extraSpecs)) - - # Check to see if SLO and Workload are configured on the array. - storagePoolCapability = self.provisionv3.get_storage_pool_capability( - self.conn, poolInstanceName) - if extraSpecs[SLO]: - if storagePoolCapability: - storagePoolSetting = self.provisionv3.get_storage_pool_setting( - self.conn, storagePoolCapability, extraSpecs[SLO], - extraSpecs[WORKLOAD]) - if not storagePoolSetting: - exceptionMessage = (_( - "The array does not support the storage pool setting " - "for SLO %(slo)s or workload %(workload)s. 
Please " - "check the array for valid SLOs and workloads.") - % {'slo': extraSpecs[SLO], - 'workload': extraSpecs[WORKLOAD]}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - exceptionMessage = (_( - "Cannot determine storage pool settings.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("Create Volume: %(volume)s Pool: %(pool)s " - "Storage System: %(storageSystem)s " + LOG.debug("Create Volume: %(volume)s Srp: %(srp)s " + "Array: %(array)s " "Size: %(size)lu.", - {'volume': volumeName, - 'pool': poolInstanceName, - 'storageSystem': storageSystemName, - 'size': volumeSize}) + {'volume': volume_name, + 'srp': extra_specs[utils.SRP], + 'array': array, + 'size': volume_size}) - storageConfigService = self.utils.find_storage_configuration_service( - self.conn, storageSystemName) - doDisableCompression = self.utils.is_compression_disabled(extraSpecs) - - # A volume created without specifying a storage group during - # creation time is allocated from the default SRP pool and - # assigned the optimized SLO. 
- sgInstanceName = self._get_or_create_storage_group_v3( - extraSpecs[POOL], extraSpecs[SLO], - extraSpecs[WORKLOAD], doDisableCompression, - storageSystemName, extraSpecs) + storagegroup_name = self.masking.get_or_create_default_storage_group( + array, extra_specs[utils.SRP], extra_specs[utils.SLO], + extra_specs[utils.WORKLOAD], extra_specs) try: - volumeDict, rc = self.provisionv3.create_volume_from_sg( - self.conn, storageConfigService, volumeName, - sgInstanceName, volumeSize, extraSpecs) + volume_dict = self.provision.create_volume_from_sg( + array, volume_name, storagegroup_name, + volume_size, extra_specs) except Exception: # if the volume create fails, check if the # storage group needs to be cleaned up - volumeInstanceNames = ( - self.masking.get_devices_from_storage_group( - self.conn, sgInstanceName)) + LOG.error("Create volume failed. Checking if " + "storage group cleanup necessary...") + num_vol_in_sg = self.rest.get_num_vols_in_sg( + array, storagegroup_name) - if not len(volumeInstanceNames): + if num_vol_in_sg == 0: LOG.debug("There are no volumes in the storage group " - "%(maskingGroup)s. Deleting storage group", - {'maskingGroup': sgInstanceName}) - controllerConfigService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - self.masking.delete_storage_group( - self.conn, controllerConfigService, - sgInstanceName, extraSpecs) + "%(sg_id)s. Deleting storage group.", + {'sg_id': storagegroup_name}) + self.rest.delete_storage_group( + array, storagegroup_name) raise - return rc, volumeDict, storageSystemName + return volume_dict - def _get_or_create_storage_group_v3( - self, poolName, slo, workload, doDisableCompression, - storageSystemName, extraSpecs, is_re=False): - """Get or create storage group_v3 (V3). + def _set_vmax_extra_specs(self, extra_specs, pool_record): + """Set the VMAX extra specs. 
- :param poolName: the SRP pool nsmr - :param slo: the SLO - :param workload: the workload - :param doDisableCompression: flag for compression - :param storageSystemName: storage system name - :param extraSpecs: extra specifications - :param is_re: flag for replication - :returns: sgInstanceName - """ - storageGroupName, controllerConfigService, sgInstanceName = ( - self.utils.get_v3_default_sg_instance_name( - self.conn, poolName, slo, workload, storageSystemName, - doDisableCompression, is_re)) - if sgInstanceName is None: - sgInstanceName = self.provisionv3.create_storage_group_v3( - self.conn, controllerConfigService, storageGroupName, - poolName, slo, workload, extraSpecs, doDisableCompression) - else: - # Check that SG is not part of a masking view - mvInstanceName = self.masking.get_masking_view_from_storage_group( - self.conn, sgInstanceName) - if mvInstanceName: - exceptionMessage = (_( - "Default storage group %(storageGroupName)s is part of " - "masking view %(mvInstanceName)s. Please remove it " - "from this and all masking views") - % {'storageGroupName': storageGroupName, - 'mvInstanceName': mvInstanceName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - # If qos exists, update storage group to reflect qos parameters - if 'qos' in extraSpecs: - self.utils.update_storagegroup_qos( - self.conn, sgInstanceName, extraSpecs) + The pool_name extra spec must be set, otherwise a default slo/workload + will be chosen. The portgroup can either be passed as an extra spec + on the volume type (e.g. 'port_group_name = os-pg1-pg'), or can + be chosen from a list which must be provided in the xml file, e.g.: + + OS-PORTGROUP1-PG + OS-PORTGROUP2-PG + . - return sgInstanceName - - def _extend_composite_volume(self, volumeInstance, volumeName, - newSize, additionalVolumeSize, extraSpecs): - """Extend a composite volume (V2). 
- - :param volumeInstance: the volume instance - :param volumeName: the name of the volume - :param newSize: in GBs - :param additionalVolumeSize: additional volume size - :param extraSpecs: extra specifications - :returns: int -- return code - :returns: dict -- modifiedVolumeDict - :raises VolumeBackendAPIException: - """ - # Is the volume extendable. - isConcatenated = self.utils.check_if_volume_is_extendable( - self.conn, volumeInstance) - if 'True' not in isConcatenated: - exceptionMessage = (_( - "Volume: %(volumeName)s is not a concatenated volume. " - "You can only perform extend on concatenated volume. " - "Exiting...") - % {'volumeName': volumeName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - else: - compositeType = self.utils.get_composite_type(CONCATENATED) - - LOG.debug("Extend Volume: %(volume)s New size: %(newSize)s GBs.", - {'volume': volumeName, - 'newSize': newSize}) - - deviceId = volumeInstance['DeviceID'] - storageSystemName = volumeInstance['SystemName'] - LOG.debug( - "Device ID: %(deviceid)s: Storage System: " - "%(storagesystem)s.", - {'deviceid': deviceId, - 'storagesystem': storageSystemName}) - - storageConfigService = self.utils.find_storage_configuration_service( - self.conn, storageSystemName) - - elementCompositionService = ( - self.utils.find_element_composition_service( - self.conn, storageSystemName)) - - # Create a volume to the size of the - # newSize - oldSize = additionalVolumeSize. - unboundVolumeInstance = self._create_and_get_unbound_volume( - self.conn, storageConfigService, volumeInstance.path, - additionalVolumeSize, extraSpecs) - if unboundVolumeInstance is None: - exceptionMessage = (_( - "Error Creating unbound volume on an Extend operation.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - # Add the new unbound volume to the original composite volume. 
- rc, modifiedVolumeDict = ( - self._modify_and_get_composite_volume_instance( - self.conn, elementCompositionService, volumeInstance, - unboundVolumeInstance.path, volumeName, compositeType, - extraSpecs)) - if modifiedVolumeDict is None: - exceptionMessage = (_( - "On an Extend Operation, error adding volume to composite " - "volume: %(volumename)s.") - % {'volumename': volumeName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return rc, modifiedVolumeDict - - def _slo_workload_migration(self, volumeInstance, volume, host, - volumeName, volumeStatus, newType, - extraSpecs): - """Migrate from SLO/Workload combination to another (V3). - - :param volumeInstance: the volume instance - :param volume: the volume object - :param host: the host object - :param volumeName: the name of the volume - :param volumeStatus: the volume status - :param newType: the type to migrate to - :param extraSpecs: extra specifications - :returns: boolean -- True if migration succeeded, False if error. 
- """ - isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) - storageGroupName = self.utils.get_v3_storage_group_name( - extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD], - isCompressionDisabled) - # Check if old type and new type have different compression types - doChangeCompression = ( - self.utils.change_compression_type( - isCompressionDisabled, newType)) - volumeInstanceName = volumeInstance.path - isValid, targetSlo, targetWorkload = ( - self._is_valid_for_storage_assisted_migration_v3( - volumeInstanceName, host, extraSpecs[ARRAY], - extraSpecs[POOL], volumeName, volumeStatus, - storageGroupName, doChangeCompression)) - - storageSystemName = volumeInstance['SystemName'] - if not isValid: - LOG.error( - "Volume %(name)s is not suitable for storage " - "assisted migration using retype.", - {'name': volumeName}) - return False - if volume['host'] != host['host'] or doChangeCompression: - LOG.debug( - "Retype Volume %(name)s from source host %(sourceHost)s " - "to target host %(targetHost)s. Compression change is %(cc)r.", - {'name': volumeName, - 'sourceHost': volume['host'], - 'targetHost': host['host'], - 'cc': doChangeCompression}) - return self._migrate_volume_v3( - volume, volumeInstance, extraSpecs[POOL], targetSlo, - targetWorkload, storageSystemName, newType, extraSpecs) - - return False - - def _migrate_volume_v3( - self, volume, volumeInstance, poolName, targetSlo, - targetWorkload, storageSystemName, newType, extraSpecs): - """Migrate from one slo/workload combination to another (V3). - - This requires moving the volume from its current SG to a - new or existing SG that has the target attributes. 
- - :param volume: the volume object - :param volumeInstance: the volume instance - :param poolName: the SRP Pool Name - :param targetSlo: the target SLO - :param targetWorkload: the target workload - :param storageSystemName: the storage system name - :param newType: the type to migrate to - :param extraSpecs: extra specifications - :returns: boolean -- True if migration succeeded, False if error. - """ - volumeName = volume['name'] - - controllerConfigService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) - defaultSgName = self.utils.get_v3_storage_group_name( - extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD], - isCompressionDisabled) - foundStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume( - self.conn, volumeInstance.path, defaultSgName)) - if foundStorageGroupInstanceName is None: - LOG.warning( - "Volume : %(volumeName)s is not currently " - "belonging to any storage group.", - {'volumeName': volumeName}) - else: - self.masking.remove_and_reset_members( - self.conn, controllerConfigService, volumeInstance, - volumeName, extraSpecs, None, False) - - targetExtraSpecs = newType['extra_specs'] - isCompressionDisabled = self.utils.is_compression_disabled( - targetExtraSpecs) - - storageGroupName = self.utils.get_v3_storage_group_name( - poolName, targetSlo, targetWorkload, isCompressionDisabled) - - targetSgInstanceName = self._get_or_create_storage_group_v3( - poolName, targetSlo, targetWorkload, isCompressionDisabled, - storageSystemName, extraSpecs) - if targetSgInstanceName is None: - LOG.error( - "Failed to get or create storage group %(storageGroupName)s.", - {'storageGroupName': storageGroupName}) - return False - - self.masking.add_volume_to_storage_group( - self.conn, controllerConfigService, targetSgInstanceName, - volumeInstance, volumeName, storageGroupName, extraSpecs) - # Check that it has been added. 
- sgFromVolAddedInstanceName = ( - self.utils.get_storage_group_from_volume( - self.conn, volumeInstance.path, storageGroupName)) - if sgFromVolAddedInstanceName is None: - LOG.error( - "Volume : %(volumeName)s has not been " - "added to target storage group %(storageGroup)s.", - {'volumeName': volumeName, - 'storageGroup': targetSgInstanceName}) - return False - - return True - - def _pool_migration(self, volumeInstance, volume, host, - volumeName, volumeStatus, - fastPolicyName, newType, extraSpecs): - """Migrate from one pool to another (V2). - - :param volumeInstance: the volume instance - :param volume: the volume object - :param host: the host object - :param volumeName: the name of the volume - :param volumeStatus: the volume status - :param fastPolicyName: the FAST policy Name - :param newType: the type to migrate to - :param extraSpecs: extra specifications - :returns: boolean -- True if migration succeeded, False if error. - """ - storageSystemName = volumeInstance['SystemName'] - isValid, targetPoolName, targetFastPolicyName = ( - self._is_valid_for_storage_assisted_migration( - volumeInstance.path, host, storageSystemName, - volumeName, volumeStatus)) - - if not isValid: - LOG.error( - "Volume %(name)s is not suitable for storage " - "assisted migration using retype.", - {'name': volumeName}) - return False - if volume['host'] != host['host']: - LOG.debug( - "Retype Volume %(name)s from source host %(sourceHost)s " - "to target host %(targetHost)s.", - {'name': volumeName, - 'sourceHost': volume['host'], - 'targetHost': host['host']}) - return self._migrate_volume( - volume, volumeInstance, targetPoolName, targetFastPolicyName, - fastPolicyName, extraSpecs, newType) - - return False - - def _update_pool_stats( - self, backendName, arrayInfo): - """Update pool statistics (V2). 
- - :param backendName: the backend name - :param arrayInfo: the arrayInfo - :returns: location_info, total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb - """ - - if arrayInfo['FastPolicy']: - LOG.debug( - "Fast policy %(fastPolicyName)s is enabled on %(arrayName)s.", - {'fastPolicyName': arrayInfo['FastPolicy'], - 'arrayName': arrayInfo['SerialNumber']}) - else: - LOG.debug( - "No Fast policy for Array:%(arrayName)s " - "backend:%(backendName)s.", - {'arrayName': arrayInfo['SerialNumber'], - 'backendName': backendName}) - - storageSystemInstanceName = self.utils.find_storageSystem( - self.conn, arrayInfo['SerialNumber']) - isTieringPolicySupported = ( - self.fast.is_tiering_policy_enabled_on_storage_system( - self.conn, storageSystemInstanceName)) - - if (arrayInfo['FastPolicy'] is not None and - isTieringPolicySupported is True): # FAST enabled - (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, - array_max_over_subscription) = ( - self.fast.get_capacities_associated_to_policy( - self.conn, arrayInfo['SerialNumber'], - arrayInfo['FastPolicy'])) - LOG.info( - "FAST: capacity stats for policy %(fastPolicyName)s on array " - "%(arrayName)s. 
total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu.", - {'fastPolicyName': arrayInfo['FastPolicy'], - 'arrayName': arrayInfo['SerialNumber'], - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb}) - else: # NON-FAST - (total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, - array_max_over_subscription) = ( - self.utils.get_pool_capacities(self.conn, - arrayInfo['PoolName'], - arrayInfo['SerialNumber'])) - LOG.info( - "NON-FAST: capacity stats for pool %(poolName)s on array " - "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " - "free_capacity_gb=%(free_capacity_gb)lu.", - {'poolName': arrayInfo['PoolName'], - 'arrayName': arrayInfo['SerialNumber'], - 'total_capacity_gb': total_capacity_gb, - 'free_capacity_gb': free_capacity_gb}) - - location_info = ("%(arrayName)s#%(poolName)s#%(policyName)s" - % {'arrayName': arrayInfo['SerialNumber'], - 'poolName': arrayInfo['PoolName'], - 'policyName': arrayInfo['FastPolicy']}) - - return (location_info, total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, array_max_over_subscription) - - def _set_v2_extra_specs(self, extraSpecs, poolRecord): - """Set the VMAX V2 extra specs. - - :param extraSpecs: extra specifications - :param poolRecord: pool record - :returns: dict -- the extraSpecs - :raises VolumeBackendAPIException: - """ - try: - stripedMetaCount = extraSpecs[STRIPECOUNT] - extraSpecs[MEMBERCOUNT] = stripedMetaCount - extraSpecs[COMPOSITETYPE] = STRIPED - - LOG.debug( - "There are: %(stripedMetaCount)s striped metas in " - "the extra specs.", - {'stripedMetaCount': stripedMetaCount}) - except KeyError: - memberCount = '1' - extraSpecs[MEMBERCOUNT] = memberCount - extraSpecs[COMPOSITETYPE] = CONCATENATED - LOG.debug("StripedMetaCount is not in the extra specs.") - - # Get the FAST policy from the file. This value can be None if the - # user doesn't want to associate with any FAST policy. 
- if poolRecord['FastPolicy']: - LOG.debug("The fast policy name is: %(fastPolicyName)s.", - {'fastPolicyName': poolRecord['FastPolicy']}) - extraSpecs[FASTPOLICY] = poolRecord['FastPolicy'] - extraSpecs[ISV3] = False - extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord) - - LOG.debug("Pool is: %(pool)s " - "Array is: %(array)s " - "FastPolicy is: %(fastPolicy)s " - "CompositeType is: %(compositeType)s " - "MemberCount is: %(memberCount)s.", - {'pool': extraSpecs[POOL], - 'array': extraSpecs[ARRAY], - 'fastPolicy': extraSpecs[FASTPOLICY], - 'compositeType': extraSpecs[COMPOSITETYPE], - 'memberCount': extraSpecs[MEMBERCOUNT]}) - return extraSpecs - - def _set_v3_extra_specs(self, extraSpecs, poolRecord): - """Set the VMAX V3 extra specs. - - If SLO or workload are not specified then the default - values are NONE and the Optimized SLO will be assigned to the - volume. - - :param extraSpecs: extra specifications - :param poolRecord: pool record + :param extra_specs: extra specifications + :param pool_record: pool record :returns: dict -- the extra specifications dictionary """ - if extraSpecs['MultiPoolSupport'] is True: - sloFromExtraSpec = None - workloadFromExtraSpec = None - if 'pool_name' in extraSpecs: - try: - poolDetails = extraSpecs['pool_name'].split('+') - sloFromExtraSpec = poolDetails[0] - workloadFromExtraSpec = poolDetails[1] - except KeyError: - LOG.error("Error parsing SLO, workload from " - "the provided extra_specs.") - else: - # Throw an exception as it is compulsory to have - # pool_name in the extra specs - exceptionMessage = (_( - "Pool_name is not present in the extraSpecs " - "and MultiPoolSupport is enabled")) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - # If MultiPoolSupport is enabled, we completely - # ignore any entry for SLO & Workload in the poolRecord - extraSpecs[SLO] = sloFromExtraSpec - extraSpecs[WORKLOAD] = workloadFromExtraSpec - else: - extraSpecs[SLO] = poolRecord['SLO'] - 
extraSpecs[WORKLOAD] = poolRecord['Workload'] + # set extra_specs from pool_record + extra_specs[utils.SRP] = pool_record['srpName'] + extra_specs[utils.ARRAY] = pool_record['SerialNumber'] + if not extra_specs.get(utils.PORTGROUPNAME): + extra_specs[utils.PORTGROUPNAME] = pool_record['PortGroup'] + if not extra_specs[utils.PORTGROUPNAME]: + error_message = (_("Port group name has not been provided - " + "please configure the 'port_group_name' extra " + "spec on the volume type, or enter a list of " + "portgroups to the xml file associated with " + "this backend e.g." + "" + " OS-PORTGROUP1-PG" + " OS-PORTGROUP2-PG" + ".")) + LOG.exception(error_message) + raise exception.VolumeBackendAPIException(data=error_message) - extraSpecs[ISV3] = True - extraSpecs = self._set_common_extraSpecs(extraSpecs, poolRecord) - if self.utils.is_all_flash(self.conn, extraSpecs[ARRAY]): - try: - extraSpecs[self.utils.DISABLECOMPRESSION] - # If not True remove it. - if not self.utils.str2bool( - extraSpecs[self.utils.DISABLECOMPRESSION]): - extraSpecs.pop(self.utils.DISABLECOMPRESSION, None) - except KeyError: - pass + extra_specs[utils.INTERVAL] = self.intervals + LOG.debug("The interval is set at: %(intervalInSecs)s.", + {'intervalInSecs': self.intervals}) + extra_specs[utils.RETRIES] = self.retries + LOG.debug("Retries are set at: %(retries)s.", + {'retries': self.retries}) + + # set pool_name slo and workload + if 'pool_name' in extra_specs: + pool_name = extra_specs['pool_name'] else: - extraSpecs.pop(self.utils.DISABLECOMPRESSION, None) - LOG.debug("Pool is: %(pool)s " + slo_list = self.rest.get_slo_list(pool_record['SerialNumber']) + if 'Optimized' in slo_list: + slo = 'Optimized' + elif 'Diamond' in slo_list: + slo = 'Diamond' + else: + slo = 'None' + pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s" + % {'slo': slo, + 'workload': 'None', + 'srpName': pool_record['srpName'], + 'array': pool_record['SerialNumber']}) + LOG.warning("Pool_name is not present in the 
extra_specs " + "- using default pool %(pool_name)s.", + {'pool_name': pool_name}) + pool_details = pool_name.split('+') + slo_from_extra_spec = pool_details[0] + workload_from_extra_spec = pool_details[1] + # standardize slo and workload 'NONE' naming conventions + if workload_from_extra_spec.lower() == 'none': + workload_from_extra_spec = 'NONE' + if slo_from_extra_spec.lower() == 'none': + slo_from_extra_spec = None + extra_specs[utils.SLO] = slo_from_extra_spec + extra_specs[utils.WORKLOAD] = workload_from_extra_spec + + LOG.debug("SRP is: %(srp)s " "Array is: %(array)s " "SLO is: %(slo)s " "Workload is: %(workload)s.", - {'pool': extraSpecs[POOL], - 'array': extraSpecs[ARRAY], - 'slo': extraSpecs[SLO], - 'workload': extraSpecs[WORKLOAD]}) - return extraSpecs + {'srp': extra_specs[utils.SRP], + 'array': extra_specs[utils.ARRAY], + 'slo': extra_specs[utils.SLO], + 'workload': extra_specs[utils.WORKLOAD]}) + return extra_specs - def _set_common_extraSpecs(self, extraSpecs, poolRecord): - """Set common extra specs. + def _delete_from_srp(self, array, device_id, volume_name, + extra_specs): + """Delete from srp. 
- The extraSpecs are common to v2 and v3 - - :param extraSpecs: extra specifications - :param poolRecord: pool record - :returns: dict -- the extra specifications dictionary - """ - extraSpecs[POOL] = poolRecord['PoolName'] - extraSpecs[ARRAY] = poolRecord['SerialNumber'] - extraSpecs[PORTGROUPNAME] = poolRecord['PortGroup'] - if 'Interval' in poolRecord and poolRecord['Interval']: - extraSpecs[INTERVAL] = poolRecord['Interval'] - LOG.debug("The user defined interval is : %(intervalInSecs)s.", - {'intervalInSecs': poolRecord['Interval']}) - else: - LOG.debug("Interval not overridden, default of 10 assumed.") - if 'Retries' in poolRecord and poolRecord['Retries']: - extraSpecs[RETRIES] = poolRecord['Retries'] - LOG.debug("The user defined retries is : %(retries)s.", - {'retries': poolRecord['Retries']}) - else: - LOG.debug("Retries not overridden, default of 60 assumed.") - return extraSpecs - - def _delete_from_pool(self, storageConfigService, volumeInstance, - volumeName, deviceId, fastPolicyName, extraSpecs): - """Delete from pool (v2). 
- - :param storageConfigService: the storage config service - :param volumeInstance: the volume instance - :param volumeName: the volume Name - :param deviceId: the device ID of the volume - :param fastPolicyName: the FAST policy name(if it exists) - :param extraSpecs: extra specifications - :returns: int -- return code + :param array: the array serial number + :param device_id: the device id + :param volume_name: the volume name + :param extra_specs: the extra specifications :raises VolumeBackendAPIException: """ - storageSystemName = volumeInstance['SystemName'] - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - if fastPolicyName is not None: - defaultStorageGroupInstanceName = ( - self.masking.remove_device_from_default_storage_group( - self.conn, controllerConfigurationService, - volumeInstance.path, volumeName, fastPolicyName, - extraSpecs)) - if defaultStorageGroupInstanceName is None: - LOG.warning( - "The volume: %(volumename)s. was not first part of the " - "default storage group for FAST policy %(fastPolicyName)s" - ".", - {'volumename': volumeName, - 'fastPolicyName': fastPolicyName}) - # Check if it is part of another storage group. - self._remove_device_from_storage_group( - controllerConfigurationService, - volumeInstance.path, volumeName, extraSpecs) - - else: - # Check if volume is part of a storage group. 
- self._remove_device_from_storage_group( - controllerConfigurationService, - volumeInstance.path, volumeName, extraSpecs) - - LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool " - "ConfigService: %(service)s TheElement: %(vol_instance)s " - "DeviceId: %(deviceId)s.", - {'service': storageConfigService, - 'name': volumeName, - 'vol_instance': volumeInstance.path, - 'deviceId': deviceId}) - try: - rc = self.provision.delete_volume_from_pool( - self.conn, storageConfigService, volumeInstance.path, - volumeName, extraSpecs) - - except Exception: - # If we cannot successfully delete the volume then we want to - # return the volume to the default storage group. - if (fastPolicyName is not None and - defaultStorageGroupInstanceName is not None and - storageSystemName is not None): - assocDefaultStorageGroupName = ( - self.fast - .add_volume_to_default_storage_group_for_fast_policy( - self.conn, controllerConfigurationService, - volumeInstance, volumeName, fastPolicyName, - extraSpecs)) - if assocDefaultStorageGroupName is None: - LOG.error( - "Failed to Roll back to re-add volume %(volumeName)s " - "to default storage group for fast policy " - "%(fastPolicyName)s. Please contact your sysadmin to " - "get the volume returned to the default " - "storage group.", - {'volumeName': volumeName, - 'fastPolicyName': fastPolicyName}) - - errorMessage = (_("Failed to delete volume %(volumeName)s.") % - {'volumeName': volumeName}) - LOG.exception(errorMessage) - raise exception.VolumeBackendAPIException(data=errorMessage) - return rc - - def _delete_from_pool_v3(self, storageConfigService, volumeInstance, - volumeName, deviceId, extraSpecs, volume=None): - """Delete from pool (v3). 
- - :param storageConfigService: the storage config service - :param volumeInstance: the volume instance - :param volumeName: the volume Name - :param deviceId: the device ID of the volume - :param extraSpecs: extra specifications - :param volume: the cinder volume object - :returns: int -- return code - :raises VolumeBackendAPIException: - """ - storageSystemName = volumeInstance['SystemName'] - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystemName)) - # Check if it is part of a storage group and delete it # extra logic for case when volume is the last member. self.masking.remove_and_reset_members( - self.conn, controllerConfigurationService, volumeInstance, - volumeName, extraSpecs, None, False) + array, device_id, volume_name, extra_specs, False) - if volume and self.utils.is_replication_enabled(extraSpecs): - self.cleanup_lun_replication(self.conn, volume, volumeName, - volumeInstance, extraSpecs) - - LOG.debug("Delete Volume: %(name)s Method: EMCReturnToStoragePool " - "ConfigServic: %(service)s TheElement: %(vol_instance)s " - "DeviceId: %(deviceId)s.", - {'service': storageConfigService, - 'name': volumeName, - 'vol_instance': volumeInstance.path, - 'deviceId': deviceId}) try: - rc = self.provisionv3.delete_volume_from_pool( - self.conn, storageConfigService, volumeInstance.path, - volumeName, extraSpecs) + LOG.debug("Delete Volume: %(name)s. device_id: %(device_id)s.", + {'name': volume_name, 'device_id': device_id}) + self.provision.delete_volume_from_srp( + array, device_id, volume_name) - except Exception: + except Exception as e: # If we cannot successfully delete the volume, then we want to # return the volume to the default storage group, # which should be the SG it previously belonged to. 
- self.masking.return_volume_to_default_storage_group_v3( - self.conn, controllerConfigurationService, - volumeInstance, volumeName, extraSpecs) + self.masking.return_volume_to_default_storage_group( + array, device_id, volume_name, extra_specs) - errorMessage = (_("Failed to delete volume %(volumeName)s.") % - {'volumeName': volumeName}) - LOG.exception(errorMessage) - raise exception.VolumeBackendAPIException(data=errorMessage) - - return rc - - def _create_clone_v2(self, repServiceInstanceName, cloneVolume, - sourceVolume, sourceInstance, isSnapshot, - extraSpecs): - """Create a clone (v2). - - :param repServiceInstanceName: the replication service - :param cloneVolume: the clone volume object - :param sourceVolume: the source volume object - :param sourceInstance: the device ID of the volume - :param isSnapshot: check to see if it is a snapshot - :param extraSpecs: extra specifications - :returns: int -- return code - :raises VolumeBackendAPIException: - """ - # Check if the source volume contains any meta devices. - metaHeadInstanceName = self.utils.get_volume_meta_head( - self.conn, sourceInstance.path) - - if metaHeadInstanceName is None: # Simple volume. - return self._create_v2_replica_and_delete_clone_relationship( - repServiceInstanceName, cloneVolume, sourceVolume, - sourceInstance, None, extraSpecs, isSnapshot) - else: # Composite volume with meta device members. - # Check if the meta members capacity. 
- metaMemberInstanceNames = ( - self.utils.get_composite_elements( - self.conn, sourceInstance)) - volumeCapacities = self.utils.get_meta_members_capacity_in_byte( - self.conn, metaMemberInstanceNames) - LOG.debug("Volume capacities: %(metasizes)s.", - {'metasizes': volumeCapacities}) - if len(set(volumeCapacities)) == 1: - LOG.debug("Meta volume all of the same size.") - return self._create_v2_replica_and_delete_clone_relationship( - repServiceInstanceName, cloneVolume, sourceVolume, - sourceInstance, None, extraSpecs, isSnapshot) - - LOG.debug("Meta volumes are of different sizes, " - "%d different sizes.", len(set(volumeCapacities))) - - baseTargetVolumeInstance = None - for volumeSizeInbits in volumeCapacities: - if baseTargetVolumeInstance is None: # Create base volume. - baseVolumeName = "TargetBaseVol" - volume = {'size': int(self.utils.convert_bits_to_gbs( - volumeSizeInbits))} - _rc, baseVolumeDict, storageSystemName = ( - self._create_composite_volume( - volume, baseVolumeName, volumeSizeInbits, - extraSpecs, 1)) - baseTargetVolumeInstance = self.utils.find_volume_instance( - self.conn, baseVolumeDict, baseVolumeName) - LOG.debug("Base target volume %(targetVol)s created. 
" - "capacity in bits: %(capInBits)lu.", - {'capInBits': volumeSizeInbits, - 'targetVol': baseTargetVolumeInstance.path}) - else: # Create append volume - targetVolumeName = "MetaVol" - volume = {'size': int(self.utils.convert_bits_to_gbs( - volumeSizeInbits))} - storageConfigService = ( - self.utils.find_storage_configuration_service( - self.conn, storageSystemName)) - unboundVolumeInstance = ( - self._create_and_get_unbound_volume( - self.conn, storageConfigService, - baseTargetVolumeInstance.path, volumeSizeInbits, - extraSpecs)) - if unboundVolumeInstance is None: - exceptionMessage = (_( - "Error Creating unbound volume.")) - LOG.error(exceptionMessage) - # Remove target volume - self._delete_target_volume_v2(storageConfigService, - baseTargetVolumeInstance, - extraSpecs) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - # Append the new unbound volume to the - # base target composite volume. - baseTargetVolumeInstance = self.utils.find_volume_instance( - self.conn, baseVolumeDict, baseVolumeName) - try: - elementCompositionService = ( - self.utils.find_element_composition_service( - self.conn, storageSystemName)) - compositeType = self.utils.get_composite_type( - extraSpecs[COMPOSITETYPE]) - _rc, modifiedVolumeDict = ( - self._modify_and_get_composite_volume_instance( - self.conn, - elementCompositionService, - baseTargetVolumeInstance, - unboundVolumeInstance.path, - targetVolumeName, - compositeType, - extraSpecs)) - if modifiedVolumeDict is None: - exceptionMessage = (_( - "Error appending volume %(volumename)s to " - "target base volume.") - % {'volumename': targetVolumeName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - except Exception: - exceptionMessage = (_( - "Exception appending meta volume to target volume " - "%(volumename)s.") - % {'volumename': baseVolumeName}) - LOG.error(exceptionMessage) - # Remove append volume and target base volume - 
self._delete_target_volume_v2( - storageConfigService, unboundVolumeInstance, - extraSpecs) - self._delete_target_volume_v2( - storageConfigService, baseTargetVolumeInstance, - extraSpecs) - - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("Create V2 replica for meta members of different sizes.") - return self._create_v2_replica_and_delete_clone_relationship( - repServiceInstanceName, cloneVolume, sourceVolume, - sourceInstance, baseTargetVolumeInstance, extraSpecs, - isSnapshot) - - def _create_v2_replica_and_delete_clone_relationship( - self, repServiceInstanceName, cloneVolume, sourceVolume, - sourceInstance, targetInstance, extraSpecs, isSnapshot=False): - """Create a replica and delete the clone relationship. - - :param repServiceInstanceName: the replication service - :param cloneVolume: the clone volume object - :param sourceVolume: the source volume object - :param sourceInstance: the source volume instance - :param targetInstance: the target volume instance - :param extraSpecs: extra specifications - :param isSnapshot: check to see if it is a snapshot - :returns: int -- return code - :returns: dict -- cloneDict - """ - sourceName = sourceVolume['name'] - cloneId = cloneVolume['id'] - cloneName = self.utils.get_volume_element_name(cloneId) - - try: - rc, job = self.provision.create_element_replica( - self.conn, repServiceInstanceName, cloneName, sourceName, - sourceInstance, targetInstance, extraSpecs) - except Exception: - exceptionMessage = (_( - "Exception during create element replica. " - "Clone name: %(cloneName)s " - "Source name: %(sourceName)s " - "Extra specs: %(extraSpecs)s ") - % {'cloneName': cloneName, - 'sourceName': sourceName, - 'extraSpecs': extraSpecs}) - LOG.error(exceptionMessage) - - if targetInstance is not None: - # Check if the copy session exists. 
- storageSystem = targetInstance['SystemName'] - syncInstanceName = self.utils.find_sync_sv_by_volume( - self.conn, storageSystem, targetInstance, extraSpecs, - False) - if syncInstanceName is not None: - # Remove the Clone relationship. - rc, job = self.provision.delete_clone_relationship( - self.conn, repServiceInstanceName, syncInstanceName, - extraSpecs, True) - storageConfigService = ( - self.utils.find_storage_configuration_service( - self.conn, storageSystem)) - self._delete_target_volume_v2( - storageConfigService, targetInstance, extraSpecs) - - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - cloneDict = self.provision.get_volume_dict_from_job( - self.conn, job['Job']) - - fastPolicyName = extraSpecs[FASTPOLICY] - if isSnapshot: - if fastPolicyName is not None: - storageSystemName = sourceInstance['SystemName'] - self._add_clone_to_default_storage_group( - fastPolicyName, storageSystemName, cloneDict, cloneName, - extraSpecs) - LOG.info("Snapshot creation %(cloneName)s completed. " - "Source Volume: %(sourceName)s.", - {'cloneName': cloneName, - 'sourceName': sourceName}) - - return rc, cloneDict - - cloneVolume['provider_location'] = six.text_type(cloneDict) - syncInstanceName, storageSystemName = ( - self._find_storage_sync_sv_sv(cloneVolume, sourceVolume, - extraSpecs)) - - # Remove the Clone relationship so it can be used as a regular lun. - # 8 - Detach operation. - rc, job = self.provision.delete_clone_relationship( - self.conn, repServiceInstanceName, syncInstanceName, - extraSpecs) - if fastPolicyName is not None: - self._add_clone_to_default_storage_group( - fastPolicyName, storageSystemName, cloneDict, cloneName, - extraSpecs) - - return rc, cloneDict + error_message = (_("Failed to delete volume %(volume_name)s. 
" + "Exception received: %(e)s") % + {'volume_name': volume_name, + 'e': six.text_type(e)}) + LOG.exception(error_message) + raise exception.VolumeBackendAPIException(data=error_message) def get_target_wwns_from_masking_view( - self, storageSystem, volume, connector): + self, volume, connector): """Find target WWNs via the masking view. - :param storageSystem: the storage system name :param volume: volume to be attached :param connector: the connector dict :returns: list -- the target WWN list """ - targetWwns = [] - mvInstanceName = self.get_masking_view_by_volume(volume, connector) - if mvInstanceName is not None: - targetWwns = self.masking.get_target_wwns( - self.conn, mvInstanceName) + target_wwns = [] + host = connector['host'] + short_host_name = self.utils.get_host_short_name(host) + extra_specs = self._initial_setup(volume) + array = extra_specs[utils.ARRAY] + device_id = self._find_device_on_array(volume, extra_specs) + masking_view_list = self.get_masking_views_from_volume( + array, device_id, short_host_name) + if masking_view_list is not None: + portgroup = self.get_port_group_from_masking_view( + array, masking_view_list[0]) + target_wwns = self.rest.get_target_wwns(array, portgroup) LOG.info("Target wwns in masking view %(maskingView)s: " "%(targetWwns)s.", - {'maskingView': mvInstanceName, - 'targetWwns': six.text_type(targetWwns)}) - return targetWwns + {'maskingView': masking_view_list[0], + 'targetWwns': target_wwns}) + return target_wwns - def get_port_group_from_masking_view(self, maskingViewInstanceName): + def get_port_group_from_masking_view(self, array, maskingview_name): """Get the port groups in a masking view. 
- :param maskingViewInstanceName: masking view instance name - :returns: portGroupInstanceName + :param array: the array serial number + :param maskingview_name: masking view name + :returns: port group name """ - return self.masking.get_port_group_from_masking_view( - self.conn, maskingViewInstanceName) + return self.rest.get_element_from_masking_view( + array, maskingview_name, portgroup=True) - def get_initiator_group_from_masking_view(self, maskingViewInstanceName): + def get_initiator_group_from_masking_view(self, array, maskingview_name): """Get the initiator group in a masking view. - :param maskingViewInstanceName: masking view instance name - :returns: initiatorGroupInstanceName + :param array: the array serial number + :param maskingview_name: masking view name + :returns: initiator group name """ - return self.masking.get_initiator_group_from_masking_view( - self.conn, maskingViewInstanceName) + return self.rest.get_element_from_masking_view( + array, maskingview_name, host=True) - def get_masking_view_by_volume(self, volume, connector): - """Given volume, retrieve the masking view instance name. + def get_common_masking_views(self, array, portgroup_name, + initiator_group_name): + """Get common masking views, if any. 
- :param volume: the volume - :param connector: the connector object - :returns: maskingviewInstanceName + :param array: the array serial number + :param portgroup_name: port group name + :param initiator_group_name: ig name + :return: list of masking views """ - LOG.debug("Finding Masking View for volume %(volume)s.", - {'volume': volume}) - volumeInstance = self._find_lun(volume) - return self.masking.get_masking_view_by_volume( - self.conn, volumeInstance, connector) + LOG.debug("Finding Masking Views for port group %(pg)s and %(ig)s.", + {'pg': portgroup_name, 'ig': initiator_group_name}) + masking_view_list = self.rest.get_common_masking_views( + array, portgroup_name, initiator_group_name) + return masking_view_list - def get_masking_views_by_port_group(self, portGroupInstanceName): - """Given port group, retrieve the masking view instance name. + def _get_ip_and_iqn(self, array, port): + """Get ip and iqn from the director port. - :param portGroupInstanceName: port group instance name - :returns: list -- maskingViewInstanceNames + :param array: the array serial number + :param port: the director port on the array + :returns: ip_and_iqn - dict """ - LOG.debug("Finding Masking Views for port group %(pg)s.", - {'pg': portGroupInstanceName}) - return self.masking.get_masking_views_by_port_group( - self.conn, portGroupInstanceName) + ip_iqn_list = [] + ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( + array, port) + for ip in ip_addresses: + ip_iqn_list.append({'iqn': iqn, 'ip': ip}) + return ip_iqn_list - def get_masking_views_by_initiator_group( - self, initiatorGroupInstanceName): - """Given initiator group, retrieve the masking view instance name. + def _find_ip_and_iqns(self, array, port_group_name): + """Find the list of ips and iqns for the ports in a portgroup. 
- :param initiatorGroupInstanceName: initiator group instance name - :returns: list -- maskingViewInstanceNames + :param array: the array serial number + :param port_group_name: the portgroup name + :returns: ip_and_iqn - list of dicts """ - LOG.debug("Finding Masking Views for initiator group %(ig)s.", - {'ig': initiatorGroupInstanceName}) - return self.masking.get_masking_views_by_initiator_group( - self.conn, initiatorGroupInstanceName) + ips_and_iqns = [] + LOG.debug("The portgroup name for iscsiadm is %(pg)s.", + {'pg': port_group_name}) + ports = self.rest.get_port_ids(array, port_group_name) + for port in ports: + ip_and_iqn = self._get_ip_and_iqn(array, port) + ips_and_iqns.extend(ip_and_iqn) + return ips_and_iqns - def _create_replica_v3( - self, repServiceInstanceName, cloneVolume, - sourceVolume, sourceInstance, isSnapshot, extraSpecs): + def _create_replica( + self, array, clone_volume, source_device_id, + extra_specs, snap_name=None): """Create a replica. - V3 specific function, create replica for source volume, - including clone and snapshot. - - :param repServiceInstanceName: the replication service - :param cloneVolume: the clone volume object - :param sourceVolume: the source volume object - :param sourceInstance: the device ID of the volume - :param isSnapshot: boolean -- check to see if it is a snapshot - :param extraSpecs: extra specifications + Create replica for source volume, source can be volume or snapshot. + :param array: the array serial number + :param clone_volume: the clone volume object + :param source_device_id: the device ID of the volume + :param extra_specs: extra specifications + :param snap_name: the snapshot name - optional :returns: int -- return code :returns: dict -- cloneDict """ - cloneId = cloneVolume['id'] - cloneName = self.utils.get_volume_element_name(cloneId) - # SyncType 7: snap, VG3R default snapshot is snapVx. - syncType = self.utils.get_num(SNAPVX, '16') - # Operation 9: Dissolve for snapVx. 
- operation = self.utils.get_num(DISSOLVE_SNAPVX, '16') - rsdInstance = None - targetInstance = None - copyState = self.utils.get_num(4, '16') - if isSnapshot: - rsdInstance = self.utils.set_target_element_supplier_in_rsd( - self.conn, repServiceInstanceName, SNAPVX_REPLICATION_TYPE, - CREATE_NEW_TARGET, extraSpecs) - else: - targetInstance = self._create_duplicate_volume( - sourceInstance, cloneName, extraSpecs) - + target_device_id = None + clone_id = clone_volume.id + clone_name = self.utils.get_volume_element_name(clone_id) + create_snap = False + # VMAX supports using a target volume that is bigger than + # the source volume, so we create the target volume the desired + # size at this point to avoid having to extend later try: - rc, job = ( - self.provisionv3.create_element_replica( - self.conn, repServiceInstanceName, cloneName, syncType, - sourceInstance, extraSpecs, targetInstance, rsdInstance, - copyState)) - except Exception: - LOG.warning( - "Clone failed on V3. Cleaning up the target volume. " - "Clone name: %(cloneName)s ", - {'cloneName': cloneName}) - if targetInstance: + clone_dict = self._create_volume( + clone_name, clone_volume.size, extra_specs) + target_device_id = clone_dict['device_id'] + LOG.info("The target device id is: %(device_id)s.", + {'device_id': target_device_id}) + if not snap_name: + snap_name = self.utils.get_temp_snap_name( + clone_name, source_device_id) + create_snap = True + self.provision.create_volume_replica( + array, source_device_id, target_device_id, + snap_name, extra_specs, create_snap) + except Exception as e: + if target_device_id: + LOG.warning("Create replica failed. Cleaning up the target " + "volume. Clone name: %(cloneName)s, Error " + "received is %(e)s.", + {'cloneName': clone_name, 'e': e}) self._cleanup_target( - repServiceInstanceName, targetInstance, extraSpecs) + array, target_device_id, source_device_id, + clone_name, snap_name, extra_specs) # Re-throw the exception. 
- raise + raise + return clone_dict - cloneDict = self.provisionv3.get_volume_dict_from_job( - self.conn, job['Job']) - targetVolumeInstance = ( - self.provisionv3.get_volume_from_job(self.conn, job['Job'])) - LOG.info("The target instance device id is: %(deviceid)s.", - {'deviceid': targetVolumeInstance['DeviceID']}) + def _cleanup_target(self, array, target_device_id, source_device_id, + clone_name, snap_name, extra_specs): + """Cleanup target volume on failed clone/ snapshot creation. - if not isSnapshot: - cloneVolume['provider_location'] = six.text_type(cloneDict) - - syncInstanceName, _storageSystem = ( - self._find_storage_sync_sv_sv(cloneVolume, sourceVolume, - extraSpecs, True)) - - rc, job = self.provisionv3.break_replication_relationship( - self.conn, repServiceInstanceName, syncInstanceName, - operation, extraSpecs) - return rc, cloneDict - - def _cleanup_target( - self, repServiceInstanceName, targetInstance, extraSpecs): - """cleanup target after exception - - :param repServiceInstanceName: the replication service - :param targetInstance: the target instance - :param extraSpecs: extra specifications + :param array: the array serial number + :param target_device_id: the target device ID + :param source_device_id: the source device ID + :param clone_name: the name of the clone volume + :param extra_specs: the extra specifications """ - storageSystem = targetInstance['SystemName'] - syncInstanceName = self.utils.find_sync_sv_by_volume( - self.conn, storageSystem, targetInstance, False) - if syncInstanceName is not None: - # Break the clone relationship. 
- self.provisionv3.break_replication_relationship( - self.conn, repServiceInstanceName, syncInstanceName, - DISSOLVE_SNAPVX, extraSpecs, True) - storageConfigService = ( - self.utils.find_storage_configuration_service( - self.conn, storageSystem)) - deviceId = targetInstance['DeviceID'] - volumeName = targetInstance['Name'] - self._delete_from_pool_v3( - storageConfigService, targetInstance, volumeName, - deviceId, extraSpecs) + snap_session = self.rest._get_sync_session( + array, source_device_id, snap_name, target_device_id) + if snap_session: + self.provision.break_replication_relationship( + array, target_device_id, source_device_id, + snap_name, extra_specs) + self._delete_from_srp( + array, target_device_id, clone_name, extra_specs) - def _delete_cg_and_members( - self, storageSystem, cgsnapshot, modelUpdate, volumes, isV3, - extraSpecs): - """Helper function to delete a consistencygroup and its member volumes. + def _sync_check(self, array, device_id, volume_name, extra_specs, + tgt_only=False): + """Check if volume is part of a SnapVx sync process. 
- :param storageSystem: storage system - :param cgsnapshot: consistency group snapshot - :param modelUpdate: dict -- the model update dict - :param volumes: the list of member volumes - :param isV3: boolean - :param extraSpecs: extra specifications - :returns: dict -- modelUpdate - :returns: list -- the updated list of member volumes - :raises VolumeBackendAPIException: + :param array: the array serial number + :param device_id: volume instance + :param volume_name: volume name + :param tgt_only: Flag - return only sessions where device is target + :param extra_specs: extra specifications """ - replicationService = self.utils.find_replication_service( - self.conn, storageSystem) - - storageConfigservice = ( - self.utils.find_storage_configuration_service( - self.conn, storageSystem)) - cgInstanceName, cgName = self._find_consistency_group( - replicationService, six.text_type(cgsnapshot['id'])) - - if cgInstanceName is None: - LOG.error("Cannot find CG group %(cgName)s.", - {'cgName': cgsnapshot['id']}) - modelUpdate = {'status': fields.ConsistencyGroupStatus.DELETED} - return modelUpdate, [] - - memberInstanceNames = self._get_members_of_replication_group( - cgInstanceName) - - self.provision.delete_consistency_group( - self.conn, replicationService, cgInstanceName, cgName, - extraSpecs) - - if memberInstanceNames: - try: - controllerConfigurationService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystem)) - for memberInstanceName in memberInstanceNames: - self._remove_device_from_storage_group( - controllerConfigurationService, - memberInstanceName, 'Member Volume', extraSpecs) - LOG.debug("Deleting CG members. 
CG: %(cg)s " - "%(numVols)lu member volumes: %(memVols)s.", - {'cg': cgInstanceName, - 'numVols': len(memberInstanceNames), - 'memVols': memberInstanceNames}) - if isV3: - self.provisionv3.delete_volume_from_pool( - self.conn, storageConfigservice, - memberInstanceNames, None, extraSpecs) - else: - self.provision.delete_volume_from_pool( - self.conn, storageConfigservice, - memberInstanceNames, None, extraSpecs) - for volumeRef in volumes: - volumeRef['status'] = 'deleted' - except Exception: - for volumeRef in volumes: - volumeRef['status'] = 'error_deleting' - modelUpdate['status'] = 'error_deleting' - return modelUpdate, volumes - - def _delete_target_volume_v2( - self, storageConfigService, targetVolumeInstance, extraSpecs): - """Helper function to delete the clone target volume instance. - - :param storageConfigService: storage configuration service instance - :param targetVolumeInstance: clone target volume instance - :param extraSpecs: extra specifications - """ - deviceId = targetVolumeInstance['DeviceID'] - volumeName = targetVolumeInstance['Name'] - rc = self._delete_from_pool(storageConfigService, - targetVolumeInstance, - volumeName, deviceId, - extraSpecs[FASTPOLICY], - extraSpecs) - return rc - - def _validate_pool(self, volume, extraSpecs=None, host=None): - """Get the pool from volume['host']. - - There may be backward compatibiliy concerns, so putting in a - check to see if a version has been added to provider_location. - If it has, we know we are at the current version, if not, we - assume it was created pre 'Pool Aware Scheduler' feature. - - :param volume: the volume Object - :param extraSpecs: extraSpecs provided in the volume type - :returns: string -- pool - :raises VolumeBackendAPIException: - """ - pool = None - # Volume is None in CG ops. - if volume is None: - return pool - - if host is None: - host = volume['host'] - - # This check is for all operations except a create. 
- # On a create provider_location is None - try: - if volume['provider_location']: - version = self._get_version_from_provider_location( - volume['provider_location']) - if not version: - return pool - except KeyError: - return pool - try: - pool = volume_utils.extract_host(host, 'pool') - if pool: - LOG.debug("Pool from volume['host'] is %(pool)s.", - {'pool': pool}) - # Check if it matches with the poolname if it is provided - # in the extra specs - if extraSpecs is not None: - if 'pool_name' in extraSpecs: - if extraSpecs['pool_name'] != pool: - exceptionMessage = (_( - "Pool from volume['host'] %(host)s doesn't" - " match with pool_name in extraSpecs.") - % {'host': volume['host']}) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - exceptionMessage = (_( - "Pool from volume['host'] %(host)s not found.") - % {'host': volume['host']}) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - except Exception as ex: - exceptionMessage = (_( - "Pool from volume['host'] failed with: %(ex)s.") - % {'ex': ex}) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - return pool - - def _get_version_from_provider_location(self, loc): - """Get the version from the provider location. - - :param loc: the provider_location dict - :returns: version or None - """ - version = None - try: - if isinstance(loc, six.string_types): - name = ast.literal_eval(loc) - version = name['version'] - except KeyError: - pass - return version + snap_vx_sessions = self.rest.find_snap_vx_sessions( + array, device_id, tgt_only) + if snap_vx_sessions: + for session in snap_vx_sessions: + source = session['source_vol'] + snap_name = session['snap_name'] + targets = session['target_vol_list'] + for target in targets: + # Break the replication relationship + LOG.debug("Unlinking source from target. 
Source: " + "%(volume)s, Target: %(target)s.", + {'volume': volume_name, 'target': target}) + self.provision.break_replication_relationship( + array, target, source, snap_name, + extra_specs, wait_for_sync=True) + if 'temp' in snap_name: + self.provision.delete_temp_volume_snap( + array, snap_name, source) def manage_existing(self, volume, external_ref): """Manages an existing VMAX Volume (import to Cinder). Renames the existing volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. - :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: dict -- model_update - :raises: VolumeBackendAPIException """ - extraSpecs = self._initial_setup(volume) - self.conn = self._get_ecom_connection() - arrayName, deviceId = self.utils.get_array_and_device_id( + LOG.info("Beginning manage existing volume process") + array, device_id = self.utils.get_array_and_device_id( volume, external_ref) + volume_id = volume.id + # Check if the existing volume is valid for cinder management + self._check_lun_valid_for_cinder_management( + array, device_id, volume_id, external_ref) + # Rename the volume + volume_name = self.utils.get_volume_element_name(volume_id) + LOG.debug("Rename volume %(vol)s to %(elementName)s.", + {'vol': volume_id, + 'elementName': volume_name}) + self.rest.rename_volume(array, device_id, volume_name) - self.utils.check_volume_no_fast(extraSpecs) + provider_location = {'device_id': device_id, 'array': array} - volumeInstanceName = ( - self.utils.find_volume_by_device_id_on_array( - arrayName, deviceId)) - - self.utils.check_volume_not_in_masking_view( - self.conn, volumeInstanceName, deviceId) - - cinderPoolInstanceName, storageSystemName = ( - self._get_pool_and_storage_system(extraSpecs)) - - self.utils.check_volume_not_replication_source( - self.conn, storageSystemName, deviceId) - - self.utils.check_is_volume_in_cinder_managed_pool( - 
self.conn, volumeInstanceName, cinderPoolInstanceName, - deviceId) - - volumeId = volume.name - volumeElementName = self.utils.get_volume_element_name(volumeId) - LOG.debug("Rename volume %(vol)s to %(volumeId)s.", - {'vol': volumeInstanceName, - 'volumeId': volumeElementName}) - - volumeInstance = self.utils.rename_volume(self.conn, - volumeInstanceName, - volumeElementName) - keys = {} - volpath = volumeInstance.path - keys['CreationClassName'] = volpath['CreationClassName'] - keys['SystemName'] = volpath['SystemName'] - keys['DeviceID'] = volpath['DeviceID'] - keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] - - provider_location = {} - provider_location['classname'] = volpath['CreationClassName'] - provider_location['keybindings'] = keys - - model_update = self.set_volume_replication_if_enabled( - self.conn, extraSpecs, volume, provider_location) - - volumeDisplayName = volume.display_name - model_update.update( - {'display_name': volumeDisplayName}) - model_update.update( - {'provider_location': six.text_type(provider_location)}) + model_update = {'provider_location': six.text_type(provider_location), + 'display_name': volume_name} return model_update - def set_volume_replication_if_enabled(self, conn, extraSpecs, - volume, provider_location): - """Set volume replication if enabled + def _check_lun_valid_for_cinder_management( + self, array, device_id, volume_id, external_ref): + """Check if a volume is valid for cinder management. - If volume replication is enabled, set relevant - values in associated model_update dict. 
- - :param conn: connection to the ecom server - :param extraSpecs: additional info - :param volume: the volume object - :param provider_location: volume classname & keybindings - :return: updated model_update + :param array: the array serial number + :param device_id: the device id + :param volume_id: the cinder volume id + :param external_ref: the external reference + :raises ManageExistingInvalidReference, ManageExistingAlreadyManaged: """ - model_update = {} - if self.utils.is_replication_enabled(extraSpecs): - replication_status, replication_driver_data = ( - self.setup_volume_replication( - conn, volume, provider_location, extraSpecs)) - model_update.update( - {'replication_status': replication_status}) - model_update.update( - {'replication_driver_data': six.text_type( - replication_driver_data)}) + # Ensure the volume exists on the array + volume_details = self.rest.get_volume(array, device_id) + if not volume_details: + msg = (_('Unable to retrieve volume details from array for ' + 'device %(device_id)s') % {'device_id': device_id}) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) - return model_update + # Check if volume is already cinder managed + if volume_details.get('volume_identifier'): + volume_identifier = volume_details['volume_identifier'] + if volume_identifier.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX): + raise exception.ManageExistingAlreadyManaged( + volume_ref=volume_id) + + # Check if the volume is attached by checking if in any masking view. + storagegrouplist = self.rest.get_storage_groups_from_volume( + array, device_id) + for sg in storagegrouplist: + mvs = self.rest.get_masking_views_from_storage_group( + array, sg) + if mvs: + msg = (_("Unable to import volume %(device_id)s to cinder. 
" + "Volume is in masking view(s): %(mv)s.") + % {'device_id': device_id, 'mv': mvs}) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) + + # Check if there are any replication sessions associated + # with the volume. + snapvx_tgt, snapvx_src, rdf = self.rest.is_vol_in_rep_session( + array, device_id) + if snapvx_tgt or snapvx_src or rdf: + msg = (_("Unable to import volume %(device_id)s to cinder. " + "It is part of a replication session.") + % {'device_id': device_id}) + raise exception.ManageExistingInvalidReference( + existing_ref=external_ref, reason=msg) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing VMAX volume to manage_existing. @@ -4714,1342 +1499,31 @@ class VMAXCommon(object): """ LOG.debug("Volume in manage_existing_get_size: %(volume)s.", {'volume': volume}) - arrayName, deviceId = self.utils.get_array_and_device_id(volume, - external_ref) - volumeInstanceName = ( - self.utils.find_volume_by_device_id_on_array(arrayName, deviceId)) - - try: - volumeInstance = self.conn.GetInstance(volumeInstanceName) - byteSize = self.utils.get_volume_size(self.conn, volumeInstance) - fByteSize = float(byteSize) - gbSize = int(fByteSize / units.Gi) - - except Exception: - exceptionMessage = (_("Volume %(deviceID)s not found.") - % {'deviceID': deviceId}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - LOG.debug( - "Size of volume %(deviceID)s is %(volumeSize)s GB", - {'deviceID': deviceId, - 'volumeSize': gbSize}) - - return gbSize + array, device_id = self.utils.get_array_and_device_id( + volume, external_ref) + size = float(self.rest.get_size_of_device_on_array(array, device_id)) + LOG.debug("Size of volume %(device_id)s is %(volumeSize)s GB.", + {'device_id': device_id, 'volumeSize': int(size)}) + return int(size) def unmanage(self, volume): """Export VMAX volume from Cinder. Leave the volume intact on the backend array. 
- :param volume: the volume object :raises VolumeBackendAPIException: """ - volumeName = volume['name'] - volumeId = volume['id'] - LOG.debug("Unmanage volume %(name)s, id=%(id)s", - {'name': volumeName, - 'id': volumeId}) - self._initial_setup(volume) - self.conn = self._get_ecom_connection() - volumeInstance = self._find_lun(volume) - if volumeInstance is None: - exceptionMessage = (_("Cannot find Volume: %(id)s. " - "unmanage operation. Exiting...") - % {'id': volumeId}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - # Rename the volume to volumeId, thus remove the 'OS-' prefix. - self.utils.rename_volume(self.conn, volumeInstance, volumeId) - - def update_consistencygroup(self, group, add_volumes, - remove_volumes): - """Updates LUNs in consistency group. - - :param group: storage configuration service instance - :param add_volumes: the volumes uuids you want to add to the CG - :param remove_volumes: the volumes uuids you want to remove from - the CG - """ - LOG.info("Update Consistency Group: %(group)s. 
" - "This adds and/or removes volumes from a CG.", - {'group': group['id']}) - - modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - cg_name = self._update_consistency_group_name(group) - add_vols = [vol for vol in add_volumes] if add_volumes else [] - add_instance_names = self._get_volume_instance_names(add_vols) - remove_vols = [vol for vol in remove_volumes] if remove_volumes else [] - remove_instance_names = self._get_volume_instance_names(remove_vols) - self.conn = self._get_ecom_connection() - - try: - replicationService, storageSystem, __, __ = ( - self._get_consistency_group_utils(self.conn, group)) - cgInstanceName, __ = ( - self._find_consistency_group( - replicationService, six.text_type(group['id']))) - if cgInstanceName is None: - raise exception.ConsistencyGroupNotFound( - consistencygroup_id=cg_name) - # Add volume(s) to a consistency group - interval_retries_dict = self.utils.get_default_intervals_retries() - if add_instance_names: - self.provision.add_volume_to_cg( - self.conn, replicationService, cgInstanceName, - add_instance_names, cg_name, None, - interval_retries_dict) - # Remove volume(s) from a consistency group - if remove_instance_names: - self.provision.remove_volume_from_cg( - self.conn, replicationService, cgInstanceName, - remove_instance_names, cg_name, None, - interval_retries_dict) - except exception.ConsistencyGroupNotFound: - raise - except Exception as ex: - LOG.error("Exception: %(ex)s", {'ex': ex}) - exceptionMessage = (_("Failed to update consistency group:" - " %(cgName)s.") - % {'cgName': group['id']}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return modelUpdate, None, None - - def _get_volume_instance_names(self, volumes): - """Get volume instance names from volume. 
- - :param volumes: volume objects - :returns: volume instance names - """ - volumeInstanceNames = [] - for volume in volumes: - volumeInstance = self._find_lun(volume) - if volumeInstance is None: - LOG.error("Volume %(name)s not found on the array.", - {'name': volume['name']}) - else: - volumeInstanceNames.append(volumeInstance.path) - return volumeInstanceNames - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot, snapshots, source_cg, - source_vols): - """Creates the consistency group from source. - - :param context: the context - :param group: the consistency group object to be created - :param volumes: volumes in the consistency group - :param cgsnapshot: the source consistency group snapshot - :param snapshots: snapshots of the source volumes - :param source_cg: the source consistency group - :param source_vols: the source vols - :returns: model_update, volumes_model_update - model_update is a dictionary of cg status - volumes_model_update is a list of dictionaries of volume - update - """ - if cgsnapshot: - source_vols_or_snapshots = snapshots - source_id = cgsnapshot['id'] - elif source_cg: - source_vols_or_snapshots = source_vols - source_id = source_cg['id'] + volume_name = volume.name + volume_id = volume.id + LOG.info("Unmanage volume %(name)s, id=%(id)s", + {'name': volume_name, 'id': volume_id}) + extra_specs = self._initial_setup(volume) + device_id = self._find_device_on_array(volume, extra_specs) + if device_id is None: + LOG.error("Cannot find Volume: %(id)s for " + "unmanage operation. Exiting...", + {'id': volume_id}) else: - exceptionMessage = (_("Must supply either CG snaphot or " - "a source CG.")) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("Enter EMCVMAXCommon::create_consistencygroup_from_src. 
" - "Group to be created: %(cgId)s, " - "Source : %(SourceCGId)s.", - {'cgId': group['id'], - 'SourceCGId': source_id}) - - self.create_consistencygroup(context, group) - - modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE} - - try: - replicationService, storageSystem, extraSpecsDictList, isV3 = ( - self._get_consistency_group_utils(self.conn, group)) - if replicationService is None: - exceptionMessage = (_( - "Cannot find replication service on system %s.") % - storageSystem) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - targetCgInstanceName, targetCgName = self._find_consistency_group( - replicationService, six.text_type(group['id'])) - LOG.debug("Create CG %(targetCg)s from snapshot.", - {'targetCg': targetCgInstanceName}) - dictOfVolumeDicts = {} - targetVolumeNames = {} - for volume, source_vol_or_snapshot in zip( - volumes, source_vols_or_snapshots): - if 'size' in source_vol_or_snapshot: - volumeSizeInbits = int(self.utils.convert_gb_to_bits( - source_vol_or_snapshot['size'])) - else: - volumeSizeInbits = int(self.utils.convert_gb_to_bits( - source_vol_or_snapshot['volume_size'])) - for extraSpecsDict in extraSpecsDictList: - if volume['volume_type_id'] in extraSpecsDict.values(): - extraSpecs = extraSpecsDict.get('extraSpecs') - if 'pool_name' in extraSpecs: - extraSpecs = self.utils.update_extra_specs( - extraSpecs) - # Create a random UUID and use it as volume name - targetVolumeName = six.text_type(uuid.uuid4()) - volumeDict = self._create_vol_and_add_to_cg( - volumeSizeInbits, replicationService, - targetCgInstanceName, targetCgName, - source_vol_or_snapshot['id'], - extraSpecs, targetVolumeName) - dictOfVolumeDicts[volume['id']] = volumeDict - targetVolumeNames[volume['id']] = targetVolumeName - - interval_retries_dict = self.utils.get_default_intervals_retries() - self._break_replica_group_relationship( - replicationService, source_id, group['id'], - targetCgInstanceName, storageSystem, interval_retries_dict, - 
isV3) - except Exception: - exceptionMessage = (_("Failed to create CG %(cgName)s " - "from source %(cgSnapshot)s.") - % {'cgName': group['id'], - 'cgSnapshot': source_id}) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - volumes_model_update = self.utils.get_volume_model_updates( - volumes, group['id'], modelUpdate['status']) - - # Update the provider_location - for volume_model_update in volumes_model_update: - if volume_model_update['id'] in dictOfVolumeDicts: - volume_model_update.update( - {'provider_location': six.text_type( - dictOfVolumeDicts[volume_model_update['id']])}) - - # Update the volumes_model_update with admin_metadata - self.update_admin_metadata(volumes_model_update, - key='targetVolumeName', - values=targetVolumeNames) - - return modelUpdate, volumes_model_update - - def update_admin_metadata( - self, volumes_model_update, key, values): - """Update the volume_model_updates with admin metadata - - :param volumes_model_update: List of volume model updates - :param key: Key to be updated in the admin_metadata - :param values: Dictionary of values per volume id - """ - for volume_model_update in volumes_model_update: - volume_id = volume_model_update['id'] - if volume_id in values: - admin_metadata = {} - admin_metadata.update({key: values[volume_id]}) - volume_model_update.update( - {'admin_metadata': admin_metadata}) - - def _break_replica_group_relationship( - self, replicationService, source_id, group_id, - targetCgInstanceName, storageSystem, extraSpecs, isV3): - """Breaks the replica group relationship. 
- - :param replicationService: replication service - :param source_id: source identifier - :param group_id: group identifier - :param targetCgInstanceName: target CG instance - :param storageSystem: storage system - :param extraSpecs: additional info - """ - sourceCgInstanceName, sourceCgName = self._find_consistency_group( - replicationService, source_id) - if sourceCgInstanceName is None: - exceptionMessage = (_("Cannot find source CG instance. " - "consistencygroup_id: %s.") % - source_id) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - relationName = self.utils.truncate_string(group_id, TRUNCATE_5) - if isV3: - self.provisionv3.create_group_replica( - self.conn, replicationService, sourceCgInstanceName, - targetCgInstanceName, relationName, extraSpecs) - else: - self.provision.create_group_replica( - self.conn, replicationService, sourceCgInstanceName, - targetCgInstanceName, relationName, extraSpecs) - # Break the replica group relationship. - rgSyncInstanceName = self.utils.find_group_sync_rg_by_target( - self.conn, storageSystem, targetCgInstanceName, extraSpecs, - True) - - if rgSyncInstanceName is not None: - if isV3: - # Operation 9: dissolve for snapVx - operation = self.utils.get_num(9, '16') - self.provisionv3.break_replication_relationship( - self.conn, replicationService, rgSyncInstanceName, - operation, extraSpecs) - else: - self.provision.delete_clone_relationship( - self.conn, replicationService, - rgSyncInstanceName, extraSpecs) - - def _create_vol_and_add_to_cg( - self, volumeSizeInbits, replicationService, - targetCgInstanceName, targetCgName, source_id, - extraSpecs, targetVolumeName): - """Creates volume and adds to CG. 
- - :param context: the context - :param volumeSizeInbits: volume size in bits - :param replicationService: replication service - :param targetCgInstanceName: target cg instance - :param targetCgName: target cg name - :param source_id: source identifier - :param extraSpecs: additional info - :param targetVolumeName: volume name for the target volume - :returns volumeDict: volume dictionary for the newly created volume - """ - volume = {'size': int(self.utils.convert_bits_to_gbs( - volumeSizeInbits))} - if extraSpecs[ISV3]: - _rc, volumeDict, _storageSystemName = ( - self._create_v3_volume( - volume, targetVolumeName, volumeSizeInbits, - extraSpecs)) - else: - _rc, volumeDict, _storageSystemName = ( - self._create_composite_volume( - volume, targetVolumeName, volumeSizeInbits, - extraSpecs)) - targetVolumeInstance = self.utils.find_volume_instance( - self.conn, volumeDict, targetVolumeName) - LOG.debug("Create target volume for member snapshot. " - "Source : %(snapshot)s, " - "Target volume: %(targetVol)s.", - {'snapshot': source_id, - 'targetVol': targetVolumeInstance.path}) - - self.provision.add_volume_to_cg(self.conn, - replicationService, - targetCgInstanceName, - targetVolumeInstance.path, - targetCgName, - targetVolumeName, - extraSpecs) - return volumeDict - - def _find_ip_protocol_endpoints(self, conn, storageSystemName, - portgroupname): - """Find the IP protocol endpoint for ISCSI. 
- - :param storageSystemName: the system name - :param portgroupname: the portgroup name - :returns: foundIpAddresses - """ - LOG.debug("The portgroup name for iscsiadm is %(pg)s", - {'pg': portgroupname}) - foundipaddresses = [] - configservice = ( - self.utils.find_controller_configuration_service( - conn, storageSystemName)) - portgroupinstancename = ( - self.masking.find_port_group(conn, configservice, portgroupname)) - iscsiendpointinstancenames = ( - self.utils.get_iscsi_protocol_endpoints( - conn, portgroupinstancename)) - - for iscsiendpointinstancename in iscsiendpointinstancenames: - tcpendpointinstancenames = ( - self.utils.get_tcp_protocol_endpoints( - conn, iscsiendpointinstancename)) - for tcpendpointinstancename in tcpendpointinstancenames: - ipendpointinstancenames = ( - self.utils.get_ip_protocol_endpoints( - conn, tcpendpointinstancename)) - endpoint = {} - for ipendpointinstancename in ipendpointinstancenames: - endpoint = self.get_ip_and_iqn(conn, endpoint, - ipendpointinstancename) - if bool(endpoint): - foundipaddresses.append(endpoint) - return foundipaddresses - - def _extend_v3_volume(self, volumeInstance, volumeName, newSize, - extraSpecs): - """Extends a VMAX3 volume. - - :param volumeInstance: volume instance - :param volumeName: volume name - :param newSize: new size the volume will be increased to - :param extraSpecs: extra specifications - :returns: int -- return code - :returns: volumeDict - """ - new_size_in_bits = int(self.utils.convert_gb_to_bits(newSize)) - storageConfigService = self.utils.find_storage_configuration_service( - self.conn, volumeInstance['SystemName']) - volumeDict, rc = self.provisionv3.extend_volume_in_SG( - self.conn, storageConfigService, volumeInstance.path, - volumeName, new_size_in_bits, extraSpecs) - - return rc, volumeDict - - def _create_duplicate_volume( - self, sourceInstance, cloneName, extraSpecs): - """Create a volume in the same dimensions of the source volume. 
- - :param sourceInstance: the source volume instance - :param cloneName: the user supplied snap name - :param extraSpecs: additional info - :returns: targetInstance - """ - numOfBlocks = sourceInstance['NumberOfBlocks'] - blockSize = sourceInstance['BlockSize'] - volumeSizeInbits = numOfBlocks * blockSize - - volume = {'size': - int(self.utils.convert_bits_to_gbs(volumeSizeInbits))} - _rc, volumeDict, _storageSystemName = ( - self._create_v3_volume( - volume, cloneName, volumeSizeInbits, extraSpecs)) - targetInstance = self.utils.find_volume_instance( - self.conn, volumeDict, cloneName) - LOG.debug("Create replica target volume " - "Source Volume: %(sourceVol)s, " - "Target Volume: %(targetVol)s.", - {'sourceVol': sourceInstance.path, - 'targetVol': targetInstance.path}) - return targetInstance - - def get_ip_and_iqn(self, conn, endpoint, ipendpointinstancename): - """Get ip and iqn from the endpoint. - - :param conn: ecom connection - :param endpoint: end point - :param ipendpointinstancename: ip endpoint - :returns: endpoint - """ - if ('iSCSIProtocolEndpoint' in six.text_type( - ipendpointinstancename['CreationClassName'])): - iqn = self.utils.get_iqn(conn, ipendpointinstancename) - if iqn: - endpoint['iqn'] = iqn - elif ('IPProtocolEndpoint' in six.text_type( - ipendpointinstancename['CreationClassName'])): - ipaddress = ( - self.utils.get_iscsi_ip_address( - conn, ipendpointinstancename)) - if ipaddress: - endpoint['ip'] = ipaddress - - return endpoint - - def _get_consistency_group_utils(self, conn, group): - """Standard utility for consistency group. 
- - :param conn: ecom connection - :param group: the consistency group object to be created - :return: replicationService, storageSystem, extraSpecs, isV3 - """ - storageSystems = set() - extraSpecsDictList = [] - isV3 = False - - if isinstance(group, group_obj.Group): - for volume_type in group.volume_types: - extraSpecsDict, storageSystems, isV3 = ( - self._update_extra_specs_list( - volume_type.extra_specs, len(group.volume_types), - volume_type.id)) - extraSpecsDictList.append(extraSpecsDict) - elif isinstance(group, cg_obj.ConsistencyGroup): - volumeTypeIds = group.volume_type_id.split(",") - volumeTypeIds = list(filter(None, volumeTypeIds)) - for volumeTypeId in volumeTypeIds: - if volumeTypeId: - extraSpecs = self.utils.get_volumetype_extraspecs( - None, volumeTypeId) - extraSpecsDict, storageSystems, isV3 = ( - self._update_extra_specs_list( - extraSpecs, len(volumeTypeIds), - volumeTypeId)) - extraSpecsDictList.append(extraSpecsDict) - else: - msg = (_("Unable to get volume type ids.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - if len(storageSystems) != 1: - if not storageSystems: - msg = (_("Failed to get a single storage system " - "associated with consistencygroup_id: %(groupid)s.") - % {'groupid': group.id}) - else: - msg = (_("There are multiple storage systems " - "associated with consistencygroup_id: %(groupid)s.") - % {'groupid': group.id}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - storageSystem = storageSystems.pop() - replicationService = self.utils.find_replication_service( - conn, storageSystem) - return replicationService, storageSystem, extraSpecsDictList, isV3 - - def _update_extra_specs_list( - self, extraSpecs, list_size, volumeTypeId): - """Update the extra specs list. 
- - :param extraSpecs: extraSpecs - :param list_size: the size of volume type list - :param volumeTypeId: volume type identifier - :return: extraSpecsDictList, storageSystems, isV3 - """ - storageSystems = set() - extraSpecsDict = {} - if 'pool_name' in extraSpecs: - isV3 = True - extraSpecs = self.utils.update_extra_specs( - extraSpecs) - extraSpecs[ISV3] = True - else: - # Without multipool we cannot support multiple volumetypes. - if list_size == 1: - extraSpecs = self._initial_setup(None, volumeTypeId) - isV3 = extraSpecs[ISV3] - else: - msg = (_("We cannot support multiple volume types if " - "multi pool functionality is not enabled.")) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - __, storageSystem = ( - self._get_pool_and_storage_system(extraSpecs)) - if storageSystem: - storageSystems.add(storageSystem) - extraSpecsDict["volumeTypeId"] = volumeTypeId - extraSpecsDict["extraSpecs"] = extraSpecs - return extraSpecsDict, storageSystems, isV3 - - def _update_consistency_group_name(self, group): - """Format id and name consistency group - - :param group: the consistency group object to be created - :param update_variable: the variable of the group to be used - :return: cgname -- formatted name + id - """ - cgName = "" - if group['name'] is not None: - cgName = ( - self.utils.truncate_string(group['name'], TRUNCATE_27) + "_") - - cgName += six.text_type(group["id"]) - return cgName - - def _sync_check(self, volumeInstance, volumeName, extraSpecs): - """Check if volume is part of a snapshot/clone sync process. - - :param volumeInstance: volume instance - :param volumeName: volume name - :param extraSpecs: extra specifications - """ - storageSystem = volumeInstance['SystemName'] - - # Wait for it to fully sync in case there is an ongoing - # create volume from snapshot request. 
- syncInstanceName = self.utils.find_sync_sv_by_volume( - self.conn, storageSystem, volumeInstance, extraSpecs, - True) - - if syncInstanceName: - repservice = self.utils.find_replication_service(self.conn, - storageSystem) - - # Break the replication relationship - LOG.debug("Deleting snap relationship: Source: %(volume)s " - "Synchronization: %(syncName)s.", - {'volume': volumeName, - 'syncName': syncInstanceName}) - if extraSpecs[ISV3]: - rc, job = self.provisionv3.break_replication_relationship( - self.conn, repservice, syncInstanceName, - DISSOLVE_SNAPVX, extraSpecs) - else: - self.provision.delete_clone_relationship( - self.conn, repservice, syncInstanceName, extraSpecs, True) - - def setup_volume_replication(self, conn, sourceVolume, volumeDict, - extraSpecs, targetInstance=None): - """Setup replication for volume, if enabled. - - Called on create volume, create cloned volume, - create volume from snapshot, manage_existing, - and re-establishing a replication relationship after extending. 
- - :param conn: the connection to the ecom server - :param sourceVolume: the source volume object - :param volumeDict: the source volume dict (the provider_location) - :param extraSpecs: extra specifications - :param targetInstance: optional, target on secondary array - :return: rep_update - dict - """ - isTargetV3 = self.utils.isArrayV3(conn, self.rep_config['array']) - if not extraSpecs[ISV3] or not isTargetV3: - exception_message = (_("Replication is not supported on " - "VMAX 2")) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - sourceName = sourceVolume['name'] - sourceInstance = self.utils.find_volume_instance( - conn, volumeDict, sourceName) - LOG.debug('Starting replication setup ' - 'for volume: %s.', sourceVolume['name']) - storageSystem = sourceInstance['SystemName'] - # get rdf details - rdfGroupInstance, repServiceInstanceName = ( - self.get_rdf_details(conn, storageSystem)) - rdf_vol_size = sourceVolume['size'] - - # give the target volume the same Volume Element Name as the - # source volume - targetName = self.utils.get_volume_element_name( - sourceVolume['id']) - - if not targetInstance: - # create a target volume on the target array - # target must be passed in on remote replication - targetInstance = self.get_target_instance( - sourceVolume, self.rep_config, rdf_vol_size, - targetName, extraSpecs) - - LOG.debug("Create volume replica: Remote Volume: %(targetName)s " - "Source Volume: %(sourceName)s " - "Method: CreateElementReplica " - "ReplicationService: %(service)s ElementName: " - "%(elementname)s SyncType: 6 SourceElement: " - "%(sourceelement)s.", - {'targetName': targetName, - 'sourceName': sourceName, - 'service': repServiceInstanceName, - 'elementname': targetName, - 'sourceelement': sourceInstance.path}) - - # create the remote replica and establish the link - rc, rdfDict = self.create_remote_replica( - conn, repServiceInstanceName, rdfGroupInstance, - sourceVolume, 
sourceInstance, targetInstance, extraSpecs, - self.rep_config) - - LOG.info('Successfully setup replication for %s.', - sourceVolume['name']) - replication_status = REPLICATION_ENABLED - replication_driver_data = rdfDict['keybindings'] - - return replication_status, replication_driver_data - - # called on delete volume after remove_and_reset_members - def cleanup_lun_replication(self, conn, volume, volumeName, - sourceInstance, extraSpecs): - """Cleanup target volume on delete. - - Extra logic if target is last in group. - :param conn: the connection to the ecom server - :param volume: the volume object - :param volumeName: the volume name - :param sourceInstance: the source volume instance - :param extraSpecs: extra specification - """ - LOG.debug('Starting cleanup replication from volume: ' - '%s.', volumeName) - try: - loc = volume['provider_location'] - rep_data = volume['replication_driver_data'] - - if (isinstance(loc, six.string_types) - and isinstance(rep_data, six.string_types)): - name = ast.literal_eval(loc) - replication_keybindings = ast.literal_eval(rep_data) - storageSystem = replication_keybindings['SystemName'] - rdfGroupInstance, repServiceInstanceName = ( - self.get_rdf_details(conn, storageSystem)) - repExtraSpecs = self._get_replication_extraSpecs( - extraSpecs, self.rep_config) - - targetVolumeDict = {'classname': name['classname'], - 'keybindings': replication_keybindings} - - targetInstance = self.utils.find_volume_instance( - conn, targetVolumeDict, volumeName) - # Ensure element name matches openstack id. - volumeElementName = (self.utils. 
- get_volume_element_name(volume['id'])) - if volumeElementName != targetInstance['ElementName']: - targetInstance = None - - if targetInstance is not None: - # clean-up target - targetControllerConfigService = ( - self.utils.find_controller_configuration_service( - conn, storageSystem)) - self.masking.remove_and_reset_members( - conn, targetControllerConfigService, targetInstance, - volumeName, repExtraSpecs, None, False) - self._cleanup_remote_target( - conn, repServiceInstanceName, sourceInstance, - targetInstance, extraSpecs, repExtraSpecs) - LOG.info('Successfully destroyed replication for ' - 'volume: %(volume)s', - {'volume': volumeName}) - else: - LOG.warning('Replication target not found for ' - 'replication-enabled volume: %(volume)s', - {'volume': volumeName}) - except Exception as e: - LOG.error('Cannot get necessary information to cleanup ' - 'replication target for volume: %(volume)s. ' - 'The exception received was: %(e)s. Manual ' - 'clean-up may be required. Please contact ' - 'your administrator.', - {'volume': volumeName, 'e': e}) - - def _cleanup_remote_target( - self, conn, repServiceInstanceName, sourceInstance, - targetInstance, extraSpecs, repExtraSpecs): - """Clean-up remote replication target after exception or on deletion. - - :param conn: connection to the ecom server - :param repServiceInstanceName: the replication service - :param sourceInstance: the source volume instance - :param targetInstance: the target volume instance - :param extraSpecs: extra specifications - :param repExtraSpecs: replication extra specifications - """ - storageSystem = sourceInstance['SystemName'] - targetStorageSystem = targetInstance['SystemName'] - syncInstanceName = self.utils.find_rdf_storage_sync_sv_sv( - conn, sourceInstance, storageSystem, - targetInstance, targetStorageSystem, - extraSpecs, False) - if syncInstanceName is not None: - # Break the sync relationship. 
- self.break_rdf_relationship( - conn, repServiceInstanceName, syncInstanceName, extraSpecs) - targetStorageConfigService = ( - self.utils.find_storage_configuration_service( - conn, targetStorageSystem)) - deviceId = targetInstance['DeviceID'] - volumeName = targetInstance['Name'] - self._delete_from_pool_v3( - targetStorageConfigService, targetInstance, volumeName, - deviceId, repExtraSpecs) - - def _cleanup_replication_source( - self, conn, volumeName, volumeDict, extraSpecs): - """Cleanup a remote replication source volume on failure. - - If replication setup fails at any stage on a new volume create, - we must clean-up the source instance as the cinder database won't - be updated with the provider_location. This means the volume can not - be properly deleted from the array by cinder. - - :param conn: the connection to the ecom server - :param volumeName: the name of the volume - :param volumeDict: the source volume dictionary - :param extraSpecs: the extra specifications - """ - LOG.warning( - "Replication failed. Cleaning up the source volume. " - "Volume name: %(sourceName)s.", - {'sourceName': volumeName}) - sourceInstance = self.utils.find_volume_instance( - conn, volumeDict, volumeName) - storageSystem = sourceInstance['SystemName'] - deviceId = sourceInstance['DeviceID'] - volumeName = sourceInstance['Name'] - storageConfigService = ( - self.utils.find_storage_configuration_service( - conn, storageSystem)) - self._delete_from_pool_v3( - storageConfigService, sourceInstance, volumeName, - deviceId, extraSpecs) - - def break_rdf_relationship(self, conn, repServiceInstanceName, - syncInstanceName, extraSpecs): - # Break the sync relationship. 
- LOG.debug("Suspending the SRDF relationship...") - self.provisionv3.break_replication_relationship( - conn, repServiceInstanceName, syncInstanceName, - SUSPEND_SRDF, extraSpecs, True) - LOG.debug("Detaching the SRDF relationship...") - self.provisionv3.break_replication_relationship( - conn, repServiceInstanceName, syncInstanceName, - DETACH_SRDF, extraSpecs, True) - - def get_rdf_details(self, conn, storageSystem): - """Retrieves an SRDF group instance. - - :param conn: connection to the ecom server - :param storageSystem: the storage system name - :return: - """ - if not self.rep_config: - exception_message = (_("Replication is not configured on " - "backend: %(backend)s.") % - {'backend': self.configuration.safe_get( - 'volume_backend_name')}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - repServiceInstanceName = self.utils.find_replication_service( - conn, storageSystem) - RDFGroupName = self.rep_config['rdf_group_label'] - LOG.info("Replication group: %(RDFGroup)s.", - {'RDFGroup': RDFGroupName}) - rdfGroupInstance = self.provisionv3.get_rdf_group_instance( - conn, repServiceInstanceName, RDFGroupName) - LOG.info("Found RDF group instance: %(RDFGroup)s.", - {'RDFGroup': rdfGroupInstance}) - if rdfGroupInstance is None: - exception_message = (_("Cannot find replication group: " - "%(RDFGroup)s.") % - {'RDFGroup': rdfGroupInstance}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - return rdfGroupInstance, repServiceInstanceName - - def failover_host(self, context, volumes, secondary_id=None): - """Fails over the volume back and forth. - - Driver needs to update following info for failed-over volume: - 1. provider_location: update array details - 2. 
replication_status: new status for replication-enabled volume - :param context: the context - :param volumes: the list of volumes to be failed over - :param secondary_id: the target backend - :return: secondary_id, volume_update_list - """ - volume_update_list = [] - if not self.conn: - self.conn = self._get_ecom_connection() - if secondary_id != 'default': - if not self.failover: - self.failover = True - if self.rep_config: - secondary_id = self.rep_config['array'] - else: - exception_message = (_( - "Backend %(backend)s is already failed over. " - "If you wish to failback, please append " - "'--backend_id default' to your command.") - % {'backend': self.configuration.safe_get( - 'volume_backend_name')}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - else: - if self.failover: - self.failover = False - secondary_id = None - else: - exception_message = (_( - "Cannot failback backend %(backend)s- backend not " - "in failed over state. 
If you meant to failover, please " - "omit the '--backend_id default' from the command") - % {'backend': self.configuration.safe_get( - 'volume_backend_name')}) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - def failover_volume(vol, failover): - loc = vol['provider_location'] - rep_data = vol['replication_driver_data'] - try: - name = ast.literal_eval(loc) - replication_keybindings = ast.literal_eval(rep_data) - keybindings = name['keybindings'] - storageSystem = keybindings['SystemName'] - sourceInstance = self._find_lun(vol) - volumeDict = {'classname': name['classname'], - 'keybindings': replication_keybindings} - - targetInstance = self.utils.find_volume_instance( - self.conn, volumeDict, vol['name']) - targetStorageSystem = ( - replication_keybindings['SystemName']) - repServiceInstanceName = ( - self.utils.find_replication_service( - self.conn, storageSystem)) - - if failover: - storageSynchronizationSv = ( - self.utils.find_rdf_storage_sync_sv_sv( - self.conn, sourceInstance, storageSystem, - targetInstance, targetStorageSystem, - extraSpecs)) - self.provisionv3.failover_volume( - self.conn, repServiceInstanceName, - storageSynchronizationSv, - extraSpecs) - new_status = REPLICATION_FAILOVER - - else: - storageSynchronizationSv = ( - self.utils.find_rdf_storage_sync_sv_sv( - self.conn, targetInstance, targetStorageSystem, - sourceInstance, storageSystem, - extraSpecs, False)) - self.provisionv3.failback_volume( - self.conn, repServiceInstanceName, - storageSynchronizationSv, - extraSpecs) - new_status = REPLICATION_ENABLED - - # Transfer ownership to secondary_backend_id and - # update provider_location field - provider_location, replication_driver_data = ( - self.utils.failover_provider_location( - name, replication_keybindings)) - loc = six.text_type(provider_location) - rep_data = six.text_type(replication_driver_data) - - except Exception as ex: - LOG.error( - 'Failed to failover volume 
%(volume_id)s. ' - 'Error: %(error)s.', - {'volume_id': vol['id'], 'error': ex}) - new_status = FAILOVER_ERROR - - model_update = {'volume_id': vol['id'], - 'updates': - {'replication_status': new_status, - 'replication_driver_data': rep_data, - 'provider_location': loc}} - volume_update_list.append(model_update) - - for volume in volumes: - extraSpecs = self._initial_setup(volume) - if self.utils.is_replication_enabled(extraSpecs): - failover_volume(volume, self.failover) - else: - if self.failover: - # Since the array has been failed-over, - # volumes without replication should be in error. - volume_update_list.append({ - 'volume_id': volume['id'], - 'updates': {'status': 'error'}}) - else: - # This is a failback, so we will attempt - # to recover non-failed over volumes - recovery = self.recover_volumes_on_failback(volume) - volume_update_list.append(recovery) - - LOG.info("Failover host complete") - - return secondary_id, volume_update_list - - def recover_volumes_on_failback(self, volume): - """Recover volumes on failback. - - On failback, attempt to recover non RE(replication enabled) - volumes from primary array. - - :param volume: - :return: volume_update - """ - - # check if volume still exists on the primary - volume_update = {'volume_id': volume['id']} - volumeInstance = self._find_lun(volume) - if not volumeInstance: - volume_update['updates'] = {'status': 'error'} - else: - try: - maskingview = self._is_volume_in_masking_view(volumeInstance) - except Exception: - maskingview = None - LOG.debug("Unable to determine if volume is in masking view.") - if not maskingview: - volume_update['updates'] = {'status': 'available'} - else: - volume_update['updates'] = {'status': 'in-use'} - return volume_update - - def _is_volume_in_masking_view(self, volumeInstance): - """Helper function to check if a volume is in a masking view. 
- - :param volumeInstance: the volume instance - :return: maskingview - """ - maskingView = None - volumeInstanceName = volumeInstance.path - storageGroups = self.utils.get_storage_groups_from_volume( - self.conn, volumeInstanceName) - if storageGroups: - for storageGroup in storageGroups: - maskingView = self.utils.get_masking_view_from_storage_group( - self.conn, storageGroup) - if maskingView: - break - return maskingView - - def extend_volume_is_replicated(self, volume, volumeInstance, - volumeName, newSize, extraSpecs): - """Extend a replication-enabled volume. - - Cannot extend volumes in a synchronization pair. - Must first break the relationship, extend them - separately, then recreate the pair - :param volume: the volume objcet - :param volumeInstance: the volume instance - :param volumeName: the volume name - :param newSize: the new size the volume should be - :param extraSpecs: extra specifications - :return: rc, volumeDict - """ - if self.extendReplicatedVolume is True: - storageSystem = volumeInstance['SystemName'] - loc = volume['provider_location'] - rep_data = volume['replication_driver_data'] - try: - name = ast.literal_eval(loc) - replication_keybindings = ast.literal_eval(rep_data) - targetStorageSystem = replication_keybindings['SystemName'] - targetVolumeDict = {'classname': name['classname'], - 'keybindings': replication_keybindings} - targetVolumeInstance = self.utils.find_volume_instance( - self.conn, targetVolumeDict, volumeName) - repServiceInstanceName = self.utils.find_replication_service( - self.conn, targetStorageSystem) - storageSynchronizationSv = ( - self.utils.find_rdf_storage_sync_sv_sv( - self.conn, volumeInstance, storageSystem, - targetVolumeInstance, targetStorageSystem, - extraSpecs)) - - # volume must be removed from replication (storage) group - # before the replication relationship can be ended (cannot - # have a mix of replicated and non-replicated volumes as - # the SRDF groups become unmanageable). 
- controllerConfigService = ( - self.utils.find_controller_configuration_service( - self.conn, storageSystem)) - self.masking.remove_and_reset_members( - self.conn, controllerConfigService, volumeInstance, - volumeName, extraSpecs, None, False) - - # repeat on target side - targetControllerConfigService = ( - self.utils.find_controller_configuration_service( - self.conn, targetStorageSystem)) - repExtraSpecs = self._get_replication_extraSpecs( - extraSpecs, self.rep_config) - self.masking.remove_and_reset_members( - self.conn, targetControllerConfigService, - targetVolumeInstance, volumeName, repExtraSpecs, - None, False) - - LOG.info("Breaking replication relationship...") - self.break_rdf_relationship( - self.conn, repServiceInstanceName, - storageSynchronizationSv, extraSpecs) - - # extend the source volume - - LOG.info("Extending source volume...") - rc, volumeDict = self._extend_v3_volume( - volumeInstance, volumeName, newSize, extraSpecs) - - # extend the target volume - LOG.info("Extending target volume...") - self._extend_v3_volume(targetVolumeInstance, volumeName, - newSize, repExtraSpecs) - - # re-create replication relationship - LOG.info("Recreating replication relationship...") - self.setup_volume_replication( - self.conn, volume, volumeDict, - extraSpecs, targetVolumeInstance) - - except Exception as e: - exception_message = (_("Error extending volume. " - "Error received was %(e)s") % - {'e': e}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - return rc, volumeDict - - else: - exceptionMessage = (_( - "Extending a replicated volume is not " - "permitted on this backend. 
Please contact " - "your administrator.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - def create_remote_replica(self, conn, repServiceInstanceName, - rdfGroupInstance, sourceVolume, sourceInstance, - targetInstance, extraSpecs, rep_config): - """Create a replication relationship with a target volume. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: the replication service - :param rdfGroupInstance: the SRDF group instance - :param sourceVolume: the source volume object - :param sourceInstance: the source volume instance - :param targetInstance: the target volume instance - :param extraSpecs: extra specifications - :param rep_config: the replication configuration - :return: rc, rdfDict - the target volume dictionary - """ - # remove source and target instances from their default storage groups - volumeName = sourceVolume['name'] - storageSystemName = sourceInstance['SystemName'] - controllerConfigService = ( - self.utils.find_controller_configuration_service( - conn, storageSystemName)) - repExtraSpecs = self._get_replication_extraSpecs(extraSpecs, - rep_config) - try: - self.masking.remove_and_reset_members( - conn, controllerConfigService, sourceInstance, - volumeName, extraSpecs, connector=None, reset=False) - - targetStorageSystemName = targetInstance['SystemName'] - targetControllerConfigService = ( - self.utils.find_controller_configuration_service( - conn, targetStorageSystemName)) - self.masking.remove_and_reset_members( - conn, targetControllerConfigService, targetInstance, - volumeName, repExtraSpecs, connector=None, reset=False) - - # establish replication relationship - rc, rdfDict = self._create_remote_replica( - conn, repServiceInstanceName, rdfGroupInstance, volumeName, - sourceInstance, targetInstance, extraSpecs) - - # add source and target instances to their replication groups - LOG.debug("Adding sourceInstance to default replication group.") - 
self.add_volume_to_replication_group(conn, controllerConfigService, - sourceInstance, volumeName, - extraSpecs) - LOG.debug("Adding targetInstance to default replication group.") - self.add_volume_to_replication_group( - conn, targetControllerConfigService, targetInstance, - volumeName, repExtraSpecs) - - except Exception as e: - LOG.warning( - "Remote replication failed. Cleaning up the target " - "volume and returning source volume to default storage " - "group. Volume name: %(cloneName)s ", - {'cloneName': volumeName}) - - self._cleanup_remote_target( - conn, repServiceInstanceName, sourceInstance, - targetInstance, extraSpecs, repExtraSpecs) - # Re-throw the exception. - exception_message = (_("Remote replication failed with exception:" - " %(e)s") - % {'e': six.text_type(e)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) - - return rc, rdfDict - - def add_volume_to_replication_group(self, conn, controllerConfigService, - volumeInstance, volumeName, - extraSpecs): - """Add a volume to the default replication group. - - SE_ReplicationGroups are actually VMAX storage groups under - the covers, so we can use our normal storage group operations. 
- :param conn: the connection to the ecom served - :param controllerConfigService: the controller config service - :param volumeInstance: the volume instance - :param volumeName: the name of the volume - :param extraSpecs: extra specifications - :return: storageGroupInstanceName - """ - storageGroupName = self.utils.get_v3_storage_group_name( - extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD], - False, True) - storageSystemName = volumeInstance['SystemName'] - doDisableCompression = self.utils.is_compression_disabled(extraSpecs) - try: - storageGroupInstanceName = self._get_or_create_storage_group_v3( - extraSpecs[POOL], extraSpecs[SLO], extraSpecs[WORKLOAD], - doDisableCompression, storageSystemName, extraSpecs, - is_re=True) - except Exception as e: - exception_message = (_("Failed to get or create replication" - "group. Exception received: %(e)s") - % {'e': six.text_type(e)}) - LOG.exception(exception_message) - raise exception.VolumeBackendAPIException( - data=exception_message) - - self.masking.add_volume_to_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, storageGroupName, extraSpecs) - - return storageGroupInstanceName - - def _create_remote_replica( - self, conn, repServiceInstanceName, rdfGroupInstance, - volumeName, sourceInstance, targetInstance, extraSpecs): - """Helper function to establish a replication relationship. 
- - :param conn: the connection to the ecom server - :param repServiceInstanceName: replication service instance - :param rdfGroupInstance: rdf group instance - :param volumeName: volume name - :param sourceInstance: the source volume instance - :param targetInstance: the target volume instance - :param extraSpecs: extra specifications - :return: rc, rdfDict - the target volume dictionary - """ - syncType = MIRROR_SYNC_TYPE - rc, job = self.provisionv3.create_remote_element_replica( - conn, repServiceInstanceName, volumeName, syncType, - sourceInstance, targetInstance, rdfGroupInstance, extraSpecs) - rdfDict = self.provisionv3.get_volume_dict_from_job( - self.conn, job['Job']) - - return rc, rdfDict - - def get_target_instance(self, sourceVolume, rep_config, - rdf_vol_size, targetName, extraSpecs): - """Create a replication target for a given source volume. - - :param sourceVolume: the source volume - :param rep_config: the replication configuration - :param rdf_vol_size: the size of the volume - :param targetName: the Element Name for the new volume - :param extraSpecs: the extra specifications - :return: the target instance - """ - repExtraSpecs = self._get_replication_extraSpecs( - extraSpecs, rep_config) - volumeSize = int(self.utils.convert_gb_to_bits(rdf_vol_size)) - rc, volumeDict, storageSystemName = self._create_v3_volume( - sourceVolume, targetName, volumeSize, repExtraSpecs) - targetInstance = self.utils.find_volume_instance( - self.conn, volumeDict, targetName) - return targetInstance - - def _get_replication_extraSpecs(self, extraSpecs, rep_config): - """Get replication extra specifications. - - Called when target array operations are necessary - - on create, extend, etc and when volume is failed over. 
- :param extraSpecs: the extra specifications - :param rep_config: the replication configuration - :return: repExtraSpecs - dict - """ - repExtraSpecs = extraSpecs.copy() - repExtraSpecs[ARRAY] = rep_config['array'] - repExtraSpecs[POOL] = rep_config['pool'] - repExtraSpecs[PORTGROUPNAME] = rep_config['portgroup'] - - # if disable compression is set, check if target array is all flash - doDisableCompression = self.utils.is_compression_disabled( - extraSpecs) - if doDisableCompression: - if not self.utils.is_all_flash(self.conn, repExtraSpecs[ARRAY]): - repExtraSpecs.pop(self.utils.DISABLECOMPRESSION, None) - - # Check to see if SLO and Workload are configured on the target array. - poolInstanceName, storageSystemName = ( - self._get_pool_and_storage_system(repExtraSpecs)) - storagePoolCapability = self.provisionv3.get_storage_pool_capability( - self.conn, poolInstanceName) - if extraSpecs[SLO]: - if storagePoolCapability: - try: - self.provisionv3.get_storage_pool_setting( - self.conn, storagePoolCapability, extraSpecs[SLO], - extraSpecs[WORKLOAD]) - except Exception: - LOG.warning( - "The target array does not support the storage " - "pool setting for SLO %(slo)s or workload " - "%(workload)s. Not assigning any SLO or " - "workload.", - {'slo': extraSpecs[SLO], - 'workload': extraSpecs[WORKLOAD]}) - repExtraSpecs[SLO] = None - if extraSpecs[WORKLOAD]: - repExtraSpecs[WORKLOAD] = None - - else: - LOG.warning("Cannot determine storage pool settings of " - "target array. Not assigning any SLO or " - "workload") - repExtraSpecs[SLO] = None - if extraSpecs[WORKLOAD]: - repExtraSpecs[WORKLOAD] = None - - return repExtraSpecs - - def get_secondary_stats_info(self, rep_config, arrayInfo): - """On failover, report on secondary array statistics. 
- - :param rep_config: the replication configuration - :param arrayInfo: the array info - :return: secondaryInfo - dict - """ - secondaryInfo = arrayInfo.copy() - secondaryInfo['SerialNumber'] = six.text_type(rep_config['array']) - secondaryInfo['PoolName'] = rep_config['pool'] - pool_info_specs = {ARRAY: secondaryInfo['SerialNumber'], - POOL: rep_config['pool'], - ISV3: True} - # Check to see if SLO and Workload are configured on the target array. - poolInstanceName, storageSystemName = ( - self._get_pool_and_storage_system(pool_info_specs)) - storagePoolCapability = self.provisionv3.get_storage_pool_capability( - self.conn, poolInstanceName) - if arrayInfo['SLO']: - if storagePoolCapability: - try: - self.provisionv3.get_storage_pool_setting( - self.conn, storagePoolCapability, arrayInfo['SLO'], - arrayInfo['Workload']) - except Exception: - LOG.info( - "The target array does not support the storage " - "pool setting for SLO %(slo)s or workload " - "%(workload)s. SLO stats will not be reported.", - {'slo': arrayInfo['SLO'], - 'workload': arrayInfo['Workload']}) - secondaryInfo['SLO'] = None - if arrayInfo['Workload']: - secondaryInfo['Workload'] = None - if self.multiPoolSupportEnabled: - self.multiPoolSupportEnabled = False - - else: - LOG.info("Cannot determine storage pool settings of " - "target array. SLO stats will not be reported.") - secondaryInfo['SLO'] = None - if arrayInfo['Workload']: - secondaryInfo['Workload'] = None - if self.multiPoolSupportEnabled: - self.multiPoolSupportEnabled = False - return secondaryInfo + # Rename the volume to volumeId, thus remove the 'OS-' prefix. + self.rest.rename_volume( + extra_specs[utils.ARRAY], device_id, volume_id) diff --git a/cinder/volume/drivers/dell_emc/vmax/fast.py b/cinder/volume/drivers/dell_emc/vmax/fast.py deleted file mode 100644 index 51c8a22640b..00000000000 --- a/cinder/volume/drivers/dell_emc/vmax/fast.py +++ /dev/null @@ -1,828 +0,0 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log as logging - -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import utils - -LOG = logging.getLogger(__name__) - -DEFAULT_SG_PREFIX = 'OS_default_' -DEFAULT_SG_POSTFIX = '_SG' - - -class VMAXFast(object): - """FAST Class for SMI-S based EMC volume drivers. - - This FAST class is for EMC volume drivers based on SMI-S. - It supports VMAX arrays. - """ - def __init__(self, prtcl): - self.protocol = prtcl - self.utils = utils.VMAXUtils(prtcl) - self.provision = provision.VMAXProvision(prtcl) - - def _check_if_fast_supported(self, conn, storageSystemInstanceName): - """Check to see if fast is supported on the array. 
- - :param conn: the ecom connection - :param storageSystemInstanceName: the storage system Instance name - :returns: boolean -- isTieringPolicySupported - """ - - tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( - conn, storageSystemInstanceName) - isTieringPolicySupported = self.is_tiering_policy_enabled( - conn, tierPolicyServiceInstanceName) - if isTieringPolicySupported is None: - LOG.error("Cannot determine whether " - "Tiering Policy is supported on this array.") - - if isTieringPolicySupported is False: - LOG.error("Tiering Policy is not supported on this array.") - return isTieringPolicySupported - - def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName): - """Checks to see if tiering policy is supported. - - We will only check if there is a fast policy specified in - the config file. - - :param conn: the connection information to the ecom server - :param tierPolicyServiceInstanceName: the tier policy service - instance name - :returns: boolean -- foundIsSupportsTieringPolicies - """ - foundIsSupportsTieringPolicies = None - tierPolicyCapabilityInstanceNames = conn.AssociatorNames( - tierPolicyServiceInstanceName, - ResultClass='CIM_TierPolicyServiceCapabilities', - AssocClass='CIM_ElementCapabilities') - - tierPolicyCapabilityInstanceName = tierPolicyCapabilityInstanceNames[0] - tierPolicyCapabilityInstance = conn.GetInstance( - tierPolicyCapabilityInstanceName, LocalOnly=False) - propertiesList = (tierPolicyCapabilityInstance - .properties.items()) - for properties in propertiesList: - if properties[0] == 'SupportsTieringPolicies': - cimProperties = properties[1] - foundIsSupportsTieringPolicies = cimProperties.value - break - - if foundIsSupportsTieringPolicies is None: - LOG.error("Cannot determine if Tiering Policies " - "are supported.") - - return foundIsSupportsTieringPolicies - - def get_and_verify_default_storage_group( - self, conn, controllerConfigService, volumeInstanceName, - volumeName, fastPolicyName): - 
"""Retrieves and verifies the default storage group for a volume. - - Given the volumeInstanceName get any associated storage group and - check that it is the default storage group. The default storage group - should have been already created. If not found error is logged. - - :param conn: the connection to the ecom server - :param controllerConfigService: the controller config service - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name (String) - :param fastPolicyName: the fast policy name (String) - :returns: foundDefaultStorageGroupInstanceName, defaultSgName - """ - foundDefaultStorageGroupInstanceName = None - storageSystemInstanceName = self.utils.find_storage_system( - conn, controllerConfigService) - - if not self._check_if_fast_supported(conn, storageSystemInstanceName): - LOG.error("FAST is not supported on this array.") - raise - - defaultSgName = self.format_default_sg_string(fastPolicyName) - assocStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume(conn, volumeInstanceName, - defaultSgName)) - - defaultStorageGroupInstanceName = ( - self.utils.find_storage_masking_group(conn, - controllerConfigService, - defaultSgName)) - if defaultStorageGroupInstanceName is None: - LOG.error( - "Unable to find default storage group " - "for FAST policy : %(fastPolicyName)s.", - {'fastPolicyName': fastPolicyName}) - raise - - if assocStorageGroupInstanceName == defaultStorageGroupInstanceName: - foundDefaultStorageGroupInstanceName = ( - assocStorageGroupInstanceName) - else: - LOG.warning( - "Volume: %(volumeName)s Does not belong " - "to storage group %(defaultSgName)s.", - {'volumeName': volumeName, - 'defaultSgName': defaultSgName}) - return foundDefaultStorageGroupInstanceName, defaultSgName - - def format_default_sg_string(self, fastPolicyName): - """Format the default storage group name - - :param fastPolicyName: the fast policy name - :returns: defaultSgName - """ - return 
("%(prefix)s%(fastPolicyName)s%(postfix)s" - % {'prefix': DEFAULT_SG_PREFIX, - 'fastPolicyName': fastPolicyName, - 'postfix': DEFAULT_SG_POSTFIX}) - - def add_volume_to_default_storage_group_for_fast_policy( - self, conn, controllerConfigService, volumeInstance, - volumeName, fastPolicyName, extraSpecs): - """Add a volume to the default storage group for FAST policy. - - The storage group must pre-exist. Once added to the storage group, - check the association to make sure it has been successfully added. - - :param conn: the ecom connection - :param controllerConfigService: the controller configuration service - :param volumeInstance: the volume instance - :param volumeName: the volume name (String) - :param fastPolicyName: the fast policy name (String) - :param extraSpecs: additional info - :returns: assocStorageGroupInstanceName - the storage group - associated with the volume - """ - failedRet = None - defaultSgName = self.format_default_sg_string(fastPolicyName) - storageGroupInstanceName = self.utils.find_storage_masking_group( - conn, controllerConfigService, defaultSgName) - if storageGroupInstanceName is None: - LOG.error( - "Unable to get default storage group %(defaultSgName)s.", - {'defaultSgName': defaultSgName}) - return failedRet - - self.provision.add_members_to_masking_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance.path, volumeName, extraSpecs) - # Check to see if the volume is in the storage group. - assocStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume(conn, - volumeInstance.path, - defaultSgName)) - return assocStorageGroupInstanceName - - def _create_default_storage_group(self, conn, controllerConfigService, - fastPolicyName, storageGroupName, - volumeInstance, extraSpecs): - """Create a first volume for the storage group. - - This is necessary because you cannot remove a volume if it is the - last in the group. 
Create the default storage group for the FAST policy - Associate the storage group with the tier policy rule. - - :param conn: the connection information to the ecom server - :param controllerConfigService: the controller configuration service - :param fastPolicyName: the fast policy name (String) - :param storageGroupName: the storage group name (String) - :param volumeInstance: the volume instance - :param extraSpecs: additional info - :returns: defaultstorageGroupInstanceName - instance name of the - default storage group - """ - failedRet = None - firstVolumeInstance = self._create_volume_for_default_volume_group( - conn, controllerConfigService, volumeInstance.path, extraSpecs) - if firstVolumeInstance is None: - LOG.error( - "Failed to create a first volume for storage " - "group : %(storageGroupName)s.", - {'storageGroupName': storageGroupName}) - return failedRet - - defaultStorageGroupInstanceName = ( - self.provision.create_and_get_storage_group( - conn, controllerConfigService, storageGroupName, - firstVolumeInstance.path, extraSpecs)) - if defaultStorageGroupInstanceName is None: - LOG.error( - "Failed to create default storage group for " - "FAST policy : %(fastPolicyName)s.", - {'fastPolicyName': fastPolicyName}) - return failedRet - - storageSystemInstanceName = ( - self.utils.find_storage_system(conn, controllerConfigService)) - tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( - conn, storageSystemInstanceName) - - # Get the fast policy instance name. - tierPolicyRuleInstanceName = self._get_service_level_tier_policy( - conn, tierPolicyServiceInstanceName, fastPolicyName) - if tierPolicyRuleInstanceName is None: - LOG.error( - "Unable to get policy rule for fast policy: " - "%(fastPolicyName)s.", - {'fastPolicyName': fastPolicyName}) - return failedRet - - # Now associate it with a FAST policy. 
- self.add_storage_group_to_tier_policy_rule( - conn, tierPolicyServiceInstanceName, - defaultStorageGroupInstanceName, tierPolicyRuleInstanceName, - storageGroupName, fastPolicyName, extraSpecs) - - return defaultStorageGroupInstanceName - - def _create_volume_for_default_volume_group( - self, conn, controllerConfigService, volumeInstanceName, - extraSpecs): - """Creates a volume for the default storage group for a fast policy. - - Creates a small first volume for the default storage group for a - fast policy. This is necessary because you cannot remove - the last volume from a storage group and this scenario is likely. - - :param conn: the connection information to the ecom server - :param controllerConfigService: the controller configuration service - :param volumeInstanceName: the volume instance name - :param extraSpecs: additional info - :returns: firstVolumeInstanceName - instance name of the first volume - in the storage group - """ - failedRet = None - storageSystemName = self.utils.find_storage_system_name_from_service( - controllerConfigService) - storageConfigurationInstanceName = ( - self.utils.find_storage_configuration_service( - conn, storageSystemName)) - - poolInstanceName = self.utils.get_assoc_pool_from_volume( - conn, volumeInstanceName) - if poolInstanceName is None: - LOG.error("Unable to get associated pool of volume.") - return failedRet - - volumeName = 'vol1' - volumeSize = '1' - volumeDict, _rc = ( - self.provision.create_volume_from_pool( - conn, storageConfigurationInstanceName, volumeName, - poolInstanceName, volumeSize, extraSpecs)) - firstVolumeInstanceName = self.utils.find_volume_instance( - conn, volumeDict, volumeName) - return firstVolumeInstanceName - - def add_storage_group_to_tier_policy_rule( - self, conn, tierPolicyServiceInstanceName, - storageGroupInstanceName, tierPolicyRuleInstanceName, - storageGroupName, fastPolicyName, extraSpecs): - """Add the storage group to the tier policy rule. 
- - :param conn: the connection information to the ecom server - :param tierPolicyServiceInstanceName: tier policy service - :param storageGroupInstanceName: storage group instance name - :param tierPolicyRuleInstanceName: tier policy instance name - :param storageGroupName: the storage group name (String) - :param fastPolicyName: the fast policy name (String) - :param extraSpecs: additional info - :returns: int -- return code - :raises VolumeBackendAPIException: - """ - # 5 is ("Add InElements to Policy"). - modificationType = '5' - - rc, job = conn.InvokeMethod( - 'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName, - PolicyRule=tierPolicyRuleInstanceName, - Operation=self.utils.get_num(modificationType, '16'), - InElements=[storageGroupInstanceName]) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error associating storage group : %(storageGroupName)s. " - "To fast Policy: %(fastPolicyName)s with error " - "description: %(errordesc)s.") - % {'storageGroupName': storageGroupName, - 'fastPolicyName': fastPolicyName, - 'errordesc': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - return rc - - def _get_service_level_tier_policy( - self, conn, tierPolicyServiceInstanceName, fastPolicyName): - """Returns the existing tier policies for a storage system instance. - - Given the storage system instance name, get the existing tier - policies on that array. 
- - :param conn: the connection information to the ecom server - :param tierPolicyServiceInstanceName: the policy service - :param fastPolicyName: the fast policy name e.g BRONZE1 - :returns: foundTierPolicyRuleInstanceName - the short name, - everything after the : - """ - foundTierPolicyRuleInstanceName = None - - tierPolicyRuleInstanceNames = self._get_existing_tier_policies( - conn, tierPolicyServiceInstanceName) - - for tierPolicyRuleInstanceName in tierPolicyRuleInstanceNames: - policyRuleName = tierPolicyRuleInstanceName['PolicyRuleName'] - if fastPolicyName == policyRuleName: - foundTierPolicyRuleInstanceName = tierPolicyRuleInstanceName - break - - return foundTierPolicyRuleInstanceName - - def _get_existing_tier_policies(self, conn, tierPolicyServiceInstanceName): - """Given the tier policy service, get the existing tier policies. - - :param conn: the connection information to the ecom server - :param tierPolicyServiceInstanceName: the tier policy service - instance Name - :returns: list -- the tier policy rule instance names - """ - tierPolicyRuleInstanceNames = conn.AssociatorNames( - tierPolicyServiceInstanceName, ResultClass='Symm_TierPolicyRule') - - return tierPolicyRuleInstanceNames - - def get_associated_tier_policy_from_storage_group( - self, conn, storageGroupInstanceName): - """Given the tier policy instance name get the storage groups. 
- - :param conn: the connection information to the ecom server - :param storageGroupInstanceName: the storage group instance name - :returns: list -- the list of tier policy instance names - """ - tierPolicyInstanceName = None - - tierPolicyInstanceNames = conn.AssociatorNames( - storageGroupInstanceName, - AssocClass='CIM_TierPolicySetAppliesToElement', - ResultClass='CIM_TierPolicyRule') - - if (len(tierPolicyInstanceNames) > 0 and - len(tierPolicyInstanceNames) < 2): - tierPolicyInstanceName = tierPolicyInstanceNames[0] - - return tierPolicyInstanceName - - def get_associated_tier_from_tier_policy( - self, conn, tierPolicyRuleInstanceName): - """Given the tierPolicyInstanceName get the associated tiers. - - :param conn: the connection information to the ecom server - :param tierPolicyRuleInstanceName: the tier policy rule instance name - :returns: list -- a list of storage tier instance names - """ - storageTierInstanceNames = conn.AssociatorNames( - tierPolicyRuleInstanceName, - AssocClass='CIM_AssociatedTierPolicy') - - if len(storageTierInstanceNames) == 0: - storageTierInstanceNames = None - LOG.warning( - "Unable to get storage tiers from tier policy rule.") - - return storageTierInstanceNames - - def get_policy_default_storage_group( - self, conn, controllerConfigService, policyName): - """Returns the default storage group for a tier policy. - - Given the tier policy instance name get the associated default - storage group. 
- - :param conn: the connection information to the ecom server - :param controllerConfigService: ControllerConfigurationService - instance name - :param policyName: string value - :returns: storageGroupInstanceName - instance name of the default - storage group - """ - foundStorageMaskingGroupInstanceName = None - storageMaskingGroupInstances = conn.Associators( - controllerConfigService, ResultClass='CIM_DeviceMaskingGroup') - - for storageMaskingGroupInstance in storageMaskingGroupInstances: - - if ('_default_' in storageMaskingGroupInstance['ElementName'] and - policyName in storageMaskingGroupInstance['ElementName']): - # Check that it has not been recently deleted. - instance = self.utils.get_existing_instance( - conn, storageMaskingGroupInstance.path) - if instance is None: - # Storage Group doesn't exist any more. - foundStorageMaskingGroupInstanceName = None - else: - foundStorageMaskingGroupInstanceName = ( - storageMaskingGroupInstance.path) - - return foundStorageMaskingGroupInstanceName - - def _get_associated_storage_groups_from_tier_policy( - self, conn, tierPolicyInstanceName): - """Given the tier policy instance name get the storage groups. - - :param conn: the connection information to the ecom server - :param tierPolicyInstanceName: tier policy instance name - :returns: list -- the list of storage instance names - """ - managedElementInstanceNames = conn.AssociatorNames( - tierPolicyInstanceName, - AssocClass='CIM_TierPolicySetAppliesToElement', - ResultClass='CIM_DeviceMaskingGroup') - - return managedElementInstanceNames - - def get_associated_pools_from_tier( - self, conn, storageTierInstanceName): - """Given the storage tier instance name get the storage pools. 
- - :param conn: the connection information to the ecom server - :param storageTierInstanceName: the storage tier instance name - :returns: list -- a list of storage tier instance names - """ - storagePoolInstanceNames = conn.AssociatorNames( - storageTierInstanceName, - AssocClass='CIM_MemberOfCollection', - ResultClass='CIM_StoragePool') - - return storagePoolInstanceNames - - def add_storage_group_and_verify_tier_policy_assoc( - self, conn, controllerConfigService, storageGroupInstanceName, - storageGroupName, fastPolicyName, extraSpecs): - """Adds a storage group to a tier policy and verifies success. - - Add a storage group to a tier policy rule and verify that it was - successful by getting the association. - - :param conn: the connection to the ecom server - :param controllerConfigService: the controller config service - :param storageGroupInstanceName: the storage group instance name - :param storageGroupName: the storage group name (String) - :param fastPolicyName: the fast policy name (String) - :param extraSpecs: additional info - :returns: assocTierPolicyInstanceName - """ - failedRet = None - assocTierPolicyInstanceName = None - storageSystemInstanceName = self.utils.find_storage_system( - conn, controllerConfigService) - tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( - conn, storageSystemInstanceName) - # Get the fast policy instance name. 
- tierPolicyRuleInstanceName = self._get_service_level_tier_policy( - conn, tierPolicyServiceInstanceName, fastPolicyName) - if tierPolicyRuleInstanceName is None: - LOG.error( - "Cannot find the fast policy %(fastPolicyName)s.", - {'fastPolicyName': fastPolicyName}) - return failedRet - else: - LOG.debug( - "Adding storage group %(storageGroupInstanceName)s to " - "tier policy rule %(tierPolicyRuleInstanceName)s.", - {'storageGroupInstanceName': storageGroupInstanceName, - 'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName}) - - # Associate the new storage group with the existing fast policy. - try: - self.add_storage_group_to_tier_policy_rule( - conn, tierPolicyServiceInstanceName, - storageGroupInstanceName, tierPolicyRuleInstanceName, - storageGroupName, fastPolicyName, extraSpecs) - except Exception: - LOG.exception( - "Failed to add storage group %(storageGroupInstanceName)s " - "to tier policy rule %(tierPolicyRuleInstanceName)s.", - {'storageGroupInstanceName': storageGroupInstanceName, - 'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName}) - return failedRet - - # Check that the storage group has been associated with with the - # tier policy rule. - assocTierPolicyInstanceName = ( - self.get_associated_tier_policy_from_storage_group( - conn, storageGroupInstanceName)) - - LOG.debug( - "AssocTierPolicyInstanceName is " - "%(assocTierPolicyInstanceName)s.", - {'assocTierPolicyInstanceName': assocTierPolicyInstanceName}) - return assocTierPolicyInstanceName - - def get_associated_policy_from_storage_group( - self, conn, storageGroupInstanceName): - """Get the tier policy instance name for a storage group instance name. 
- - :param conn: the connection information to the ecom server - :param storageGroupInstanceName: storage group instance name - :returns: foundTierPolicyInstanceName - instance name of the - tier policy object - """ - foundTierPolicyInstanceName = None - - tierPolicyInstanceNames = conn.AssociatorNames( - storageGroupInstanceName, - ResultClass='Symm_TierPolicyRule', - AssocClass='Symm_TierPolicySetAppliesToElement') - - if len(tierPolicyInstanceNames) > 0: - foundTierPolicyInstanceName = tierPolicyInstanceNames[0] - - return foundTierPolicyInstanceName - - def delete_storage_group_from_tier_policy_rule( - self, conn, tierPolicyServiceInstanceName, - storageGroupInstanceName, tierPolicyRuleInstanceName, - extraSpecs): - """Disassociate the storage group from its tier policy rule. - - :param conn: connection the ecom server - :param tierPolicyServiceInstanceName: instance name of the tier policy - service - :param storageGroupInstanceName: instance name of the storage group - :param tierPolicyRuleInstanceName: instance name of the tier policy - associated with the storage group - :param extraSpecs: additional information - """ - modificationType = '6' - LOG.debug("Invoking ModifyStorageTierPolicyRule %s.", - tierPolicyRuleInstanceName) - try: - rc, job = conn.InvokeMethod( - 'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName, - PolicyRule=tierPolicyRuleInstanceName, - Operation=self.utils.get_num(modificationType, '16'), - InElements=[storageGroupInstanceName]) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - LOG.error("Error disassociating storage group from " - "policy: %s.", errordesc) - else: - LOG.debug("Disassociated storage group from policy.") - else: - LOG.debug("ModifyStorageTierPolicyRule completed.") - except Exception as e: - LOG.info("Storage group not associated with the " - "policy. 
Exception is %s.", e) - - def get_pool_associated_to_policy( - self, conn, fastPolicyName, arraySN, - storageConfigService, poolInstanceName): - """Given a FAST policy check that the pool is linked to the policy. - - If it's associated return the pool instance, if not return None. - First check if FAST is enabled on the array. - - :param conn: the ecom connection - :param fastPolicyName: the fast policy name (String) - :param arraySN: the array serial number (String) - :param storageConfigService: the storage Config Service - :param poolInstanceName: the pool instance we want to check for - association with the fast storage tier - :returns: foundPoolInstanceName - """ - storageSystemInstanceName = self.utils.find_storage_system( - conn, storageConfigService) - - if not self._check_if_fast_supported(conn, storageSystemInstanceName): - errorMessage = (_( - "FAST is not supported on this array.")) - LOG.error(errorMessage) - exception.VolumeBackendAPIException(data=errorMessage) - - tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( - conn, storageSystemInstanceName) - - tierPolicyRuleInstanceName = self._get_service_level_tier_policy( - conn, tierPolicyServiceInstanceName, fastPolicyName) - # Get the associated storage tiers from the tier policy rule. - storageTierInstanceNames = self.get_associated_tier_from_tier_policy( - conn, tierPolicyRuleInstanceName) - - # For each gold storage tier get the associated pools. 
- foundPoolInstanceName = None - for storageTierInstanceName in storageTierInstanceNames: - assocStoragePoolInstanceNames = ( - self.get_associated_pools_from_tier(conn, - storageTierInstanceName)) - for assocStoragePoolInstanceName in assocStoragePoolInstanceNames: - if poolInstanceName == assocStoragePoolInstanceName: - foundPoolInstanceName = poolInstanceName - break - if foundPoolInstanceName is not None: - break - - return foundPoolInstanceName - - def is_tiering_policy_enabled_on_storage_system( - self, conn, storageSystemInstanceName): - """Checks if tiering policy in enabled on a storage system. - - True if FAST policy enabled on the given storage system; - False otherwise. - - :param conn: the ecom connection - :param storageSystemInstanceName: a storage system instance name - :returns: boolean -- isTieringPolicySupported - """ - try: - tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( - conn, storageSystemInstanceName) - isTieringPolicySupported = self.is_tiering_policy_enabled( - conn, tierPolicyServiceInstanceName) - except Exception as e: - LOG.error("Exception: %s.", e) - return False - - return isTieringPolicySupported - - def get_tier_policy_by_name( - self, conn, arrayName, policyName): - """Given the name of the policy, get the TierPolicyRule instance name. - - :param conn: the ecom connection - :param arrayName: the array - :param policyName: string -- the name of policy rule - :returns: tier policy instance name. None if not found - """ - tierPolicyInstanceNames = conn.EnumerateInstanceNames( - 'Symm_TierPolicyRule') - for policy in tierPolicyInstanceNames: - if (policyName == policy['PolicyRuleName'] and - arrayName in policy['SystemName']): - return policy - return None - - def get_capacities_associated_to_policy(self, conn, arrayName, policyName): - """Gets the total and un-used capacities for all pools in a policy. 
- - Given the name of the policy, get the total capacity and un-used - capacity in GB of all the storage pools associated with the policy. - - :param conn: the ecom connection - :param arrayName: the array - :param policyName: the name of policy rule, a string value - :returns: int -- total capacity in GB of all pools associated with - the policy - :returns: int -- real physical capacity in GB of all pools - available to be used - :returns: int -- (Provisioned capacity-EMCSubscribedCapacity) in GB - is the capacity that has been provisioned - :returns: int -- the maximum oversubscription ration - """ - policyInstanceName = self.get_tier_policy_by_name( - conn, arrayName, policyName) - - total_capacity_gb = 0 - provisioned_capacity_gb = 0 - free_capacity_gb = 0 - array_max_over_subscription = None - - tierInstanceNames = self.get_associated_tier_from_tier_policy( - conn, policyInstanceName) - for tierInstanceName in tierInstanceNames: - # Check that tier hasn't suddenly been deleted. - instance = self.utils.get_existing_instance(conn, tierInstanceName) - if instance is None: - # Tier doesn't exist any more. - break - - poolInstanceNames = self.get_associated_pools_from_tier( - conn, tierInstanceName) - for poolInstanceName in poolInstanceNames: - # Check that pool hasn't suddenly been deleted. - storagePoolInstance = self.utils.get_existing_instance( - conn, poolInstanceName) - if storagePoolInstance is None: - # Pool doesn't exist any more. 
- break - total_capacity_gb += self.utils.convert_bits_to_gbs( - storagePoolInstance['TotalManagedSpace']) - provisioned_capacity_gb += self.utils.convert_bits_to_gbs( - storagePoolInstance['EMCSubscribedCapacity']) - free_capacity_gb += self.utils.convert_bits_to_gbs( - storagePoolInstance['RemainingManagedSpace']) - try: - array_max_over_subscription = ( - self.utils.get_ratio_from_max_sub_per( - storagePoolInstance['EMCMaxSubscriptionPercent'])) - except KeyError: - array_max_over_subscription = 65534 - LOG.debug( - "PolicyName:%(policyName)s, pool: %(poolInstanceName)s, " - "provisioned_capacity_gb = %(provisioned_capacity_gb)lu.", - {'policyName': policyName, - 'poolInstanceName': poolInstanceName, - 'provisioned_capacity_gb': provisioned_capacity_gb}) - - return (total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, array_max_over_subscription) - - def get_or_create_default_storage_group( - self, conn, controllerConfigService, fastPolicyName, - volumeInstance, extraSpecs): - """Create or get a default storage group for FAST policy. - - :param conn: the ecom connection - :param controllerConfigService: the controller configuration service - :param fastPolicyName: the fast policy name (String) - :param volumeInstance: the volume instance - :param extraSpecs: additional info - :returns: defaultStorageGroupInstanceName - the default storage group - instance name - """ - defaultSgName = self.format_default_sg_string(fastPolicyName) - defaultStorageGroupInstanceName = ( - self.utils.find_storage_masking_group(conn, - controllerConfigService, - defaultSgName)) - if defaultStorageGroupInstanceName is None: - # Create it and associate it with the FAST policy in question. 
- defaultStorageGroupInstanceName = ( - self._create_default_storage_group(conn, - controllerConfigService, - fastPolicyName, - defaultSgName, - volumeInstance, - extraSpecs)) - - return defaultStorageGroupInstanceName - - def _get_associated_tier_policy_from_pool(self, conn, poolInstanceName): - """Given the pool instance name get the associated FAST tier policy. - - :param conn: the connection information to the ecom server - :param poolInstanceName: the pool instance name - :returns: the FAST Policy name (if it exists) - """ - fastPolicyName = None - - storageTierInstanceNames = conn.AssociatorNames( - poolInstanceName, - AssocClass='CIM_MemberOfCollection', - ResultClass='CIM_StorageTier') - - if len(storageTierInstanceNames) > 0: - tierPolicyInstanceNames = conn.AssociatorNames( - storageTierInstanceNames[0], - AssocClass='CIM_AssociatedTierPolicy') - - if len(tierPolicyInstanceNames) > 0: - tierPolicyInstanceName = tierPolicyInstanceNames[0] - fastPolicyName = tierPolicyInstanceName['PolicyRuleName'] - - return fastPolicyName - - def is_volume_in_default_SG(self, conn, volumeInstanceName): - """Check if the volume is already part of the default storage group. - - :param conn: the ecom connection - :param volumeInstanceName: the volume instance - :returns: boolean -- True if the volume is already in default - storage group. 
False otherwise - """ - sgInstanceNames = conn.AssociatorNames( - volumeInstanceName, - ResultClass='CIM_DeviceMaskingGroup') - if len(sgInstanceNames) == 0: - LOG.debug("volume %(vol)s is not in default sg.", - {'vol': volumeInstanceName}) - return False - else: - for sgInstance in sgInstanceNames: - if DEFAULT_SG_PREFIX in sgInstance['InstanceID']: - LOG.debug("volume %(vol)s already in default sg.", - {'vol': volumeInstanceName}) - return True - return False diff --git a/cinder/volume/drivers/dell_emc/vmax/fc.py b/cinder/volume/drivers/dell_emc/vmax/fc.py index 7800294a980..b9f48ce73af 100644 --- a/cinder/volume/drivers/dell_emc/vmax/fc.py +++ b/cinder/volume/drivers/dell_emc/vmax/fc.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. +# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,7 +16,6 @@ import ast from oslo_log import log as logging -import six from cinder import interface from cinder.volume import driver @@ -28,7 +27,7 @@ LOG = logging.getLogger(__name__) @interface.volumedriver class VMAXFCDriver(driver.FibreChannelDriver): - """EMC FC Drivers for VMAX using SMI-S. + """FC Drivers for VMAX using REST. 
Version history: @@ -76,9 +75,10 @@ class VMAXFCDriver(driver.FibreChannelDriver): - Support for compression on All Flash - Volume replication 2.1 (bp add-vmax-replication) - rename and restructure driver (bp vmax-rename-dell-emc) + 3.0.0 - REST based driver """ - VERSION = "2.5.0" + VERSION = "3.0.0" # ThirdPartySystems wiki CI_WIKI_NAME = "EMC_VMAX_CI" @@ -86,64 +86,98 @@ class VMAXFCDriver(driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(VMAXFCDriver, self).__init__(*args, **kwargs) - self.active_backend_id = kwargs.get('active_backend_id', None) self.common = common.VMAXCommon( 'FC', self.VERSION, - configuration=self.configuration, - active_backend_id=self.active_backend_id) + configuration=self.configuration) self.zonemanager_lookup_service = fczm_utils.create_lookup_service() def check_for_setup_error(self): pass def create_volume(self, volume): - """Creates a VMAX volume.""" + """Creates a VMAX volume. + + :param volume: the cinder volume object + :return: provider location dict + """ return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" + """Creates a volume from a snapshot. + + :param volume: the cinder volume object + :param snapshot: the cinder snapshot object + :return: provider location dict + """ return self.common.create_volume_from_snapshot( volume, snapshot) def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume.""" + """Creates a cloned volume. + + :param volume: the cinder volume object + :param src_vref: the source volume reference + :return: provider location dict + """ return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): - """Deletes an VMAX volume.""" + """Deletes a VMAX volume. 
+ + :param volume: the cinder volume object + """ self.common.delete_volume(volume) def create_snapshot(self, snapshot): - """Creates a snapshot.""" - src_volume = snapshot['volume'] - volpath = self.common.create_snapshot(snapshot, src_volume) + """Creates a snapshot. - model_update = {} - snapshot['provider_location'] = six.text_type(volpath) - model_update['provider_location'] = snapshot['provider_location'] - return model_update + :param snapshot: the cinder snapshot object + :return: provider location dict + """ + src_volume = snapshot.volume + return self.common.create_snapshot(snapshot, src_volume) def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - src_volume = snapshot['volume'] + """Deletes a snapshot. + :param snapshot: the cinder snapshot object + """ + src_volume = snapshot.volume self.common.delete_snapshot(snapshot, src_volume) def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" + """Driver entry point to get the export info for an existing volume. + + :param context: the context + :param volume: the cinder volume object + """ pass def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" + """Driver entry point to get the export info for a new volume. + + :param context: the context + :param volume: the cinder volume object + :param connector: the connector object + """ pass def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" + """Driver entry point to remove an export for a volume. + + :param context: the context + :param volume: the cinder volume object + """ pass - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" + @staticmethod + def check_for_export(context, volume_id): + """Make sure volume is exported. 
+ + :param context: the context + :param volume_id: the volume id + """ pass @fczm_utils.add_fc_zone @@ -176,6 +210,9 @@ class VMAXFCDriver(driver.FibreChannelDriver): 'target_wwn': ['1234567890123', '0987654321321'], } } + :param volume: the cinder volume object + :param connector: the connector object + :return: dict -- the target_wwns and initiator_target_map """ device_info = self.common.initialize_connection( volume, connector) @@ -186,16 +223,14 @@ class VMAXFCDriver(driver.FibreChannelDriver): Add relevant data to data dict, target_lun, target_wwn and initiator_target_map. - :param device_info: device_info :param volume: the volume object :param connector: the connector object :returns: dict -- the target_wwns and initiator_target_map """ device_number = device_info['hostlunid'] - storage_system = device_info['storagesystem'] target_wwns, init_targ_map = self._build_initiator_target_map( - storage_system, volume, connector) + volume, connector) data = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': device_number, @@ -222,10 +257,8 @@ class VMAXFCDriver(driver.FibreChannelDriver): :returns: dict -- the target_wwns and initiator_target_map if the zone is to be removed, otherwise empty """ - data = {'driver_volume_type': 'fibre_channel', - 'data': {}} - zoning_mappings = ( - self._get_zoning_mappings(volume, connector)) + data = {'driver_volume_type': 'fibre_channel', 'data': {}} + zoning_mappings = self._get_zoning_mappings(volume, connector) if zoning_mappings: self.common.terminate_connection(volume, connector) @@ -243,37 +276,41 @@ class VMAXFCDriver(driver.FibreChannelDriver): zoning_mappings = {'port_group': None, 'initiator_group': None, 'target_wwns': None, - 'init_targ_map': None} - loc = volume['provider_location'] + 'init_targ_map': None, + 'array': None} + loc = volume.provider_location name = ast.literal_eval(loc) - storage_system = name['keybindings']['SystemName'] + host = connector['host'] + array = name['array'] + device_id = 
name['device_id'] LOG.debug("Start FC detach process for volume: %(volume)s.", - {'volume': volume['name']}) + {'volume': volume.name}) - mvInstanceName = self.common.get_masking_view_by_volume( - volume, connector) - if mvInstanceName: - portGroupInstanceName = ( + masking_views = self.common.get_masking_views_from_volume( + array, device_id, host) + if masking_views: + portgroup = ( self.common.get_port_group_from_masking_view( - mvInstanceName)) - initiatorGroupInstanceName = ( + array, masking_views[0])) + initiator_group = ( self.common.get_initiator_group_from_masking_view( - mvInstanceName)) + array, masking_views[0])) LOG.debug("Found port group: %(portGroup)s " "in masking view %(maskingView)s.", - {'portGroup': portGroupInstanceName, - 'maskingView': mvInstanceName}) + {'portGroup': portgroup, + 'maskingView': masking_views[0]}) # Map must be populated before the terminate_connection target_wwns, init_targ_map = self._build_initiator_target_map( - storage_system, volume, connector) - zoning_mappings = {'port_group': portGroupInstanceName, - 'initiator_group': initiatorGroupInstanceName, + volume, connector) + zoning_mappings = {'port_group': portgroup, + 'initiator_group': initiator_group, 'target_wwns': target_wwns, - 'init_targ_map': init_targ_map} + 'init_targ_map': init_targ_map, + 'array': array} else: LOG.warning("Volume %(volume)s is not in any masking view.", - {'volume': volume['name']}) + {'volume': volume.name}) return zoning_mappings def _cleanup_zones(self, zoning_mappings): @@ -284,36 +321,16 @@ class VMAXFCDriver(driver.FibreChannelDriver): """ LOG.debug("Looking for masking views still associated with " "Port Group %s.", zoning_mappings['port_group']) - if zoning_mappings['initiator_group']: - checkIgInstanceName = ( - self.common.check_ig_instance_name( - zoning_mappings['initiator_group'])) - else: - checkIgInstanceName = None + masking_views = self.common.get_common_masking_views( + zoning_mappings['array'], 
zoning_mappings['port_group'], + zoning_mappings['initiator_group']) - # if it has not been deleted, check for remaining masking views - if checkIgInstanceName: - mvInstances = self._get_common_masking_views( - zoning_mappings['port_group'], - zoning_mappings['initiator_group']) - - if len(mvInstances) > 0: - LOG.debug("Found %(numViews)lu MaskingViews.", - {'numViews': len(mvInstances)}) - data = {'driver_volume_type': 'fibre_channel', - 'data': {}} - else: # no masking views found - LOG.debug("No MaskingViews were found. Deleting zone.") - data = {'driver_volume_type': 'fibre_channel', - 'data': {'target_wwn': zoning_mappings['target_wwns'], - 'initiator_target_map': - zoning_mappings['init_targ_map']}} - - LOG.debug("Return FC data for zone removal: %(data)s.", - {'data': data}) - - else: # The initiator group has been deleted - LOG.debug("Initiator Group has been deleted. Deleting zone.") + if masking_views: + LOG.debug("Found %(numViews)d MaskingViews.", + {'numViews': len(masking_views)}) + data = {'driver_volume_type': 'fibre_channel', 'data': {}} + else: # no masking views found + LOG.debug("No MaskingViews were found. Deleting zone.") data = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': zoning_mappings['target_wwns'], 'initiator_target_map': @@ -321,32 +338,22 @@ class VMAXFCDriver(driver.FibreChannelDriver): LOG.debug("Return FC data for zone removal: %(data)s.", {'data': data}) + return data - def _get_common_masking_views( - self, portGroupInstanceName, initiatorGroupInstanceName): - """Check to see the existence of mv in list""" - mvInstances = [] - mvInstancesByPG = self.common.get_masking_views_by_port_group( - portGroupInstanceName) + def _build_initiator_target_map(self, volume, connector): + """Build the target_wwns and the initiator target map. 
- mvInstancesByIG = self.common.get_masking_views_by_initiator_group( - initiatorGroupInstanceName) - - for mvInstanceByPG in mvInstancesByPG: - if mvInstanceByPG in mvInstancesByIG: - mvInstances.append(mvInstanceByPG) - return mvInstances - - def _build_initiator_target_map(self, storage_system, volume, connector): - """Build the target_wwns and the initiator target map.""" - target_wwns = [] - init_targ_map = {} + :param volume: the cinder volume object + :param connector: the connector object + :return: target_wwns -- list, init_targ_map -- dict + """ + target_wwns, init_targ_map = [], {} initiator_wwns = connector['wwpns'] + fc_targets = self.common.get_target_wwns_from_masking_view( + volume, connector) if self.zonemanager_lookup_service: - fc_targets = self.common.get_target_wwns_from_masking_view( - storage_system, volume, connector) mapping = ( self.zonemanager_lookup_service. get_device_mapping_from_network(initiator_wwns, fc_targets)) @@ -356,15 +363,18 @@ class VMAXFCDriver(driver.FibreChannelDriver): for initiator in map_d['initiator_port_wwn_list']: init_targ_map[initiator] = map_d['target_port_wwn_list'] else: # No lookup service, pre-zoned case. - target_wwns = self.common.get_target_wwns_list( - storage_system, volume, connector) + target_wwns = fc_targets for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return list(set(target_wwns)), init_targ_map def extend_volume(self, volume, new_size): - """Extend an existing volume.""" + """Extend an existing volume. + + :param volume: the cinder volume object + :param new_size: the required new size + """ self.common.extend_volume(volume, new_size) def get_volume_stats(self, refresh=False): @@ -386,53 +396,14 @@ class VMAXFCDriver(driver.FibreChannelDriver): data['driver_version'] = self.VERSION self._stats = data - def migrate_volume(self, ctxt, volume, host): - """Migrate a volume from one Volume Backend to another. 
- - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param host: the host dict holding the relevant target(destination) - information - :returns: boolean -- Always returns True - :returns: dict -- Empty dict {} - """ - return self.common.migrate_volume(ctxt, volume, host) - - def retype(self, ctxt, volume, new_type, diff, host): - """Migrate volume to another host using retype. - - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param new_type: the new volume type. - :param diff: Unused parameter. - :param host: the host dict holding the relevant - target(destination) information - :returns: boolean -- True if retype succeeded, False if error - """ - return self.common.retype(ctxt, volume, new_type, diff, host) - - def create_consistencygroup(self, context, group): - """Creates a consistencygroup.""" - self.common.create_consistencygroup(context, group) - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group.""" - return self.common.delete_consistencygroup( - context, group, volumes) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot.""" - return self.common.create_cgsnapshot(context, cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots) - def manage_existing(self, volume, external_ref): """Manages an existing VMAX Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. + :param volume: the volume object + :param external_ref: the reference for the VMAX volume + :return: model_update """ return self.common.manage_existing(volume, external_ref) @@ -452,74 +423,3 @@ class VMAXFCDriver(driver.FibreChannelDriver): Leave the volume intact on the backend array. 
""" return self.common.unmanage(volume) - - def update_consistencygroup(self, context, group, - add_volumes, remove_volumes): - """Updates LUNs in consistency group.""" - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates the consistency group from source. - - Currently the source can only be a cgsnapshot. - - :param context: the context - :param group: the consistency group object to be created - :param volumes: volumes in the consistency group - :param cgsnapshot: the source consistency group snapshot - :param snapshots: snapshots of the source volumes - :param source_cg: the dictionary of a consistency group as source. - :param source_vols: a list of volume dictionaries in the source_cg. - """ - return self.common.create_consistencygroup_from_src( - context, group, volumes, cgsnapshot, snapshots, source_cg, - source_vols) - - def create_export_snapshot(self, context, snapshot, connector): - """Driver entry point to get the export info for a new snapshot.""" - pass - - def remove_export_snapshot(self, context, snapshot): - """Driver entry point to remove an export for a snapshot.""" - pass - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Allows connection to snapshot. - - :param snapshot: the snapshot object - :param connector: the connector object - :param kwargs: additional parameters - :returns: data dict - """ - src_volume = snapshot['volume'] - snapshot['host'] = src_volume['host'] - - return self.initialize_connection(snapshot, connector) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Disallows connection to snapshot. 
- - :param snapshot: the snapshot object - :param connector: the connector object - :param kwargs: additional parameters - """ - src_volume = snapshot['volume'] - snapshot['host'] = src_volume['host'] - return self.terminate_connection(snapshot, connector, **kwargs) - - def backup_use_temp_snapshot(self): - return True - - def failover_host(self, context, volumes, secondary_id=None): - """Failover volumes to a secondary host/ backend. - - :param context: the context - :param volumes: the list of volumes to be failed over - :param secondary_id: the backend to be failed over to, is 'default' - if fail back - :return: secondary_id, volume_update_list - """ - return self.common.failover_host(context, volumes, secondary_id) diff --git a/cinder/volume/drivers/dell_emc/vmax/https.py b/cinder/volume/drivers/dell_emc/vmax/https.py deleted file mode 100644 index bd04d51a693..00000000000 --- a/cinder/volume/drivers/dell_emc/vmax/https.py +++ /dev/null @@ -1,347 +0,0 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import base64 -import os -import socket -import ssl -import string -import struct - -from eventlet import patcher -try: - import OpenSSL -except ImportError: - OpenSSL = None -from oslo_log import log as logging -import six -from six.moves import http_client -from six.moves import urllib - -from cinder.i18n import _ - -# Handle case where we are running in a monkey patched environment -if OpenSSL and patcher.is_monkey_patched('socket'): - from eventlet.green.OpenSSL import SSL - -try: - import pywbem - pywbemAvailable = True -except ImportError: - pywbemAvailable = False - - -LOG = logging.getLogger(__name__) - - -def to_bytes(s): - if isinstance(s, six.string_types): - return six.b(s) - else: - return s - - -def get_default_ca_certs(): - """Gets the default CA certificates if found, otherwise None. - - Try to find out system path with ca certificates. This path is cached and - returned. If no path is found out, None is returned. - """ - if not hasattr(get_default_ca_certs, '_path'): - for path in ( - '/etc/pki/ca-trust/extracted/openssl/ca-bundle.trust.crt', - '/etc/ssl/certs', - '/etc/ssl/certificates'): - if os.path.exists(path): - get_default_ca_certs._path = path - break - else: - get_default_ca_certs._path = None - return get_default_ca_certs._path - - -class OpenSSLConnectionDelegator(object): - """An OpenSSL.SSL.Connection delegator. - - Supplies an additional 'makefile' method which http_client requires - and is not present in OpenSSL.SSL.Connection. - Note: Since it is not possible to inherit from OpenSSL.SSL.Connection - a delegator must be used. 
- """ - def __init__(self, *args, **kwargs): - self.connection = SSL.GreenConnection(*args, **kwargs) - - def __getattr__(self, name): - return getattr(self.connection, name) - - def makefile(self, *args, **kwargs): - return socket._fileobject(self.connection, *args, **kwargs) - - -class HTTPSConnection(http_client.HTTPSConnection): - def __init__(self, host, port=None, key_file=None, cert_file=None, - strict=None, ca_certs=None, no_verification=False): - if not pywbemAvailable: - LOG.info( - 'Module PyWBEM not installed. ' - 'Install PyWBEM using the python-pywbem package.') - if six.PY3: - excp_lst = (TypeError, ssl.SSLError) - else: - excp_lst = () - try: - http_client.HTTPSConnection.__init__(self, host, port, - key_file=key_file, - cert_file=cert_file) - - self.key_file = None if key_file is None else key_file - self.cert_file = None if cert_file is None else cert_file - self.insecure = no_verification - self.ca_certs = ( - None if ca_certs is None else six.text_type(ca_certs)) - self.set_context() - # ssl exceptions are reported in various form in Python 3 - # so to be compatible, we report the same kind as under - # Python2 - except excp_lst as e: - raise pywbem.cim_http.Error(six.text_type(e)) - - @staticmethod - def host_matches_cert(host, x509): - """Verify that the certificate matches host. - - Verify that the x509 certificate we have received - from 'host' correctly identifies the server we are - connecting to, ie that the certificate's Common Name - or a Subject Alternative Name matches 'host'. - """ - def check_match(name): - # Directly match the name. - if name == host: - return True - - # Support single wildcard matching. - if name.startswith('*.') and host.find('.') > 0: - if name[2:] == host.split('.', 1)[1]: - return True - - common_name = x509.get_subject().commonName - # First see if we can match the CN. - if check_match(common_name): - return True - # Also try Subject Alternative Names for a match. 
- san_list = None - for i in range(x509.get_extension_count()): - ext = x509.get_extension(i) - if ext.get_short_name() == b'subjectAltName': - san_list = six.text_type(ext) - for san in ''.join(san_list.split()).split(','): - if san.startswith('DNS:'): - if check_match(san.split(':', 1)[1]): - return True - - # Server certificate does not match host. - msg = (_("Host %(host)s does not match x509 certificate contents: " - "CommonName %(commonName)s.") - % {'host': host, - 'commonName': common_name}) - - if san_list is not None: - msg = (_("%(message)s, subjectAltName: %(sanList)s.") - % {'message': msg, - 'sanList': san_list}) - raise pywbem.cim_http.AuthError(msg) - - def verify_callback(self, connection, x509, errnum, - depth, preverify_ok): - if x509.has_expired(): - msg = msg = (_("SSL Certificate expired on %s.") - % x509.get_notAfter()) - raise pywbem.cim_http.AuthError(msg) - - if depth == 0 and preverify_ok: - # We verify that the host matches against the last - # certificate in the chain. - return self.host_matches_cert(self.host, x509) - else: - # Pass through OpenSSL's default result. - return preverify_ok - - def set_context(self): - """Set up the OpenSSL context.""" - self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) - - if self.insecure is not True: - self.context.set_verify(OpenSSL.SSL.VERIFY_PEER, - self.verify_callback) - else: - self.context.set_verify(OpenSSL.SSL.VERIFY_NONE, - lambda *args: True) - - if self.cert_file: - try: - self.context.use_certificate_file(self.cert_file) - except Exception as e: - msg = (_("Unable to load cert from %(cert)s %(e)s.") - % {'cert': self.cert_file, - 'e': e}) - raise pywbem.cim_http.AuthError(msg) - if self.key_file is None: - # We support having key and cert in same file. 
- try: - self.context.use_privatekey_file(self.cert_file) - except Exception as e: - msg = (_("No key file specified and unable to load key " - "from %(cert)s %(e)s.") - % {'cert': self.cert_file, - 'e': e}) - raise pywbem.cim_http.AuthError(msg) - - if self.key_file: - try: - self.context.use_privatekey_file(self.key_file) - except Exception as e: - msg = (_("Unable to load key from %(cert)s %(e)s.") - % {'cert': self.cert_file, - 'e': e}) - raise pywbem.cim_http.AuthError(msg) - - if self.ca_certs: - try: - self.context.load_verify_locations(to_bytes(self.ca_certs)) - except Exception as e: - msg = (_("Unable to load CA from %(cert)s %(e)s.") - % {'cert': self.cert_file, - 'e': e}) - raise pywbem.cim_http.AuthError(msg) - else: - self.context.set_default_verify_paths() - - def connect(self): - result = socket.getaddrinfo(self.host, self.port, 0, - socket.SOCK_STREAM) - if result: - socket_family = result[0][0] - if socket_family == socket.AF_INET6: - sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) - else: - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - else: - # If due to some reason the address lookup fails - we still - # connect to IPv4 socket. This retains the older behavior. - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if self.timeout is not None: - # '0' microseconds - sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, - struct.pack('LL', 0, 0)) - self.sock = OpenSSLConnectionDelegator(self.context, sock) - self.sock.connect((self.host, self.port)) - - -def wbem_request(url, data, creds, headers=None, debug=0, x509=None, - verify_callback=None, ca_certs=None, - no_verification=False): - """Send request over HTTP. - - Send XML data over HTTP to the specified url. Return the - response in XML. Uses Python's build-in http_client. x509 may be a - dictionary containing the location of the SSL certificate and key - files. 
- """ - - if headers is None: - headers = [] - - host, port, use_ssl = pywbem.cim_http.parse_url(url) - key_file = None - cert_file = None - if use_ssl and x509 is not None: - cert_file = x509.get('cert_file') - key_file = x509.get('key_file') - - numTries = 0 - localAuthHeader = None - tryLimit = 5 - - if isinstance(data, six.text_type): - data = data.encode('utf-8') - data = '\n' + data - - if not no_verification and ca_certs is None: - ca_certs = get_default_ca_certs() - elif no_verification: - ca_certs = None - - h = HTTPSConnection( - host, - port=port, - key_file=key_file, - cert_file=cert_file, - ca_certs=ca_certs, - no_verification=no_verification) - - locallogin = None - while numTries < tryLimit: - numTries = numTries + 1 - - h.putrequest('POST', '/cimom') - h.putheader('Content-type', 'application/xml; charset="utf-8"') - h.putheader('Content-length', len(data)) - if localAuthHeader is not None: - h.putheader(*localAuthHeader) - elif creds is not None: - h.putheader('Authorization', 'Basic %s' % - base64.encodestring('%s:%s' % (creds[0], creds[1])) - .replace('\n', '')) - elif locallogin is not None: - h.putheader('PegasusAuthorization', 'Local "%s"' % locallogin) - - for hdr in headers: - if isinstance(hdr, six.text_type): - hdr = hdr.encode('utf-8') - s = map(lambda x: string.strip(x), string.split(hdr, ":", 1)) - h.putheader(urllib.parse.quote(s[0]), urllib.parse.quote(s[1])) - - try: - h.endheaders() - try: - h.send(data) - except socket.error as arg: - if arg[0] != 104 and arg[0] != 32: - raise - - response = h.getresponse() - body = response.read() - h.close() - - if response.status != http_client.OK: - raise pywbem.cim_http.Error('HTTP error') - - except http_client.BadStatusLine as arg: - msg = (_("Bad Status line returned: %(arg)s.") - % {'arg': arg}) - raise pywbem.cim_http.Error(msg) - except socket.sslerror as arg: - msg = (_("SSL error: %(arg)s.") - % {'arg': arg}) - raise pywbem.cim_http.Error(msg) - except socket.error as arg: - msg = 
(_("Socket error: %(arg)s.") - % {'arg': arg}) - raise pywbem.cim_http.Error(msg) - - break - - return body diff --git a/cinder/volume/drivers/dell_emc/vmax/iscsi.py b/cinder/volume/drivers/dell_emc/vmax/iscsi.py index c20142f0be2..5e1ab5da487 100644 --- a/cinder/volume/drivers/dell_emc/vmax/iscsi.py +++ b/cinder/volume/drivers/dell_emc/vmax/iscsi.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. +# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. """ -ISCSI Drivers for EMC VMAX arrays based on SMI-S. +ISCSI Drivers for Dell EMC VMAX arrays based on REST. """ from oslo_log import log as logging @@ -25,15 +25,12 @@ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell_emc.vmax import common - LOG = logging.getLogger(__name__) -CINDER_CONF = '/etc/cinder/cinder.conf' - @interface.volumedriver class VMAXISCSIDriver(driver.ISCSIDriver): - """EMC ISCSI Drivers for VMAX using SMI-S. + """ISCSI Drivers for VMAX using Rest. 
Version history: @@ -83,10 +80,10 @@ class VMAXISCSIDriver(driver.ISCSIDriver): - Support for compression on All Flash - Volume replication 2.1 (bp add-vmax-replication) - rename and restructure driver (bp vmax-rename-dell-emc) - + 3.0.0 - REST based driver """ - VERSION = "2.5.0" + VERSION = "3.0.0" # ThirdPartySystems wiki CI_WIKI_NAME = "EMC_VMAX_CI" @@ -94,64 +91,99 @@ class VMAXISCSIDriver(driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(VMAXISCSIDriver, self).__init__(*args, **kwargs) - self.active_backend_id = kwargs.get('active_backend_id', None) self.common = ( common.VMAXCommon( 'iSCSI', self.VERSION, - configuration=self.configuration, - active_backend_id=self.active_backend_id)) + configuration=self.configuration)) def check_for_setup_error(self): pass def create_volume(self, volume): - """Creates a VMAX volume.""" + """Creates a VMAX volume. + + :param volume: the cinder volume object + :return: provider location dict + """ return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): - """Creates a volume from a snapshot.""" + """Creates a volume from a snapshot. + + :param volume: the cinder volume object + :param snapshot: the cinder snapshot object + :return: provider location dict + """ return self.common.create_volume_from_snapshot( volume, snapshot) def create_cloned_volume(self, volume, src_vref): - """Creates a cloned volume.""" + """Creates a cloned volume. + + :param volume: the cinder volume object + :param src_vref: the source volume reference + :return: provider location dict + """ return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): - """Deletes an VMAX volume.""" + """Deletes a VMAX volume. 
+ + :param volume: the cinder volume object + """ self.common.delete_volume(volume) def create_snapshot(self, snapshot): - """Creates a snapshot.""" - src_volume = snapshot['volume'] - volpath = self.common.create_snapshot(snapshot, src_volume) + """Creates a snapshot. - model_update = {} - snapshot['provider_location'] = six.text_type(volpath) - model_update['provider_location'] = snapshot['provider_location'] - return model_update + :param snapshot: the cinder snapshot object + :return: provider location dict + """ + src_volume = snapshot.volume + return self.common.create_snapshot(snapshot, src_volume) def delete_snapshot(self, snapshot): - """Deletes a snapshot.""" - src_volume = snapshot['volume'] + """Deletes a snapshot. + + :param snapshot: the cinder snapshot object + """ + src_volume = snapshot.volume self.common.delete_snapshot(snapshot, src_volume) def ensure_export(self, context, volume): - """Driver entry point to get the export info for an existing volume.""" + """Driver entry point to get the export info for an existing volume. + + :param context: the context + :param volume: the cinder volume object + """ pass def create_export(self, context, volume, connector): - """Driver entry point to get the export info for a new volume.""" + """Driver entry point to get the export info for a new volume. + + :param context: the context + :param volume: the cinder volume object + :param connector: the connector object + """ pass def remove_export(self, context, volume): - """Driver entry point to remove an export for a volume.""" + """Driver entry point to remove an export for a volume. + + :param context: the context + :param volume: the cinder volume object + """ pass - def check_for_export(self, context, volume_id): - """Make sure volume is exported.""" + @staticmethod + def check_for_export(context, volume_id): + """Make sure volume is exported. 
+ + :param context: the context + :param volume_id: the volume id + """ pass def initialize_connection(self, volume, connector): @@ -183,40 +215,42 @@ class VMAXISCSIDriver(driver.ISCSIDriver): 'target_luns': [1, 1], } } + :param volume: the cinder volume object + :param connector: the connector object + :return: dict -- the iscsi dict """ device_info = self.common.initialize_connection( volume, connector) - return self.get_iscsi_dict( - device_info, volume, connector) + return self.get_iscsi_dict(device_info, volume) - def get_iscsi_dict(self, device_info, volume, connector): + def get_iscsi_dict(self, device_info, volume): """Populate iscsi dict to pass to nova. :param device_info: device info dict :param volume: volume object - :param connector: connector object :return: iscsi dict """ try: ip_and_iqn = device_info['ip_and_iqn'] is_multipath = device_info['is_multipath'] - except KeyError as ex: - exception_message = (_("Cannot get iSCSI ipaddresses or " - "multipath flag. Exception is %(ex)s. ") - % {'ex': ex}) + host_lun_id = device_info['hostlunid'] + except KeyError as e: + exception_message = (_("Cannot get iSCSI ipaddresses, multipath " + "flag, or hostlunid. Exception is %(e)s.") + % {'e': six.text_type(e)}) raise exception.VolumeBackendAPIException(data=exception_message) - iscsi_properties = self.smis_get_iscsi_properties( - volume, connector, ip_and_iqn, is_multipath) + iscsi_properties = self.vmax_get_iscsi_properties( + volume, ip_and_iqn, is_multipath, host_lun_id) - LOG.info("iSCSI properties are: %s", iscsi_properties) - return { - 'driver_volume_type': 'iscsi', - 'data': iscsi_properties - } + LOG.info("iSCSI properties are: %(props)s", + {'props': iscsi_properties}) + return {'driver_volume_type': 'iscsi', + 'data': iscsi_properties} - def smis_get_iscsi_properties(self, volume, connector, ip_and_iqn, - is_multipath): + @staticmethod + def vmax_get_iscsi_properties(volume, ip_and_iqn, + is_multipath, host_lun_id): """Gets iscsi configuration. 
We ideally get saved information in the volume entity, but fall back @@ -231,48 +265,32 @@ class VMAXISCSIDriver(driver.ISCSIDriver): the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. + + :param volume: the cinder volume object + :param ip_and_iqn: list of ip and iqn dicts + :param is_multipath: flag for multipath + :param host_lun_id: the host lun id of the device + :return: properties """ - - device_info, __, __ = self.common.find_device_number( - volume, connector['host']) - - isError = False - if device_info: - try: - lun_id = device_info['hostlunid'] - except KeyError: - isError = True - else: - isError = True - - if isError: - LOG.error("Unable to get the lun id") - exception_message = (_("Cannot find device number for volume " - "%(volumeName)s.") - % {'volumeName': volume['name']}) - raise exception.VolumeBackendAPIException(data=exception_message) - properties = {} if len(ip_and_iqn) > 1 and is_multipath: properties['target_portals'] = ([t['ip'] + ":3260" for t in ip_and_iqn]) properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in ip_and_iqn]) - properties['target_luns'] = [lun_id] * len(ip_and_iqn) + properties['target_luns'] = [host_lun_id] * len(ip_and_iqn) properties['target_discovered'] = True properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0] properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260" - properties['target_lun'] = lun_id - properties['volume_id'] = volume['id'] + properties['target_lun'] = host_lun_id + properties['volume_id'] = volume.id - LOG.info( - "ISCSI properties: %(properties)s.", {'properties': properties}) + LOG.info("ISCSI properties: %(properties)s.", + {'properties': properties}) LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume}) - if 'provider_auth' in volume: - auth = volume['provider_auth'] - LOG.info( - "AUTH properties: %(authProps)s.", {'authProps': auth}) + if 
hasattr(volume, 'provider_auth'): + auth = volume.provider_auth if auth is not None: (auth_method, auth_username, auth_secret) = auth.split() @@ -281,22 +299,36 @@ class VMAXISCSIDriver(driver.ISCSIDriver): properties['auth_username'] = auth_username properties['auth_password'] = auth_secret - LOG.info("AUTH properties: %s.", properties) - return properties def terminate_connection(self, volume, connector, **kwargs): - """Disallow connection from connector.""" + """Disallow connection from connector. + + Return empty data if other volumes are in the same zone. + The FibreChannel ZoneManager doesn't remove zones + if there isn't an initiator_target_map in the + return of terminate_connection. + + :param volume: the volume object + :param connector: the connector object + :returns: dict -- the target_wwns and initiator_target_map if the + zone is to be removed, otherwise empty + """ self.common.terminate_connection(volume, connector) def extend_volume(self, volume, new_size): - """Extend an existing volume.""" + """Extend an existing volume. + + :param volume: the cinder volume object + :param new_size: the required new size + """ self.common.extend_volume(volume, new_size) def get_volume_stats(self, refresh=False): """Get volume stats. - If 'refresh' is True, run update the stats first. + :param refresh: boolean -- If True, run update the stats first. + :returns: dict -- the stats dict """ if refresh: self.update_volume_stats() @@ -311,46 +343,6 @@ class VMAXISCSIDriver(driver.ISCSIDriver): data['driver_version'] = self.VERSION self._stats = data - def migrate_volume(self, ctxt, volume, host): - """Migrate a volume from one Volume Backend to another. 
- - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param host: the host dict holding the relevant target information - :returns: boolean -- Always returns True - :returns: dict -- Empty dict {} - """ - return self.common.migrate_volume(ctxt, volume, host) - - def retype(self, ctxt, volume, new_type, diff, host): - """Migrate volume to another host using retype. - - :param ctxt: context - :param volume: the volume object including the volume_type_id - :param new_type: the new volume type. - :param diff: Unused parameter in common.retype - :param host: the host dict holding the relevant target information - :returns: boolean -- True if retype succeeded, False if error - """ - return self.common.retype(ctxt, volume, new_type, diff, host) - - def create_consistencygroup(self, context, group): - """Creates a consistencygroup.""" - self.common.create_consistencygroup(context, group) - - def delete_consistencygroup(self, context, group, volumes): - """Deletes a consistency group.""" - return self.common.delete_consistencygroup( - context, group, volumes) - - def create_cgsnapshot(self, context, cgsnapshot, snapshots): - """Creates a cgsnapshot.""" - return self.common.create_cgsnapshot(context, cgsnapshot, snapshots) - - def delete_cgsnapshot(self, context, cgsnapshot, snapshots): - """Deletes a cgsnapshot.""" - return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots) - def manage_existing(self, volume, external_ref): """Manages an existing VMAX Volume (import to Cinder). @@ -375,77 +367,3 @@ class VMAXISCSIDriver(driver.ISCSIDriver): Leave the volume intact on the backend array. 
""" return self.common.unmanage(volume) - - def update_consistencygroup(self, context, group, - add_volumes, remove_volumes): - """Updates LUNs in consistency group.""" - return self.common.update_consistencygroup(group, add_volumes, - remove_volumes) - - def create_consistencygroup_from_src(self, context, group, volumes, - cgsnapshot=None, snapshots=None, - source_cg=None, source_vols=None): - """Creates the consistency group from source. - - Currently the source can only be a cgsnapshot. - - :param context: the context - :param group: the consistency group object to be created - :param volumes: volumes in the consistency group - :param cgsnapshot: the source consistency group snapshot - :param snapshots: snapshots of the source volumes - :param source_cg: the dictionary of a consistency group as source. - :param source_vols: a list of volume dictionaries in the source_cg. - """ - return self.common.create_consistencygroup_from_src( - context, group, volumes, cgsnapshot, snapshots, source_cg, - source_vols) - - def create_export_snapshot(self, context, snapshot, connector): - """Driver entry point to get the export info for a new snapshot.""" - pass - - def remove_export_snapshot(self, context, snapshot): - """Driver entry point to remove an export for a snapshot.""" - pass - - def initialize_connection_snapshot(self, snapshot, connector, **kwargs): - """Allows connection to snapshot. - - :param snapshot: the snapshot object - :param connector: the connector object - :param kwargs: additional parameters - :returns: iscsi dict - """ - src_volume = snapshot['volume'] - snapshot['host'] = src_volume['host'] - device_info = self.common.initialize_connection( - snapshot, connector) - return self.get_iscsi_dict( - device_info, snapshot, connector) - - def terminate_connection_snapshot(self, snapshot, connector, **kwargs): - """Disallows connection to snapshot. 
- - :param snapshot: the snapshot object - :param connector: the connector object - :param kwargs: additional parameters - """ - src_volume = snapshot['volume'] - snapshot['host'] = src_volume['host'] - return self.common.terminate_connection(snapshot, - connector) - - def backup_use_temp_snapshot(self): - return True - - def failover_host(self, context, volumes, secondary_id=None): - """Failover volumes to a secondary host/ backend. - - :param context: the context - :param volumes: the list of volumes to be failed over - :param secondary_id: the backend to be failed over to, is 'default' - if fail back - :return: secondary_id, volume_update_list - """ - return self.common.failover_host(context, volumes, secondary_id) diff --git a/cinder/volume/drivers/dell_emc/vmax/masking.py b/cinder/volume/drivers/dell_emc/vmax/masking.py index b40cca72a07..78d76c2bc5c 100644 --- a/cinder/volume/drivers/dell_emc/vmax/masking.py +++ b/cinder/volume/drivers/dell_emc/vmax/masking.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. +# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,919 +13,615 @@ # License for the specific language governing permissions and limitations # under the License. +import time + from oslo_log import log as logging import six from cinder import coordination from cinder import exception from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.vmax import fast from cinder.volume.drivers.dell_emc.vmax import provision -from cinder.volume.drivers.dell_emc.vmax import provision_v3 from cinder.volume.drivers.dell_emc.vmax import utils LOG = logging.getLogger(__name__) -STORAGEGROUPTYPE = 4 -POSTGROUPTYPE = 3 -INITIATORGROUPTYPE = 2 - -ISCSI = 'iscsi' -FC = 'fc' - -EMC_ROOT = 'root/emc' -FASTPOLICY = 'storagetype:fastpolicy' -ISV3 = 'isV3' - class VMAXMasking(object): - """Masking class for SMI-S based EMC volume drivers. 
+ """Masking class for Dell EMC VMAX. - Masking code to dynamically create a masking view - This masking class is for EMC volume drivers based on SMI-S. + Masking code to dynamically create a masking view. It supports VMAX arrays. """ - def __init__(self, prtcl): + def __init__(self, prtcl, rest): self.protocol = prtcl - self.utils = utils.VMAXUtils(prtcl) - self.fast = fast.VMAXFast(prtcl) - self.provision = provision.VMAXProvision(prtcl) - self.provisionv3 = provision_v3.VMAXProvisionV3(prtcl) + self.utils = utils.VMAXUtils() + self.rest = rest + self.provision = provision.VMAXProvision(self.rest) - def setup_masking_view(self, conn, maskingViewDict, extraSpecs): + def setup_masking_view( + self, serial_number, masking_view_dict, extra_specs): - @coordination.synchronized("emc-mv-{maskingViewDict[maskingViewName]}") - def do_get_or_create_masking_view_and_map_lun(maskingViewDict): - return self.get_or_create_masking_view_and_map_lun(conn, - maskingViewDict, - extraSpecs) + @coordination.synchronized("emc-mv-{maskingview_name}") + def do_get_or_create_masking_view_and_map_lun(maskingview_name): + return self.get_or_create_masking_view_and_map_lun( + serial_number, maskingview_name, masking_view_dict, + extra_specs) return do_get_or_create_masking_view_and_map_lun( - maskingViewDict) + masking_view_dict[utils.MV_NAME]) - def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict, - extraSpecs): + def get_or_create_masking_view_and_map_lun( + self, serial_number, maskingview_name, masking_view_dict, + extra_specs): """Get or Create a masking view and add a volume to the storage group. - Given a masking view tuple either get or create a masking view and add + Given a masking view dict either get or create a masking view and add the volume to the associated storage group. - If it is a live migration operation then we do not need to remove - the volume from any storage group (default or otherwise). 
- - :param conn: the connection to ecom - :param maskingViewDict: the masking view dict - :param extraSpecs: additional info - :returns: dict -- rollbackDict - :raises VolumeBackendAPIException: + :param serial_number: the array serial number + :param maskingview_name: the masking view name + :param masking_view_dict: the masking view dict + :param extra_specs: the extra specifications + :return: rollback_dict + :raises: VolumeBackendAPIException """ - rollbackDict = {} + storagegroup_name = masking_view_dict[utils.SG_NAME] + volume_name = masking_view_dict[utils.VOL_NAME] + masking_view_dict[utils.EXTRA_SPECS] = extra_specs + device_id = masking_view_dict[utils.DEVICE_ID] + default_sg_name = self._get_default_storagegroup_and_remove_vol( + serial_number, device_id, masking_view_dict, volume_name, + extra_specs) - controllerConfigService = maskingViewDict['controllerConfigService'] - volumeInstance = maskingViewDict['volumeInstance'] - maskingViewName = maskingViewDict['maskingViewName'] - volumeName = maskingViewDict['volumeName'] - isV3 = maskingViewDict['isV3'] - isLiveMigration = maskingViewDict['isLiveMigration'] - maskingViewDict['extraSpecs'] = extraSpecs - defaultStorageGroupInstanceName = None - fastPolicyName = None - storageGroupInstanceName = None - if isLiveMigration: - maskingViewDict['maskingViewName'] = ( - maskingViewDict['maskingViewNameLM']) - maskingViewName = maskingViewDict['maskingViewNameLM'] - else: - if isV3: - defaultStorageGroupInstanceName = ( - self._get_v3_default_storagegroup_instancename( - conn, volumeInstance, maskingViewDict, - controllerConfigService, volumeName)) - - else: - fastPolicyName = maskingViewDict['fastPolicy'] - # If FAST is enabled remove the volume from the default SG. 
- if fastPolicyName is not None: - defaultStorageGroupInstanceName = ( - self._get_and_remove_from_storage_group_v2( - conn, controllerConfigService, - volumeInstance.path, - volumeName, fastPolicyName, - extraSpecs)) - - # If anything has gone wrong with the masking view we rollback try: - maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( - self._validate_masking_view(conn, maskingViewDict, - defaultStorageGroupInstanceName, - extraSpecs)) - instance = conn.GetInstance(storageGroupInstanceName) - maskingViewDict['sgGroupName'] = instance['ElementName'] + error_message = self._get_or_create_masking_view( + serial_number, masking_view_dict, extra_specs) LOG.debug( "The masking view in the attach operation is " - "%(maskingViewInstanceName)s. The storage group " - "in the masking view is %(storageGroupInstanceName)s.", - {'maskingViewInstanceName': maskingViewInstanceName, - 'storageGroupInstanceName': storageGroupInstanceName}) + "%(masking_name)s. The storage group " + "in the masking view is %(storage_name)s.", + {'masking_name': maskingview_name, + 'storage_name': storagegroup_name}) except Exception as e: LOG.exception( "Masking View creation or retrieval was not successful " - "for masking view %(maskingViewName)s. " + "for masking view %(maskingview_name)s. " "Attempting rollback.", - {'maskingViewName': maskingViewDict['maskingViewName']}) - errorMessage = e + {'maskingview_name': masking_view_dict[utils.MV_NAME]}) + error_message = six.text_type(e) - rollbackDict['pgGroupName'], pg_errorMessage = ( - self._get_port_group_name_from_mv( - conn, maskingViewDict['maskingViewName'], - maskingViewDict['storageSystemName'])) + rollback_dict = masking_view_dict + try: + rollback_dict['portgroup_name'] = ( + self.rest.get_element_from_masking_view( + serial_number, maskingview_name, portgroup=True)) + except Exception as e: + error_message = ("Error retrieving port group. 
Exception " + "received: %(e)s" % {'e': six.text_type(e)}) + rollback_dict['default_sg_name'] = default_sg_name - if pg_errorMessage: - errorMessage = pg_errorMessage - - if not errorMessage: - # Only after the masking view has been validated, add the - # volume to the storage group and recheck that it has been - # successfully added. - errorMessage = self._check_adding_volume_to_storage_group( - conn, maskingViewDict, storageGroupInstanceName) - - rollbackDict['controllerConfigService'] = controllerConfigService - rollbackDict['defaultStorageGroupInstanceName'] = ( - defaultStorageGroupInstanceName) - rollbackDict['volumeInstance'] = volumeInstance - rollbackDict['volumeName'] = volumeName - rollbackDict['fastPolicyName'] = fastPolicyName - rollbackDict['isV3'] = isV3 - rollbackDict['extraSpecs'] = extraSpecs - rollbackDict['sgGroupName'] = maskingViewDict['sgGroupName'] - rollbackDict['igGroupName'] = maskingViewDict['igGroupName'] - rollbackDict['connector'] = maskingViewDict['connector'] - - if errorMessage: + if error_message: # Rollback code if we cannot complete any of the steps above # successfully then we must roll back by adding the volume back to - # the default storage group for that fast policy. - if (fastPolicyName is not None): - # If the errorMessage was returned before the volume - # was removed from the default storage group no action. - self._check_if_rollback_action_for_masking_required( - conn, rollbackDict) - if isV3: - if maskingViewDict['slo'] is not None: - rollbackDict['storageSystemName'] = ( - maskingViewDict['storageSystemName']) - rollbackDict['slo'] = maskingViewDict['slo'] - self._check_if_rollback_action_for_masking_required( - conn, rollbackDict) + # the default storage group for that slo/workload combination. 
- else: - errorMessage = self._check_adding_volume_to_storage_group( - conn, rollbackDict, - rollbackDict['defaultStorageGroupInstanceName']) - if errorMessage: - LOG.error(errorMessage) + if rollback_dict['slo'] is not None: + self.check_if_rollback_action_for_masking_required( + serial_number, device_id, masking_view_dict) - exceptionMessage = (_( + else: + self._check_adding_volume_to_storage_group( + serial_number, device_id, rollback_dict['default_sg_name'], + masking_view_dict[utils.VOL_NAME], + masking_view_dict[utils.EXTRA_SPECS]) + + exception_message = (_( "Failed to get, create or add volume %(volumeName)s " - "to masking view %(maskingViewName)s. " + "to masking view %(maskingview_name)s. " "The error message received was %(errorMessage)s.") - % {'maskingViewName': maskingViewName, - 'volumeName': volumeName, - 'errorMessage': errorMessage}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) + % {'maskingview_name': maskingview_name, + 'volumeName': volume_name, + 'errorMessage': error_message}) + LOG.error(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) - return rollbackDict + return rollback_dict - def _get_v3_default_storagegroup_instancename(self, conn, volumeinstance, - maskingviewdict, - controllerConfigService, - volumeName): - defaultStorageGroupInstanceName = None - defaultSgGroupName = self.utils.get_v3_storage_group_name( - maskingviewdict['pool'], - maskingviewdict['slo'], - maskingviewdict['workload'], - maskingviewdict['isCompressionDisabled'], - maskingviewdict['replication_enabled']) - assocStorageGroupInstanceNames = ( - self.utils.get_storage_groups_from_volume( - conn, volumeinstance.path)) - for assocStorageGroupInstanceName in ( - assocStorageGroupInstanceNames): - instance = conn.GetInstance( - assocStorageGroupInstanceName, LocalOnly=False) - assocStorageGroupName = instance['ElementName'] + def _get_default_storagegroup_and_remove_vol( + self, 
serial_number, device_id, masking_view_dict, + volume_name, extra_specs): + """Get the default storage group and remove volume. - if assocStorageGroupName == defaultSgGroupName: - defaultStorageGroupInstanceName = ( - assocStorageGroupInstanceName) - break - if defaultStorageGroupInstanceName: - self._get_and_remove_from_storage_group_v3( - conn, controllerConfigService, volumeinstance.path, - volumeName, maskingviewdict, - defaultStorageGroupInstanceName) + :param serial_number: the array serial number + :param device_id: the device id + :param masking_view_dict: the masking view dict + :param volume_name: the volume name + :param extra_specs: the extra specifications + :return: default_sg_name + """ + default_sg_name = self.utils.get_default_storage_group_name( + masking_view_dict[utils.SRP], + masking_view_dict[utils.SLO], + masking_view_dict[utils.WORKLOAD]) + + check_vol = self.rest.is_volume_in_storagegroup( + serial_number, device_id, default_sg_name) + if check_vol: + self.remove_volume_from_sg( + serial_number, device_id, volume_name, default_sg_name, + extra_specs) else: LOG.warning( - "Volume: %(volumeName)s does not belong " - "to storage group %(defaultSgGroupName)s.", - {'volumeName': volumeName, - 'defaultSgGroupName': defaultSgGroupName}) - return defaultStorageGroupInstanceName + "Volume: %(volume_name)s does not belong " + "to default storage group %(default_sg_name)s.", + {'volume_name': volume_name, + 'default_sg_name': default_sg_name}) + return default_sg_name - def _validate_masking_view(self, conn, maskingViewDict, - defaultStorageGroupInstanceName, - extraSpecs): - """Validate all the individual pieces of the masking view. + def _get_or_create_masking_view(self, serial_number, masking_view_dict, + extra_specs): + """Retrieve an existing masking view or create a new one. 
- :param conn: the ecom connection - :param maskingViewDict: the masking view dictionary - :param defaultStorageGroupInstanceName: the default SG - :param extraSpecs: extra specifications - :returns: maskingViewInstanceName - :returns: storageGroupInstanceName, - :returns: string -- errorMessage + :param serial_number: the array serial number + :param masking_view_dict: the masking view dict + :param extra_specs: the extra specifications + :return: error message """ - storageSystemName = maskingViewDict['storageSystemName'] - maskingViewName = maskingViewDict['maskingViewName'] + maskingview_name = masking_view_dict[utils.MV_NAME] - maskingViewInstanceName = self._find_masking_view( - conn, maskingViewName, storageSystemName) - if maskingViewInstanceName is None: - maskingViewInstanceName, storageGroupInstanceName, errorMessage = ( - self._validate_new_masking_view( - conn, maskingViewDict, defaultStorageGroupInstanceName, - extraSpecs)) + masking_view_details = self.rest.get_masking_view( + serial_number, masking_view_name=maskingview_name) + if not masking_view_details: + error_message = self._create_new_masking_view( + serial_number, masking_view_dict, maskingview_name, + extra_specs) else: - storageGroupInstanceName, errorMessage = ( + storagegroup_name, error_message = ( self._validate_existing_masking_view( - conn, maskingViewDict, maskingViewInstanceName, - extraSpecs)) + serial_number, masking_view_dict, maskingview_name, + extra_specs)) - return maskingViewInstanceName, storageGroupInstanceName, errorMessage + return error_message - def _validate_new_masking_view(self, conn, maskingViewDict, - defaultStorageGroupInstanceName, - extraSpecs): - """Validate the creation of a new masking view. + def _create_new_masking_view(self, serial_number, masking_view_dict, + maskingview_name, extra_specs): + """Create a new masking view. 
- :param conn: the ecom connection - :param maskingViewDict: the masking view dictionary - :param defaultStorageGroupInstanceName: the default SG - :param extraSpecs: extra specifications - :returns: maskingViewInstanceName - :returns: storageGroupInstanceName, - :returns: string -- errorMessage + :param serial_number: the array serial number + :param masking_view_dict: the masking view dict + :param maskingview_name: the masking view name + :param extra_specs: the extra specifications + :return: error_message """ - controllerConfigService = maskingViewDict['controllerConfigService'] - igGroupName = maskingViewDict['igGroupName'] - connector = maskingViewDict['connector'] - storageSystemName = maskingViewDict['storageSystemName'] - maskingViewName = maskingViewDict['maskingViewName'] - pgGroupName = maskingViewDict['pgGroupName'] - LOG.info("Returning random Port Group: %(portGroupName)s.", - {'portGroupName': pgGroupName}) + init_group_name = masking_view_dict[utils.IG_NAME] + parent_sg_name = masking_view_dict[utils.PARENT_SG_NAME] + storagegroup_name = masking_view_dict[utils.SG_NAME] + connector = masking_view_dict[utils.CONNECTOR] + port_group_name = masking_view_dict[utils.PORTGROUPNAME] + LOG.info("Port Group in masking view operation: %(port_group_name)s.", + {'port_group_name': port_group_name}) - if maskingViewDict['isLiveMigration']: - try: - # We are sharing the storage group and port group - # between host and target - storageGroupInstanceName = ( - maskingViewDict['storageGroupInstanceName']) - storageGroupinstance = conn.GetInstance( - storageGroupInstanceName) - maskingViewDict['sgGroupName'] = ( - storageGroupinstance['ElementName']) - portGroupInstanceName = ( - maskingViewDict['portGroupInstanceName']) - portGroupInstance = conn.GetInstance( - portGroupInstanceName) - maskingViewDict['pgGroupName'] = ( - portGroupInstance['ElementName']) - except Exception: - errorMessage = (_( - "Unable to get storage group for live migration.")) - return None, 
None, errorMessage - else: - storageGroupInstanceName, errorMessage = ( - self._check_storage_group( - conn, maskingViewDict, defaultStorageGroupInstanceName)) - if errorMessage: - return None, storageGroupInstanceName, errorMessage + # get or create parent sg + error_message = self._get_or_create_storage_group( + serial_number, masking_view_dict, parent_sg_name, extra_specs, + parent=True) + if error_message: + return error_message - portGroupInstanceName, errorMessage = ( - self._check_port_group(conn, controllerConfigService, - pgGroupName)) - if errorMessage: - return None, storageGroupInstanceName, errorMessage + # get or create child sg + error_message = self._get_or_create_storage_group( + serial_number, masking_view_dict, storagegroup_name, extra_specs) + if error_message: + return error_message - initiatorGroupInstanceName, errorMessage = ( - self._check_initiator_group(conn, controllerConfigService, - igGroupName, connector, - storageSystemName, extraSpecs)) - if errorMessage: - return None, storageGroupInstanceName, errorMessage + __, error_message = self._check_port_group( + serial_number, port_group_name) + if error_message: + return error_message + + init_group_name, error_message = (self._get_or_create_initiator_group( + serial_number, init_group_name, connector, extra_specs)) + if error_message: + return error_message # Only after the components of the MV have been validated, # add the volume to the storage group and recheck that it # has been successfully added. This is necessary before # creating a new masking view. 
- errorMessage = self._check_adding_volume_to_storage_group( - conn, maskingViewDict, storageGroupInstanceName) - if errorMessage: - return None, storageGroupInstanceName, errorMessage + error_message = self._check_adding_volume_to_storage_group( + serial_number, masking_view_dict[utils.DEVICE_ID], + storagegroup_name, masking_view_dict[utils.VOL_NAME], + masking_view_dict[utils.EXTRA_SPECS]) + if error_message: + return error_message - maskingViewInstanceName, errorMessage = ( - self._check_masking_view( - conn, controllerConfigService, - maskingViewName, storageGroupInstanceName, - portGroupInstanceName, initiatorGroupInstanceName, - extraSpecs)) + error_message = self._check_add_child_sg_to_parent_sg( + serial_number, storagegroup_name, parent_sg_name, + masking_view_dict[utils.EXTRA_SPECS]) + if error_message: + return error_message - return maskingViewInstanceName, storageGroupInstanceName, errorMessage + error_message = (self.create_masking_view( + serial_number, maskingview_name, parent_sg_name, + port_group_name, init_group_name, extra_specs)) - def _validate_existing_masking_view(self, - conn, maskingViewDict, - maskingViewInstanceName, extraSpecs): + return error_message + + def _validate_existing_masking_view(self, serial_number, masking_view_dict, + maskingview_name, extra_specs): """Validate the components of an existing masking view. 
- :param conn: the ecom connection - :param maskingViewDict: the masking view dictionary - :param maskingViewInstanceName: the masking view instance name - :param extraSpecs: extra specification - :returns: storageGroupInstanceName - :returns: string -- errorMessage + :param serial_number: the array serial number + :param masking_view_dict: the masking view dict + :param maskingview_name: the amsking view name + :param extra_specs: the extra specifications + :return: storage_group_name -- string, msg -- string """ - storageGroupInstanceName = None - controllerConfigService = maskingViewDict['controllerConfigService'] - igGroupName = maskingViewDict['igGroupName'] - connector = maskingViewDict['connector'] - storageSystemName = maskingViewDict['storageSystemName'] - maskingViewName = maskingViewDict['maskingViewName'] - checkInitiator = maskingViewDict['initiatorCheck'] + storage_group_name, msg = self._check_existing_storage_group( + serial_number, maskingview_name, masking_view_dict) + if not msg: + portgroup_name = self.rest.get_element_from_masking_view( + serial_number, maskingview_name, portgroup=True) + __, msg = self._check_port_group( + serial_number, portgroup_name) + if not msg: + initiator_group, msg = self._check_existing_initiator_group( + serial_number, maskingview_name, masking_view_dict, + storage_group_name, portgroup_name, extra_specs) - # First verify that the initiator group matches the initiators. - if checkInitiator: - errorMessage = self._check_existing_initiator_group( - conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, extraSpecs) + return storage_group_name, msg - if errorMessage: - return storageGroupInstanceName, errorMessage + def _check_add_child_sg_to_parent_sg( + self, serial_number, child_sg_name, parent_sg_name, extra_specs): + """Check adding a child storage group to a parent storage group. 
- # Get the storage group from masking view - storageGroupInstanceName, errorMessage = ( - self._check_existing_storage_group(conn, maskingViewInstanceName)) - - return storageGroupInstanceName, errorMessage - - def _check_storage_group(self, conn, - maskingViewDict, storageGroupInstanceName): - """Get the storage group and return it. - - :param conn: the ecom connection - :param maskingViewDict: the masking view dictionary - :param storageGroupInstanceName: default storage group instance name - :returns: storageGroupInstanceName - :returns: string -- msg, the error message + :param serial_number: the array serial number + :param child_sg_name: the name of the child storage group + :param parent_sg_name: the name of the aprent storage group + :param extra_specs: the extra specifications + :return: error_message or None """ msg = None - storageGroupInstanceName = ( - self._get_storage_group_instance_name( - conn, maskingViewDict, storageGroupInstanceName)) - if storageGroupInstanceName is None: - # This may be used in exception hence the use of _. - msg = (_( - "Cannot get or create a storage group: %(sgGroupName)s" - " for volume %(volumeName)s ") % - {'sgGroupName': maskingViewDict['sgGroupName'], - 'volumeName': maskingViewDict['volumeName']}) + if self.rest.is_child_sg_in_parent_sg( + serial_number, child_sg_name, parent_sg_name): + LOG.info("Child sg: %(child_sg)s is already part " + "of parent storage group %(parent_sg)s.", + {'child_sg': child_sg_name, + 'parent_sg': parent_sg_name}) + else: + try: + self.add_child_sg_to_parent_sg( + serial_number, child_sg_name, parent_sg_name, extra_specs) + except Exception as e: + msg = ("Exception adding child sg %(child_sg)s to " + "%(parent_sg)s. 
Exception received was %(e)s" + % {'child_sg': child_sg_name, + 'parent_sg': parent_sg_name, + 'e': six.text_type(e)}) + LOG.error(msg) + return msg + + def add_child_sg_to_parent_sg( + self, serial_number, child_sg_name, parent_sg_name, extra_specs): + """Add a child storage group to a parent storage group. + + :param serial_number: the array serial number + :param child_sg_name: the name of the child storage group + :param parent_sg_name: the name of the aprent storage group + :param extra_specs: the extra specifications + """ + start_time = time.time() + + @coordination.synchronized("emc-sg-{child_sg}") + @coordination.synchronized("emc-sg-{parent_sg}") + def do_add_sg_to_sg(child_sg, parent_sg): + # Check if another process has added the child to the + # parent sg while this process was waiting for the lock + if self.rest.is_child_sg_in_parent_sg( + serial_number, child_sg_name, parent_sg_name): + pass + else: + self.rest.add_child_sg_to_parent_sg( + serial_number, child_sg, parent_sg, extra_specs) + + do_add_sg_to_sg(child_sg_name, parent_sg_name) + + LOG.debug("Add child to storagegroup took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + LOG.info("Added child sg: %(child_name)s to parent storage " + "group %(parent_name)s.", + {'child_name': child_sg_name, 'parent_name': parent_sg_name}) + + def _get_or_create_storage_group( + self, serial_number, masking_view_dict, storagegroup_name, + extra_specs, parent=False): + """Get or create a storage group for a masking view. 
+ + :param serial_number: the array serial number + :param masking_view_dict: the masking view dict + :param storagegroup_name: the storage group name + :param extra_specs: the extra specifications + :param parent: flag to indicate if this a parent storage group + :return: msg -- string or None + """ + msg = None + srp = extra_specs[utils.SRP] + workload = extra_specs[utils.WORKLOAD] + if parent: + slo = None + else: + slo = extra_specs[utils.SLO] + storagegroup = self.rest.get_storage_group( + serial_number, storagegroup_name) + if storagegroup is None: + storagegroup = self.provision.create_storage_group( + serial_number, storagegroup_name, srp, slo, workload, + extra_specs) + + if storagegroup is None: + msg = ("Cannot get or create a storage group: " + "%(storagegroup_name)s for volume %(volume_name)s." + % {'storagegroup_name': storagegroup_name, + 'volume_name': masking_view_dict[utils.VOL_NAME]}) LOG.error(msg) - return storageGroupInstanceName, msg + return msg def _check_existing_storage_group( - self, conn, maskingViewInstanceName): - """Check that we can get the existing storage group. + self, serial_number, maskingview_name, masking_view_dict): + """Check if the masking view has the child storage group. - :param conn: the ecom connection - :param maskingViewInstanceName: the masking view instance name - :returns: storageGroupInstanceName + Get the parent storage group associated with a masking view and check + if the required child storage group is already a member. If not, get + or create the child storage group. 
+ :param serial_number: the array serial number + :param maskingview_name: the masking view name + :param masking_view_dict: the masking view dict + :return: storage group name, msg + """ + msg = None + child_sg_name = masking_view_dict[utils.SG_NAME] + + sg_from_mv = self.rest.get_element_from_masking_view( + serial_number, maskingview_name, storagegroup=True) + + storagegroup = self.rest.get_storage_group(serial_number, sg_from_mv) + + if not storagegroup: + msg = ("Cannot get storage group: %(sg_from_mv)s " + "from masking view %(masking_view)s." + % {'sg_from_mv': sg_from_mv, + 'masking_view': maskingview_name}) + LOG.error(msg) + else: + check_child = self.rest.is_child_sg_in_parent_sg( + serial_number, child_sg_name, sg_from_mv) + child_sg = self.rest.get_storage_group( + serial_number, child_sg_name) + # Ensure the child sg can be retrieved + if check_child and not child_sg: + msg = ("Cannot get child storage group: %(sg_name)s " + "but it is listed as child of %(parent_sg)s" + % {'sg_name': child_sg_name, 'parent_sg': sg_from_mv}) + LOG.error(msg) + elif check_child and child_sg: + LOG.info("Retrieved child sg %(sg_name)s from %(mv_name)s", + {'sg_name': child_sg_name, + 'mv_name': maskingview_name}) + else: + msg = self._get_or_create_storage_group( + serial_number, masking_view_dict, child_sg_name, + masking_view_dict[utils.EXTRA_SPECS]) + if not msg: + msg = self._check_adding_volume_to_storage_group( + serial_number, masking_view_dict[utils.DEVICE_ID], + child_sg_name, masking_view_dict[utils.VOL_NAME], + masking_view_dict[utils.EXTRA_SPECS]) + if not msg and not check_child: + msg = self._check_add_child_sg_to_parent_sg( + serial_number, child_sg_name, sg_from_mv, + masking_view_dict[utils.EXTRA_SPECS]) + + return child_sg_name, msg + + def _check_port_group(self, serial_number, portgroup_name): + """Check that you can get a port group. 
+ + :param serial_number: the array serial number + :param portgroup_name: the port group name :returns: string -- msg, the error message """ msg = None - sgFromMvInstanceName = ( - self._get_storage_group_from_masking_view_instance( - conn, maskingViewInstanceName)) - - if sgFromMvInstanceName is None: - # This may be used in exception hence the use of _. - msg = (_( - "Cannot get storage group from masking view " - "%(maskingViewInstanceName)s. ") % - {'maskingViewInstanceName': maskingViewInstanceName}) + portgroup = self.rest.get_portgroup(serial_number, portgroup_name) + if portgroup is None: + msg = ("Cannot get port group: %(portgroup)s from the array " + "%(array)s. Portgroups must be pre-configured - please " + "check the array." + % {'portgroup': portgroup_name, 'array': serial_number}) LOG.error(msg) - return sgFromMvInstanceName, msg + return portgroup_name, msg - def _check_port_group(self, conn, - controllerConfigService, pgGroupName): - """Check that you can either get or create a port group. + def _get_or_create_initiator_group( + self, serial_number, init_group_name, connector, extra_specs): + """Retrieve or create an initiator group. - :param conn: the ecom connection - :param controllerConfigService: controller configuration service - :param pgGroupName: the port group Name - :returns: portGroupInstanceName - :returns: string -- msg, the error message - """ - msg = None - portGroupInstanceName = self._get_port_group_instance_name( - conn, controllerConfigService, pgGroupName) - if portGroupInstanceName is None: - # This may be used in exception hence the use of _. - msg = (_( - "Cannot get port group: %(pgGroupName)s. ") % - {'pgGroupName': pgGroupName}) - LOG.error(msg) - - return portGroupInstanceName, msg - - def _check_initiator_group( - self, conn, controllerConfigService, igGroupName, - connector, storageSystemName, extraSpecs): - """Check that initiator group can be either retrieved or created. 
- - :param conn: the ecom connection - :param controllerConfigService: controller configuration service - :param igGroupName: the initiator group Name + :param serial_number: the array serial number + :param init_group_name: the name of the initiator group :param connector: the connector object - :param storageSystemName: the storage system name - :param extraSpecs: extra specifications - :returns: initiatorGroupInstanceName - :returns: string -- the error message + :param extra_specs: the extra specifications + :return: name of the initiator group -- string, msg """ msg = None - initiatorGroupInstanceName = ( - self._get_initiator_group_instance_name( - conn, controllerConfigService, igGroupName, connector, - storageSystemName, extraSpecs)) - if initiatorGroupInstanceName is None: - # This may be used in exception hence the use of _. - msg = (_( - "Cannot get or create initiator group: " - "%(igGroupName)s. ") % - {'igGroupName': igGroupName}) + initiator_names = self.find_initiator_names(connector) + LOG.debug("The initiator name(s) are: %(initiatorNames)s.", + {'initiatorNames': initiator_names}) + + found_init_group = self._find_initiator_group( + serial_number, initiator_names) + + # If you cannot find an initiator group that matches the connector + # info, create a new initiator group. + if found_init_group is None: + found_init_group = self._create_initiator_group( + serial_number, init_group_name, initiator_names, extra_specs) + LOG.info("Created new initiator group name: %(init_group_name)s.", + {'init_group_name': init_group_name}) + else: + LOG.info("Using existing initiator group name: " + "%(init_group_name)s.", + {'init_group_name': found_init_group}) + + if found_init_group is None: + msg = ("Cannot get or create initiator group: " + "%(init_group_name)s. 
" + % {'init_group_name': init_group_name}) LOG.error(msg) - return initiatorGroupInstanceName, msg + return found_init_group, msg def _check_existing_initiator_group( - self, conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, extraSpecs): - """Check that existing initiator group in the masking view. + self, serial_number, maskingview_name, masking_view_dict, + storagegroup_name, portgroup_name, extra_specs): + """Checks an existing initiator group in the masking view. Check if the initiators in the initiator group match those in the system. - - :param conn: the ecom connection - :param controllerConfigService: controller configuration service - :param maskingViewName: the masking view name - :param connector: the connector object - :param storageSystemName: the storage system name - :param igGroupName: the initiator group name - :param extraSpecs: extra specification - :returns: string -- msg, the error message + :param serial_number: the array serial number + :param maskingview_name: name of the masking view + :param masking_view_dict: masking view dict + :param storagegroup_name: the storage group name + :param portgroup_name: the port group name + :param extra_specs: the extra specifications + :returns: ig_from_mv, msg """ msg = None - if not self._verify_initiator_group_from_masking_view( - conn, controllerConfigService, maskingViewName, - connector, storageSystemName, igGroupName, - extraSpecs): - # This may be used in exception hence the use of _. - msg = (_( - "Unable to verify initiator group: %(igGroupName)s " - "in masking view %(maskingViewName)s. 
") % - {'igGroupName': igGroupName, - 'maskingViewName': maskingViewName}) - LOG.error(msg) - return msg + ig_from_mv = self.rest.get_element_from_masking_view( + serial_number, maskingview_name, host=True) + check_ig = masking_view_dict[utils.INITIATOR_CHECK] - def _check_masking_view( - self, conn, controllerConfigService, - maskingViewName, storageGroupInstanceName, - portGroupInstanceName, initiatorGroupInstanceName, extraSpecs): - """Check that masking view can be either got or created. - - :param conn: the ecom connection - :param controllerConfigService: controller configuration service - :param maskingViewName: the masking view name - :param storageGroupInstanceName: storage group instance name - :param portGroupInstanceName: port group instance name - :param initiatorGroupInstanceName: the initiator group instance name - :param extraSpecs: extra specifications - :returns: maskingViewInstanceName - :returns: string -- msg, the error message - """ - msg = None - maskingViewInstanceName = ( - self._get_masking_view_instance_name( - conn, controllerConfigService, maskingViewName, - storageGroupInstanceName, portGroupInstanceName, - initiatorGroupInstanceName, extraSpecs)) - if maskingViewInstanceName is None: - # This may be used in exception hence the use of _. - msg = (_( - "Cannot create masking view: %(maskingViewName)s. ") % - {'maskingViewName': maskingViewName}) - LOG.error(msg) - - return maskingViewInstanceName, msg + if check_ig: + # First verify that the initiator group matches the initiators. + check, found_ig = self._verify_initiator_group_from_masking_view( + serial_number, maskingview_name, masking_view_dict, ig_from_mv, + storagegroup_name, portgroup_name, extra_specs) + if not check: + msg = ("Unable to verify initiator group: %(ig_name)s " + "in masking view %(maskingview_name)s." 
+ % {'ig_name': ig_from_mv, + 'maskingview_name': maskingview_name}) + LOG.error(msg) + return ig_from_mv, msg def _check_adding_volume_to_storage_group( - self, conn, maskingViewDict, storageGroupInstanceName): - """Add the volume to the storage group and double check it is there. + self, serial_number, device_id, storagegroup_name, + volume_name, extra_specs): + """Check if a volume is part of an sg and add it if not. - :param conn: the ecom connection - :param maskingViewDict: the masking view dictionary - :param storageGroupInstanceName: storage group instance name - :returns: string -- the error message - """ - controllerConfigService = maskingViewDict['controllerConfigService'] - sgGroupName = maskingViewDict['sgGroupName'] - volumeInstance = maskingViewDict['volumeInstance'] - volumeName = maskingViewDict['volumeName'] - msg = None - if self._is_volume_in_storage_group( - conn, storageGroupInstanceName, - volumeInstance, sgGroupName): - LOG.warning( - "Volume: %(volumeName)s is already part " - "of storage group %(sgGroupName)s.", - {'volumeName': volumeName, - 'sgGroupName': sgGroupName}) - else: - msg = self._add_volume_to_sg_and_verify( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, sgGroupName, - maskingViewDict['extraSpecs']) - - return msg - - def _add_volume_to_sg_and_verify( - self, conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, sgGroupName, extraSpecs): - """Add the volume to the storage group and double check it is there. 
- - :param conn: the ecom connection - :param controllerConfigService: controller service - :param storageGroupInstanceName: storage group instance name - :param volumeInstance: the volume instance - :param volumeName: the volume name - :param sgGroupName: the storage group name - :param extraSpecs: the extra specifications - :returns: string -- the error message + :param serial_number: the array serial number + :param device_id: the device id + :param storagegroup_name: the storage group name + :param volume_name: volume name + :param extra_specs: extra specifications + :return: msg """ msg = None - self.add_volume_to_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, sgGroupName, extraSpecs) - if not self._is_volume_in_storage_group( - conn, storageGroupInstanceName, volumeInstance, sgGroupName): - # This may be used in exception hence the use of _. - msg = (_( - "Volume: %(volumeName)s was not added " - "to storage group %(sgGroupName)s.") % - {'volumeName': volumeName, - 'sgGroupName': sgGroupName}) - LOG.error(msg) + if self.rest.is_volume_in_storagegroup( + serial_number, device_id, storagegroup_name): + LOG.info("Volume: %(volume_name)s is already part " + "of storage group %(sg_name)s.", + {'volume_name': volume_name, + 'sg_name': storagegroup_name}) else: - LOG.info("Successfully added %(volumeName)s to %(sgGroupName)s.", - {'volumeName': volumeName, - 'sgGroupName': sgGroupName}) + try: + self.add_volume_to_storage_group( + serial_number, device_id, storagegroup_name, + volume_name, extra_specs) + except Exception as e: + msg = ("Exception adding volume %(vol)s to %(sg)s. " + "Exception received was %(e)s." 
+ % {'vol': volume_name, 'sg': storagegroup_name, + 'e': six.text_type(e)}) + LOG.error(msg) return msg - def _get_and_remove_from_storage_group_v2( - self, conn, controllerConfigService, volumeInstanceName, - volumeName, fastPolicyName, extraSpecs): - """Get the storage group and remove volume from it. + def add_volume_to_storage_group( + self, serial_number, device_id, storagegroup_name, + volume_name, extra_specs): + """Add a volume to a storage group. - :param conn: the ecom connection - :param controllerConfigService: controller configuration service - :param volumeInstanceName: volume instance name - :param volumeName: volume name - :param fastPolicyName: fast name - :param extraSpecs: additional info - :returns: defaultStorageGroupInstanceName + :param serial_number: array serial number + :param device_id: volume device id + :param storagegroup_name: storage group name + :param volume_name: volume name + :param extra_specs: extra specifications + """ + start_time = time.time() + + @coordination.synchronized("emc-sg-{sg_name}") + def do_add_volume_to_sg(sg_name): + # Check if another process has added the volume to the + # sg while this process was waiting for the lock + if self.rest.is_volume_in_storagegroup( + serial_number, device_id, storagegroup_name): + LOG.info("Volume: %(volume_name)s is already part " + "of storage group %(sg_name)s.", + {'volume_name': volume_name, + 'sg_name': storagegroup_name}) + else: + self.rest.add_vol_to_sg(serial_number, sg_name, + device_id, extra_specs) + do_add_volume_to_sg(storagegroup_name) + + LOG.debug("Add volume to storagegroup took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + LOG.info("Added volume: %(vol_name)s to storage group %(sg_name)s.", + {'vol_name': volume_name, 'sg_name': storagegroup_name}) + + def _remove_vol_from_storage_group( + self, serial_number, device_id, storagegroup_name, + volume_name, extra_specs): + """Remove a volume from a storage group. 
+ + :param serial_number: the array serial number + :param device_id: the volume device id + :param storagegroup_name: the name of the storage group + :param volume_name: the volume name + :param extra_specs: the extra specifications :raises: VolumeBackendAPIException """ - defaultStorageGroupInstanceName = ( - self.fast.get_and_verify_default_storage_group( - conn, controllerConfigService, volumeInstanceName, - volumeName, fastPolicyName)) - if defaultStorageGroupInstanceName is None: - exceptionMessage = (_( - "Cannot get the default storage group for FAST policy: " - "%(fastPolicyName)s.") - % {'fastPolicyName': fastPolicyName}) - LOG.error(exceptionMessage) + start_time = time.time() + + self.rest.remove_vol_from_sg( + serial_number, storagegroup_name, device_id, extra_specs) + + LOG.debug("Remove volume from storagegroup took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + + check_vol = (self.rest.is_volume_in_storagegroup( + serial_number, device_id, storagegroup_name)) + if check_vol: + exception_message = (_( + "Failed to remove volume %(vol)s from SG: %(sg_name)s.") + % {'vol': volume_name, 'sg_name': storagegroup_name}) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( - data=exceptionMessage) + data=exception_message) - retStorageGroupInstanceName = ( - self.remove_device_from_default_storage_group( - conn, controllerConfigService, volumeInstanceName, - volumeName, fastPolicyName, extraSpecs)) - if retStorageGroupInstanceName is None: - exceptionMessage = (_( - "Failed to remove volume %(volumeName)s from default SG.") - % {'volumeName': volumeName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - return defaultStorageGroupInstanceName - - def _get_and_remove_from_storage_group_v3( - self, conn, controllerConfigService, volumeInstanceName, - volumeName, maskingViewDict, storageGroupInstanceName): - """Get the storage group and remove 
volume from it. - - :param conn: the ecom connection - :param controllerConfigService: controller configuration service - :param volumeInstanceName: volume instance name - :param volumeName: volume name - :param maskingViewDict: the masking view dictionary - :param storageGroupInstanceName: storage group instance name - :raises VolumeBackendAPIException: - """ - volInstance = conn.GetInstance(volumeInstanceName, LocalOnly=False) - - self._remove_volume_from_sg( - conn, controllerConfigService, storageGroupInstanceName, - volInstance, maskingViewDict['extraSpecs']) - - # Required for unit tests. - emptyStorageGroupInstanceName = ( - self._wrap_get_storage_group_from_volume( - conn, volumeInstanceName, maskingViewDict['sgGroupName'])) - - if emptyStorageGroupInstanceName is not None: - exceptionMessage = (_( - "Failed to remove volume %(volumeName)s from default SG: " - "%(volumeName)s.") - % {'volumeName': volumeName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - def _is_volume_in_storage_group( - self, conn, storageGroupInstanceName, volumeInstance, sgName): - """Check if the volume is already part of the storage group. - - Check if the volume is already part of the storage group, - if it is no need to re-add it. 
- - :param conn: the connection to ecom - :param storageGroupInstanceName: the storage group instance name - :param volumeInstance: the volume instance - :param sgName: the storage group name - :returns: boolean - """ - foundStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume( - conn, volumeInstance.path, sgName)) - - if foundStorageGroupInstanceName is not None: - storageGroupInstance = conn.GetInstance( - storageGroupInstanceName, LocalOnly=False) - LOG.debug( - "The existing storage group instance element name is: " - "%(existingElement)s.", - {'existingElement': storageGroupInstance['ElementName']}) - foundStorageGroupInstance = conn.GetInstance( - foundStorageGroupInstanceName, LocalOnly=False) - LOG.debug( - "The found storage group instance element name is: " - "%(foundElement)s.", - {'foundElement': foundStorageGroupInstance['ElementName']}) - if (foundStorageGroupInstance['ElementName'] == ( - storageGroupInstance['ElementName'])): - return True - - return False - - def _find_masking_view(self, conn, maskingViewName, storageSystemName): - """Given the masking view name get the masking view instance. - - :param conn: connection to the ecom server - :param maskingViewName: the masking view name - :param storageSystemName: the storage system name(String) - :returns: dict -- foundMaskingViewInstanceName - """ - foundMaskingViewInstanceName = None - - storageSystemInstanceName = self.utils.find_storageSystem( - conn, storageSystemName) - maskingViewInstances = conn.Associators( - storageSystemInstanceName, - ResultClass='EMC_LunMaskingSCSIProtocolController') - - for maskingViewInstance in maskingViewInstances: - if maskingViewName == maskingViewInstance['ElementName']: - foundMaskingViewInstanceName = maskingViewInstance.path - break - - if foundMaskingViewInstanceName is not None: - # Now check that is has not been deleted. 
- instance = self.utils.get_existing_instance( - conn, foundMaskingViewInstanceName) - if instance is None: - foundMaskingViewInstanceName = None - LOG.error( - "Looks like masking view: %(maskingViewName)s " - "has recently been deleted.", - {'maskingViewName': maskingViewName}) - else: - LOG.debug( - "Found existing masking view: %(maskingViewName)s.", - {'maskingViewName': maskingViewName}) - - return foundMaskingViewInstanceName - - def _create_storage_group( - self, conn, maskingViewDict, defaultStorageGroupInstanceName): - """Create a new storage group that doesn't already exist. - - If fastPolicyName is not none we attempt to remove it from the - default storage group of that policy and associate to the new storage - group that will be part of the masking view. - Will not handle any exception in this method it will be handled - up the stack. - - :param conn: connection to the ecom server - :param maskingViewDict: the masking view dictionary - :param defaultStorageGroupInstanceName: the default storage group - instance name (Can be None) - :returns: foundStorageGroupInstanceName the instance Name of the - storage group - """ - failedRet = None - controllerConfigService = maskingViewDict['controllerConfigService'] - storageGroupName = maskingViewDict['sgGroupName'] - isV3 = maskingViewDict['isV3'] - - if isV3: - workload = maskingViewDict['workload'] - pool = maskingViewDict['pool'] - slo = maskingViewDict['slo'] - foundStorageGroupInstanceName = ( - self.provisionv3.create_storage_group_v3( - conn, controllerConfigService, storageGroupName, - pool, slo, workload, maskingViewDict['extraSpecs'], - maskingViewDict['isCompressionDisabled'])) - else: - fastPolicyName = maskingViewDict['fastPolicy'] - volumeInstance = maskingViewDict['volumeInstance'] - foundStorageGroupInstanceName = ( - self.provision.create_and_get_storage_group( - conn, controllerConfigService, storageGroupName, - volumeInstance.path, maskingViewDict['extraSpecs'])) - if (fastPolicyName is not 
None and - defaultStorageGroupInstanceName is not None): - assocTierPolicyInstanceName = ( - self.fast.add_storage_group_and_verify_tier_policy_assoc( - conn, controllerConfigService, - foundStorageGroupInstanceName, - storageGroupName, fastPolicyName, - maskingViewDict['extraSpecs'])) - if assocTierPolicyInstanceName is None: - LOG.error( - "Cannot add and verify tier policy association for " - "storage group : %(storageGroupName)s to " - "FAST policy : %(fastPolicyName)s.", - {'storageGroupName': storageGroupName, - 'fastPolicyName': fastPolicyName}) - return failedRet - if foundStorageGroupInstanceName is None: - LOG.error( - "Cannot get storage Group from job : %(storageGroupName)s.", - {'storageGroupName': storageGroupName}) - return failedRet - else: - LOG.info( - "Created new storage group: %(storageGroupName)s.", - {'storageGroupName': storageGroupName}) - - return foundStorageGroupInstanceName - - def find_port_group(self, conn, controllerConfigService, portGroupName): - """Given the port Group name get the port group instance name. - - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param portGroupName: the name of the port group you are getting - :returns: foundPortGroupInstanceName - """ - foundPortGroupInstanceName = None - portMaskingGroupInstances = conn.Associators( - controllerConfigService, ResultClass='CIM_TargetMaskingGroup') - - for portMaskingGroupInstance in portMaskingGroupInstances: - if portGroupName == portMaskingGroupInstance['ElementName']: - # Check to see if it has been recently deleted. - instance = self.utils.get_existing_instance( - conn, portMaskingGroupInstance.path) - if instance is None: - foundPortGroupInstanceName = None - else: - foundPortGroupInstanceName = instance.path - break - - if foundPortGroupInstanceName is None: - LOG.error( - "Could not find port group : %(portGroupName)s. 
Check that " - "the EMC configuration file has the correct port group name.", - {'portGroupName': portGroupName}) - - return foundPortGroupInstanceName - - def _create_or_get_initiator_group( - self, conn, controllerConfigService, igGroupName, - connector, storageSystemName, extraSpecs): - """Attempt to create an initiatorGroup. - - If one already exists with the same Initiator/wwns then get it. - Check to see if an initiatorGroup already exists, that matches the - connector information. - NOTE: An initiator/wwn can only belong to one initiatorGroup. - If we were to attempt to create one with an initiator/wwn that - is already belong to another initiatorGroup, it would fail. - - :param conn: connection to the ecom server - :param controllerConfigService: the controller config Servicer - :param igGroupName: the proposed name of the initiator group - :param connector: the connector information to the host - :param storageSystemName: the storage system name (String) - :param extraSpecs: extra specifications - :returns: foundInitiatorGroupInstanceName - """ - initiatorNames = self._find_initiator_names(conn, connector) - LOG.debug("The initiator name(s) are: %(initiatorNames)s.", - {'initiatorNames': initiatorNames}) - - foundInitiatorGroupInstanceName = self._find_initiator_masking_group( - conn, controllerConfigService, initiatorNames) - - # If you cannot find an initiatorGroup that matches the connector - # info create a new initiatorGroup. - if foundInitiatorGroupInstanceName is None: - # Check that our connector information matches the - # hardwareId(s) on the vmax. 
- storageHardwareIDInstanceNames = ( - self._get_storage_hardware_id_instance_names( - conn, initiatorNames, storageSystemName)) - if not storageHardwareIDInstanceNames: - LOG.info( - "Initiator Name(s) %(initiatorNames)s are not on array " - "%(storageSystemName)s.", - {'initiatorNames': initiatorNames, - 'storageSystemName': storageSystemName}) - storageHardwareIDInstanceNames = ( - self._create_hardware_ids(conn, initiatorNames, - storageSystemName)) - if not storageHardwareIDInstanceNames: - msg = (_("Failed to create hardware id(s) on " - "%(storageSystemName)s.") - % {'storageSystemName': storageSystemName}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - - foundInitiatorGroupInstanceName = self._create_initiator_Group( - conn, controllerConfigService, igGroupName, - storageHardwareIDInstanceNames, extraSpecs) - - LOG.info("Created new initiator group name: %(igGroupName)s.", - {'igGroupName': igGroupName}) - else: - initiatorGroupInstance = conn.GetInstance( - foundInitiatorGroupInstanceName, LocalOnly=False) - LOG.info("Using existing initiator group name: %(igGroupName)s.", - {'igGroupName': initiatorGroupInstance['ElementName']}) - - return foundInitiatorGroupInstanceName - - def _find_initiator_names(self, conn, connector): + def find_initiator_names(self, connector): """Check the connector object for initiators(ISCSI) or wwpns(FC). 
- :param conn: the connection to the ecom :param connector: the connector object :returns: list -- list of found initiator names - :raises VolumeBackendAPIException: + :raises: VolumeBackendAPIException """ foundinitiatornames = [] name = 'initiator name' - if (self.protocol.lower() == ISCSI and connector['initiator']): + if self.protocol.lower() == utils.ISCSI and connector['initiator']: foundinitiatornames.append(connector['initiator']) - elif self.protocol.lower() == FC: - if ('wwpns' in connector and connector['wwpns']): + elif self.protocol.lower() == utils.FC: + if 'wwpns' in connector and connector['wwpns']: for wwn in connector['wwpns']: foundinitiatornames.append(wwn) name = 'world wide port names' @@ -935,9 +631,8 @@ class VMAXMasking(object): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - if (foundinitiatornames is None or len(foundinitiatornames) == 0): - msg = (_("Error finding %(name)s.") - % {'name': name}) + if not foundinitiatornames: + msg = (_("Error finding %(name)s.") % {'name': name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @@ -947,1912 +642,673 @@ class VMAXMasking(object): return foundinitiatornames - def _find_initiator_masking_group( - self, conn, controllerConfigService, initiatorNames): - """Check to see if an initiatorGroup already exists. + def _find_initiator_group(self, serial_number, initiator_names): + """Check to see if an initiator group already exists. - NOTE: An initiator/wwn can only belong to one initiatorGroup. + NOTE: An initiator/wwn can only belong to one initiator group. If we were to attempt to create one with an initiator/wwn that is - already belong to another initiatorGroup, it would fail. - - :param conn: the connection to the ecom server - :param controllerConfigService: the controller configuration service - :param initiatorNames: the list of initiator names - :returns: foundInitiatorMaskingGroup + already belonging to another initiator group, it would fail. 
+ :param serial_number: the array serial number + :param initiator_names: the list of initiator names + :returns: initiator group name -- string or None """ - foundInitiatorMaskingGroupInstanceName = None - - initiatorMaskingGroupInstanceNames = ( - conn.AssociatorNames(controllerConfigService, - ResultClass='CIM_InitiatorMaskingGroup')) - - for initiatorMaskingGroupInstanceName in ( - initiatorMaskingGroupInstanceNames): - # Check that it hasn't been deleted. If it has, break out - # of the for loop. - instance = self.utils.get_existing_instance( - conn, initiatorMaskingGroupInstanceName) - if instance is None: - # MaskingGroup doesn't exist any more. + ig_name = None + init_list = self.rest.get_in_use_initiator_list_from_array( + serial_number) + for initiator in initiator_names: + found_init = [init for init in init_list if initiator in init] + if found_init: + ig_name = self.rest.get_initiator_group_from_initiator( + serial_number, found_init[0]) break + return ig_name - storageHardwareIdInstances = ( - conn.Associators(initiatorMaskingGroupInstanceName, - ResultClass='EMC_StorageHardwareID')) - for storageHardwareIdInstance in storageHardwareIdInstances: - # If EMC_StorageHardwareID matches the initiator, - # we found the existing CIM_InitiatorMaskingGroup. - hardwareid = storageHardwareIdInstance['StorageID'] - for initiator in initiatorNames: - if six.text_type(hardwareid).lower() == ( - six.text_type(initiator).lower()): - foundInitiatorMaskingGroupInstanceName = ( - initiatorMaskingGroupInstanceName) - break + def create_masking_view( + self, serial_number, maskingview_name, storagegroup_name, + port_group_name, init_group_name, extra_specs): + """Create a new masking view. 
- if foundInitiatorMaskingGroupInstanceName is not None: - break - - if foundInitiatorMaskingGroupInstanceName is not None: - break - return foundInitiatorMaskingGroupInstanceName - - def _get_storage_hardware_id_instance_names( - self, conn, initiatorNames, storageSystemName): - """Given a list of initiator names find CIM_StorageHardwareID instance. - - :param conn: the connection to the ecom server - :param initiatorNames: the list of initiator names - :param storageSystemName: the storage system name - :returns: list -- foundHardwardIDsInstanceNames + :param serial_number: the array serial number + :param maskingview_name: the masking view name + :param storagegroup_name: the storage group name + :param port_group_name: the port group + :param init_group_name: the initiator group + :param extra_specs: extra specifications + :return: error_message -- string or None """ - foundHardwardIDsInstanceNames = [] + error_message = None + try: + self.rest.create_masking_view( + serial_number, maskingview_name, storagegroup_name, + port_group_name, init_group_name, extra_specs) - hardwareIdManagementService = ( - self.utils.find_storage_hardwareid_service( - conn, storageSystemName)) + except Exception as e: + error_message = ("Error creating new masking view. Exception " + "received: %(e)s" % {'e': six.text_type(e)}) + return error_message - hardwareIdInstances = ( - self.utils.get_hardware_id_instances_from_array( - conn, hardwareIdManagementService)) - - for hardwareIdInstance in hardwareIdInstances: - storageId = hardwareIdInstance['StorageID'] - for initiatorName in initiatorNames: - if storageId.lower() == initiatorName.lower(): - # Check that the found hardwareId has been deleted. - # If it has, we don't want to add it to the list. - instance = self.utils.get_existing_instance( - conn, hardwareIdInstance.path) - if instance is None: - # HardwareId doesn't exist. Skip it. 
- break - - foundHardwardIDsInstanceNames.append( - hardwareIdInstance.path) - break - - LOG.debug( - "The found hardware IDs are : %(foundHardwardIDsInstanceNames)s.", - {'foundHardwardIDsInstanceNames': foundHardwardIDsInstanceNames}) - - return foundHardwardIDsInstanceNames - - def _get_initiator_group_from_job(self, conn, job): - """After creating an new initiator group find it and return it. - - :param conn: the connection to the ecom server - :param job: the create initiator group job - :returns: dict -- initiatorDict - """ - associators = conn.Associators( - job['Job'], - ResultClass='CIM_InitiatorMaskingGroup') - volpath = associators[0].path - initiatorDict = {} - initiatorDict['classname'] = volpath.classname - keys = {} - keys['CreationClassName'] = volpath['CreationClassName'] - keys['SystemName'] = volpath['SystemName'] - keys['DeviceID'] = volpath['DeviceID'] - keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] - initiatorDict['keybindings'] = keys - return initiatorDict - - def _create_masking_view( - self, conn, configService, maskingViewName, deviceMaskingGroup, - targetMaskingGroup, initiatorMaskingGroup, extraSpecs): - """After creating an new initiator group find it and return it. 
- - :param conn: the connection to the ecom server - :param configService: the create initiator group job - :param maskingViewName: the masking view name string - :param deviceMaskingGroup: device(storage) masking group (instanceName) - :param targetMaskingGroup: target(port) masking group (instanceName) - :param initiatorMaskingGroup: initiator masking group (instanceName) - :param extraSpecs: extra specifications - :returns: int -- return code - :returns: dict -- job - :raises VolumeBackendAPIException: - """ - rc, job = conn.InvokeMethod( - 'CreateMaskingView', configService, ElementName=maskingViewName, - InitiatorMaskingGroup=initiatorMaskingGroup, - DeviceMaskingGroup=deviceMaskingGroup, - TargetMaskingGroup=targetMaskingGroup) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Masking View: %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'groupName': maskingViewName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.info("Created new masking view : %(maskingViewName)s.", - {'maskingViewName': maskingViewName}) - return rc, job - - def find_new_masking_view(self, conn, jobDict): - """Find the newly created volume. 
- - :param conn: the connection to the ecom server - :param jobDict: the job dictionary - :returns: dict -- maskingViewInstance - """ - associators = conn.Associators( - jobDict['Job'], - ResultClass='Symm_LunMaskingView') - mvpath = associators[0].path - maskingViewInstance = {} - maskingViewInstance['classname'] = mvpath.classname - keys = {} - keys['CreationClassName'] = mvpath['CreationClassName'] - keys['SystemName'] = mvpath['SystemName'] - keys['DeviceID'] = mvpath['DeviceID'] - keys['SystemCreationClassName'] = mvpath['SystemCreationClassName'] - maskingViewInstance['keybindings'] = keys - return maskingViewInstance - - def _get_storage_group_from_masking_view( - self, conn, maskingViewName, storageSystemName): - """Gets the Device Masking Group from masking view. - - :param conn: the connection to the ecom server - :param maskingViewName: the masking view name (String) - :param storageSystemName: storage system name (String) - :returns: instance name foundStorageGroupInstanceName - """ - foundStorageGroupInstanceName = None - foundView = self._find_masking_view( - conn, maskingViewName, storageSystemName) - if foundView is not None: - foundStorageGroupInstanceName = ( - self._get_storage_group_from_masking_view_instance( - conn, foundView)) - - LOG.debug( - "Masking view: %(view)s DeviceMaskingGroup: %(masking)s.", - {'view': maskingViewName, - 'masking': foundStorageGroupInstanceName}) - else: - LOG.warning("Unable to find Masking view: %(view)s.", - {'view': maskingViewName}) - - return foundStorageGroupInstanceName - - def _get_storage_group_from_masking_view_instance( - self, conn, maskingViewInstance): - """Gets the Device Masking Group from masking view instance. 
- - :param conn: the connection to the ecom server - :param maskingViewInstance: the masking view instance - :returns: instance name foundStorageGroupInstanceName - """ - foundStorageGroupInstanceName = None - groups = conn.AssociatorNames( - maskingViewInstance, - ResultClass='CIM_DeviceMaskingGroup') - if len(groups) > 0: - foundStorageGroupInstanceName = groups[0] - - return foundStorageGroupInstanceName - - def _get_storage_group_instance_name( - self, conn, maskingViewDict, - defaultStorageGroupInstanceName): - """Gets the storage group instance name. - - If fastPolicy name is None then NON FAST is assumed. - If it is a valid fastPolicy name then associate the new storage - group with the fast policy. - If we are using an existing storage group then we must check that - it is associated with the correct fast policy. - - :param conn: the connection to the ecom server - :param maskingViewDict: the masking view dictionary - :param defaultStorageGroupInstanceName: default storage group instance - name (can be None for Non FAST) - :returns: instance name storageGroupInstanceName - :raises VolumeBackendAPIException: - """ - storageGroupInstanceName = self.utils.find_storage_masking_group( - conn, maskingViewDict['controllerConfigService'], - maskingViewDict['sgGroupName']) - - if storageGroupInstanceName is None: - storageGroupInstanceName = self._create_storage_group( - conn, maskingViewDict, - defaultStorageGroupInstanceName) - if storageGroupInstanceName is None: - errorMessage = (_( - "Cannot create or find an storage group with name " - "%(sgGroupName)s.") - % {'sgGroupName': maskingViewDict['sgGroupName']}) - LOG.error(errorMessage) - raise exception.VolumeBackendAPIException(data=errorMessage) - - return storageGroupInstanceName - - def _get_port_group_instance_name( - self, conn, controllerConfigService, pgGroupName): - """Gets the port group instance name. 
- - The portGroup name has been defined in the EMC Config file if it - does not exist the operation should fail. - - :param conn: the connection to the ecom server - :param controllerConfigService: the controller configuration server - :param pgGroupName: the port group name - :returns: instance name foundPortGroupInstanceName - """ - foundPortGroupInstanceName = self.find_port_group( - conn, controllerConfigService, pgGroupName) - if foundPortGroupInstanceName is None: - LOG.error( - "Cannot find a portGroup with name %(pgGroupName)s. " - "The port group for a masking view must be pre-defined.", - {'pgGroupName': pgGroupName}) - return foundPortGroupInstanceName - - LOG.info( - "Port group instance name is %(foundPortGroupInstanceName)s.", - {'foundPortGroupInstanceName': foundPortGroupInstanceName}) - - return foundPortGroupInstanceName - - def _get_initiator_group_instance_name( - self, conn, controllerConfigService, igGroupName, connector, - storageSystemName, extraSpecs): - """Gets the initiator group instance name. - - :param conn: the connection to the ecom server - :param controllerConfigService: the controller configuration server - :param igGroupName: the port group name - :param connector: the connector object - :param storageSystemName: the storage system name - :param extraSpecs: extra specifications - :returns: foundInitiatorGroupInstanceName - """ - foundInitiatorGroupInstanceName = (self._create_or_get_initiator_group( - conn, controllerConfigService, igGroupName, connector, - storageSystemName, extraSpecs)) - if foundInitiatorGroupInstanceName is None: - LOG.error("Cannot create or find an initiator group with " - "name %(igGroupName)s.", - {'igGroupName': igGroupName}) - return foundInitiatorGroupInstanceName - - def _get_masking_view_instance_name( - self, conn, controllerConfigService, maskingViewName, - storageGroupInstanceName, portGroupInstanceName, - initiatorGroupInstanceName, extraSpecs): - """Gets the masking view instance name. 
- - :param conn: the connection to the ecom server - :param controllerConfigService: the controller configuration server - :param maskingViewName: the masking view name (String) - :param storageGroupInstanceName: the storage group instance name - :param portGroupInstanceName: the port group instance name - :param initiatorGroupInstanceName: the initiator group instance name - :param extraSpecs: extra specifications - :returns: instance name foundMaskingViewInstanceName - """ - _rc, job = ( - self._create_masking_view( - conn, controllerConfigService, maskingViewName, - storageGroupInstanceName, portGroupInstanceName, - initiatorGroupInstanceName, extraSpecs)) - foundMaskingViewInstanceName = self.find_new_masking_view(conn, job) - if foundMaskingViewInstanceName is None: - LOG.error( - "Cannot find the new masking view just created with name " - "%(maskingViewName)s.", - {'maskingViewName': maskingViewName}) - - return foundMaskingViewInstanceName - - def _check_if_rollback_action_for_masking_required( - self, conn, rollbackDict): - """This is a rollback action for FAST. + def check_if_rollback_action_for_masking_required( + self, serial_number, device_id, rollback_dict): + """Rollback action for volumes with an associated service level. We need to be able to return the volume to the default storage group if anything has gone wrong. The volume can also potentially belong to a storage group that is not the default depending on where the exception occurred. We also may need to clean up any unused initiator groups. - - :param conn: the connection to the ecom server - :param rollbackDict: the rollback dictionary - :returns: message - :raises VolumeBackendAPIException: + :param serial_number: the array serial number + :param device_id: the device id + :param rollback_dict: the rollback dict + :return: error message -- string, or None + :raises: VolumeBackendAPIException """ message = None # Check if ig has been created. 
If so, check for other - # masking views associated with the ig. If none, remove - # initiators and delete ig. + # masking views associated with the ig. If none, delete the ig. self._check_ig_rollback( - conn, rollbackDict['controllerConfigService'], - rollbackDict['igGroupName'], rollbackDict['connector'], - rollbackDict['extraSpecs']) + serial_number, rollback_dict['init_group_name'], + rollback_dict['connector']) try: - foundStorageGroupInstanceName = ( - self.utils.get_storage_group_from_volume( - conn, rollbackDict['volumeInstance'].path, - rollbackDict['sgGroupName'])) + found_sg_name = ( + self.rest.get_storage_groups_from_volume( + serial_number, rollback_dict['device_id'])) # Volume is not associated with any storage group so add # it back to the default. - if not foundStorageGroupInstanceName: - if rollbackDict['isV3']: - errorMessage = self._check_adding_volume_to_storage_group( - conn, rollbackDict, - rollbackDict['defaultStorageGroupInstanceName']) - if errorMessage: - LOG.error(errorMessage) - message = (_("V3 rollback")) - else: - LOG.warning( - "No storage group found. 
" - "Performing rollback on Volume: %(volumeName)s " - "To return it to the default storage group for FAST " - "policy %(fastPolicyName)s.", - {'volumeName': rollbackDict['volumeName'], - 'fastPolicyName': rollbackDict['fastPolicyName']}) - assocDefaultStorageGroupName = ( - self.fast - .add_volume_to_default_storage_group_for_fast_policy( - conn, - rollbackDict['controllerConfigService'], - rollbackDict['volumeInstance'], - rollbackDict['volumeName'], - rollbackDict['fastPolicyName'], - rollbackDict['extraSpecs'])) - if assocDefaultStorageGroupName is None: - LOG.error( - "Failed to Roll back to re-add volume " - "%(volumeName)s " - "to default storage group for fast policy " - "%(fastPolicyName)s: Please contact your sys " - "admin to get the volume re-added manually.", - {'volumeName': rollbackDict['volumeName'], - 'fastPolicyName': rollbackDict['fastPolicyName']}) - message = (_("V2 rollback, volume is not in any storage " - "group.")) + if not found_sg_name: + error_message = self._check_adding_volume_to_storage_group( + serial_number, device_id, + rollback_dict['default_sg_name'], + rollback_dict[utils.VOL_NAME], + rollback_dict[utils.EXTRA_SPECS]) + if error_message: + LOG.error(error_message) + message = (_("Rollback")) else: - LOG.info("The storage group found is " - "%(foundStorageGroupInstanceName)s.", - {'foundStorageGroupInstanceName': - foundStorageGroupInstanceName}) + LOG.info("The storage group found is %(found_sg_name)s.", + {'found_sg_name': found_sg_name}) # Check the name, see if it is the default storage group # or another. - if (foundStorageGroupInstanceName != - rollbackDict['defaultStorageGroupInstanceName']): - # Remove it from its current masking view and return it - # to its default masking view if fast is enabled or slo - # is defined. + if found_sg_name != rollback_dict['default_sg_name']: + # Remove it from its current storage group and return it + # to its default masking view if slo is defined. 
self.remove_and_reset_members( - conn, - rollbackDict['controllerConfigService'], - rollbackDict['volumeInstance'], - rollbackDict['volumeName'], - rollbackDict['extraSpecs']) + serial_number, device_id, + rollback_dict['volume_name'], + rollback_dict['extra_specs']) message = (_("Rollback - Volume in another storage " "group besides default storage group.")) - except Exception: - errorMessage = (_( - "Rollback for Volume: %(volumeName)s has failed. " + except Exception as e: + error_message = (_( + "Rollback for Volume: %(volume_name)s has failed. " "Please contact your system administrator to manually return " - "your volume to the default storage group for fast policy/ " - "slo.") - % {'volumeName': rollbackDict['volumeName']}) - LOG.exception(errorMessage) - raise exception.VolumeBackendAPIException(data=errorMessage) + "your volume to the default storage group for its slo. " + "Exception received: %(e)s") + % {'volume_name': rollback_dict['volume_name'], + 'e': six.text_type(e)}) + LOG.exception(error_message) + raise exception.VolumeBackendAPIException(data=error_message) return message - def _find_new_initiator_group(self, conn, maskingGroupDict): - """After creating an new initiator group find it and return it. - - :param conn: connection to the ecom server - :param maskingGroupDict: the maskingGroupDict dict - :returns: instance name foundInitiatorGroupInstanceName - """ - foundInitiatorGroupInstanceName = None - - if 'MaskingGroup' in maskingGroupDict: - foundInitiatorGroupInstanceName = maskingGroupDict['MaskingGroup'] - - return foundInitiatorGroupInstanceName - - def _get_initiator_group_from_masking_view( - self, conn, maskingViewName, storageSystemName): - """Given the masking view name get the initiator group from it. 
- - :param conn: connection to the ecom server - :param maskingViewName: the name of the masking view - :param storageSystemName: the storage system name - :returns: instance name foundInitiatorMaskingGroupInstanceName - """ - foundInitiatorMaskingGroupInstanceName = None - foundView = self._find_masking_view( - conn, maskingViewName, storageSystemName) - if foundView is not None: - groups = conn.AssociatorNames( - foundView, - ResultClass='CIM_InitiatorMaskingGroup') - if len(groups): - foundInitiatorMaskingGroupInstanceName = groups[0] - - LOG.debug( - "Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.", - {'view': maskingViewName, - 'masking': foundInitiatorMaskingGroupInstanceName}) - else: - LOG.warning("Unable to find Masking view: %(view)s.", - {'view': maskingViewName}) - - return foundInitiatorMaskingGroupInstanceName - def _verify_initiator_group_from_masking_view( - self, conn, controllerConfigService, maskingViewName, connector, - storageSystemName, igGroupName, extraSpecs): + self, serial_number, maskingview_name, maskingview_dict, + ig_from_mv, storagegroup_name, portgroup_name, extra_specs): """Check that the initiator group contains the correct initiators. If using an existing masking view check that the initiator group contains the correct initiators. If it does not contain the correct initiators then we delete the initiator group from the masking view, re-create it with the correct initiators and add it to the masking view - NOTE: EMC does not support ModifyMaskingView so we must first + NOTE: VMAX does not support ModifyMaskingView so we must first delete the masking view and recreate it. 
- - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param maskingViewName: maskingview name (String) - :param connector: the connector dict - :param storageSystemName: the storage System Name (string) - :param igGroupName: the initiator group name (String) - :param extraSpecs: extra specifications - :returns: boolean + :param serial_number: the array serial number + :param maskingview_name: name of the masking view + :param maskingview_dict: the masking view dict + :param ig_from_mv: the initiator group name + :param storagegroup_name: the storage group + :param portgroup_name: the port group + :param extra_specs: extra specifications + :return: bool, found_ig_from_connector """ - initiatorNames = self._find_initiator_names(conn, connector) - foundInitiatorGroupFromConnector = self._find_initiator_masking_group( - conn, controllerConfigService, initiatorNames) + connector = maskingview_dict['connector'] + initiator_names = self.find_initiator_names(connector) + found_ig_from_connector = self._find_initiator_group( + serial_number, initiator_names) - foundInitiatorGroupFromMaskingView = ( - self._get_initiator_group_from_masking_view( - conn, maskingViewName, storageSystemName)) - - if (foundInitiatorGroupFromConnector != - foundInitiatorGroupFromMaskingView): - if foundInitiatorGroupFromMaskingView is not None: - maskingViewInstanceName = self._find_masking_view( - conn, maskingViewName, storageSystemName) - storageGroupInstanceName = ( - self._get_storage_group_from_masking_view( - conn, maskingViewName, storageSystemName)) - portGroupInstanceName = self._get_port_group_from_masking_view( - conn, maskingViewName, storageSystemName) - if foundInitiatorGroupFromConnector is None: - storageHardwareIDInstanceNames = ( - self._get_storage_hardware_id_instance_names( - conn, initiatorNames, storageSystemName)) - if not storageHardwareIDInstanceNames: - LOG.info( - "Initiator Name(s) %(initiatorNames)s 
are not on " - "array %(storageSystemName)s.", - {'initiatorNames': initiatorNames, - 'storageSystemName': storageSystemName}) - storageHardwareIDInstanceNames = ( - self._create_hardware_ids(conn, initiatorNames, - storageSystemName)) - if not storageHardwareIDInstanceNames: - LOG.error( - "Failed to create hardware id(s) on " - "%(storageSystemName)s.", - {'storageSystemName': storageSystemName}) - return False - - igFromMaskingViewInstance = conn.GetInstance( - foundInitiatorGroupFromMaskingView, LocalOnly=False) - # if the current foundInitiatorGroupFromMaskingView name - # matches the igGroupName supplied for the new group, the - # existing ig needs to be deleted before the new one with - # the correct initiators can be created. - if (igFromMaskingViewInstance['ElementName'] == - igGroupName): + if found_ig_from_connector != ig_from_mv: + check_ig = self.rest.get_initiator_group( + serial_number, initiator_group=ig_from_mv) + if check_ig: + if found_ig_from_connector is None: + # If the name of the current initiator group from the + # masking view matches the igGroupName supplied for the + # new group, the existing ig needs to be deleted before + # the new one with the correct initiators can be created. + if maskingview_dict['init_group_name'] == ig_from_mv: # Masking view needs to be deleted before IG # can be deleted. 
- self._delete_masking_view( - conn, controllerConfigService, maskingViewName, - maskingViewInstanceName, extraSpecs) - maskingViewInstanceName = None - self._delete_initiators_from_initiator_group( - conn, controllerConfigService, - foundInitiatorGroupFromMaskingView, - igGroupName) - self._delete_initiator_group( - conn, controllerConfigService, - foundInitiatorGroupFromMaskingView, - igGroupName, extraSpecs) - foundInitiatorGroupFromConnector = ( - self._create_initiator_Group( - conn, controllerConfigService, igGroupName, - storageHardwareIDInstanceNames, extraSpecs)) - if (foundInitiatorGroupFromConnector is not None and - storageGroupInstanceName is not None and - portGroupInstanceName is not None): - if maskingViewInstanceName: - # Existing masking view needs to be deleted before - # a new one can be created. - self._delete_masking_view( - conn, controllerConfigService, maskingViewName, - maskingViewInstanceName, extraSpecs) - newMaskingViewInstanceName = ( - self._get_masking_view_instance_name( - conn, controllerConfigService, maskingViewName, - storageGroupInstanceName, portGroupInstanceName, - foundInitiatorGroupFromConnector, extraSpecs)) - if newMaskingViewInstanceName is not None: + self.rest.delete_masking_view( + serial_number, maskingview_name) + self.rest.delete_initiator_group( + serial_number, ig_from_mv) + found_ig_from_connector = ( + self._create_initiator_group( + serial_number, ig_from_mv, initiator_names, + extra_specs)) + if (found_ig_from_connector is not None and + storagegroup_name is not None and + portgroup_name is not None): + # Existing masking view (if still on the array) needs + # to be deleted before a new one can be created. 
+ try: + self.rest.delete_masking_view( + serial_number, maskingview_name) + except Exception: + pass + error_message = ( + self.create_masking_view( + serial_number, maskingview_name, storagegroup_name, + portgroup_name, + maskingview_dict['init_group_name'], + extra_specs)) + if not error_message: LOG.debug( "The old masking view has been replaced: " - "%(maskingViewName)s.", - {'maskingViewName': maskingViewName}) + "%(maskingview_name)s.", + {'maskingview_name': maskingview_name}) else: LOG.error( "One of the components of the original masking view " - "%(maskingViewName)s cannot be retrieved so " + "%(maskingview_name)s cannot be retrieved so " "please contact your system administrator to check " "that the correct initiator(s) are part of masking.", - {'maskingViewName': maskingViewName}) + {'maskingview_name': maskingview_name}) return False - return True + return True, found_ig_from_connector - def _create_initiator_Group( - self, conn, controllerConfigService, igGroupName, - hardwareIdinstanceNames, extraSpecs): + def _create_initiator_group( + self, serial_number, init_group_name, initiator_names, + extra_specs): """Create a new initiator group. - Given a list of hardwareId Instance name create a new - initiator group. - - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param igGroupName: the initiator group name (String) - :param hardwareIdinstanceNames: one or more hardware id instance names - :param extraSpecs: extra specifications - :returns: foundInitiatorGroupInstanceName - :raises VolumeBackendAPIException: + Given a list of initiators, create a new initiator group. 
+ :param serial_number: array serial number + :param init_group_name: the name for the initiator group + :param initiator_names: initaitor names + :param extra_specs: the extra specifications + :return: the initiator group name """ - rc, job = conn.InvokeMethod( - 'CreateGroup', controllerConfigService, GroupName=igGroupName, - Type=self.utils.get_num(INITIATORGROUPTYPE, '16'), - Members=hardwareIdinstanceNames) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Group: %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'groupName': igGroupName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - foundInitiatorGroupInstanceName = self._find_new_initiator_group( - conn, job) - - return foundInitiatorGroupInstanceName + self.rest.create_initiator_group( + serial_number, init_group_name, initiator_names, extra_specs) + return init_group_name def _check_ig_rollback( - self, conn, controllerConfigService, - igGroupName, connector, extraSpecs): + self, serial_number, init_group_name, connector): """Check if rollback action is required on an initiator group. If anything goes wrong on a masking view creation, we need to check if the process created a now-stale initiator group before failing, i.e. an initiator group a) matching the name used in the mv process and b) not associated with any other masking views. - If a stale ig exists, remove the initiators and delete the ig. - - :param conn: the ecom connection - :param controllerConfigService: controller config service - :param igGroupName: the initiator group name + If a stale ig exists, delete the ig. 
+ :param serial_number: the array serial number + :param init_group_name: the initiator group name :param connector: the connector object - :param extraSpecs: extra specifications """ - initiatorNames = self._find_initiator_names(conn, connector) - foundInitiatorGroupInstanceName = self._find_initiator_masking_group( - conn, controllerConfigService, initiatorNames) - if foundInitiatorGroupInstanceName: - initiatorGroupInstance = conn.GetInstance( - foundInitiatorGroupInstanceName, LocalOnly=False) - if initiatorGroupInstance['ElementName'] == igGroupName: - host = igGroupName.split("-")[1] + initiator_names = self.find_initiator_names(connector) + found_ig_name = self._find_initiator_group( + serial_number, initiator_names) + if found_ig_name: + if found_ig_name == init_group_name: + host = init_group_name.split("-")[1] LOG.debug("Searching for masking views associated with " - "%(igGroupName)s", - {'igGroupName': igGroupName}) + "%(init_group_name)s", + {'init_group_name': init_group_name}) self._last_volume_delete_initiator_group( - conn, controllerConfigService, - foundInitiatorGroupInstanceName, extraSpecs, host) - - def _get_port_group_from_masking_view( - self, conn, maskingViewName, storageSystemName): - """Given the masking view name get the port group from it. 
- - :param conn: connection to the ecom server - :param maskingViewName: the name of the masking view - :param storageSystemName: the storage system name - :returns: instance name foundPortMaskingGroupInstanceName - """ - - foundPortMaskingGroupInstanceName = None - foundView = self._find_masking_view( - conn, maskingViewName, storageSystemName) - if foundView: - foundPortMaskingGroupInstanceName = ( - self.get_port_group_from_masking_view_instance( - conn, foundView)) - - LOG.debug( - "Masking view: %(view)s portMaskingGroup: %(masking)s.", - {'view': maskingViewName, - 'masking': foundPortMaskingGroupInstanceName}) - - return foundPortMaskingGroupInstanceName - - def get_port_group_from_masking_view_instance( - self, conn, maskingViewInstanceName): - """Given the masking view name get the port group from it. - - :param conn: connection to the ecom server - :param maskingViewInstanceName: the masking view instance name - :returns: instance name foundPortMaskingGroupInstanceName - """ - - foundPortMaskingGroupInstanceName = None - - groups = conn.AssociatorNames( - maskingViewInstanceName, - ResultClass='CIM_TargetMaskingGroup') - if len(groups) > 0: - foundPortMaskingGroupInstanceName = groups[0] - - return foundPortMaskingGroupInstanceName - - def _delete_masking_view( - self, conn, controllerConfigService, maskingViewName, - maskingViewInstanceName, extraSpecs): - """Delete a masking view. 
- - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param maskingViewName: maskingview name (String) - :param maskingViewInstanceName: the masking view instance name - :param extraSpecs: extra specifications - :raises VolumeBackendAPIException: - """ - rc, job = conn.InvokeMethod('DeleteMaskingView', - controllerConfigService, - ProtocolController=maskingViewInstanceName) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Modifying masking view : %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'groupName': maskingViewName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - def get_masking_view_from_storage_group( - self, conn, storageGroupInstanceName): - """Get the associated maskingview instance name. - - Given storage group instance name, get the associated masking - view instance name. - - :param conn: connection to the ecom server - :param storageGroupInstanceName: the storage group instance name - :returns: instance name foundMaskingViewInstanceName - """ - maskingViews = conn.AssociatorNames( - storageGroupInstanceName, - ResultClass='Symm_LunMaskingView') - - return maskingViews - - def add_volume_to_storage_group( - self, conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, sgGroupName, extraSpecs): - """Add a volume to an existing storage group. 
- - :param conn: connection to ecom server - :param controllerConfigService: the controller configuration service - :param storageGroupInstanceName: storage group instance name - :param volumeInstance: the volume instance - :param volumeName: the name of the volume (String) - :param sgGroupName: the name of the storage group (String) - :param extraSpecs: additional info - :returns: int -- rc the return code of the job - :returns: dict -- the job dict - """ - self.provision.add_members_to_masking_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance.path, volumeName, extraSpecs) - - LOG.info( - "Added volume: %(volumeName)s to existing storage group " - "%(sgGroupName)s.", - {'volumeName': volumeName, - 'sgGroupName': sgGroupName}) - - def remove_device_from_default_storage_group( - self, conn, controllerConfigService, volumeInstanceName, - volumeName, fastPolicyName, extraSpecs): - """Remove the volume from the default storage group. - - Remove the volume from the default storage group for the FAST - policy and return the default storage group instance name. 
- - :param conn: the connection to the ecom server - :param controllerConfigService: the controller config service - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name (String) - :param fastPolicyName: the fast policy name (String) - :param extraSpecs: additional info - :returns: instance name defaultStorageGroupInstanceName - """ - failedRet = None - defaultStorageGroupInstanceName, defaultSgName = ( - self.fast.get_and_verify_default_storage_group( - conn, controllerConfigService, volumeInstanceName, - volumeName, fastPolicyName)) - - if defaultStorageGroupInstanceName is None: - LOG.warning( - "Volume %(volumeName)s was not first part of the default " - "storage group for the FAST Policy.", - {'volumeName': volumeName}) - return failedRet - - @coordination.synchronized("emc-sg-{sgName}") - def do_remove_vol_from_sg(sgName): - assocVolumeInstanceNames = self.get_devices_from_storage_group( - conn, defaultStorageGroupInstanceName) - - LOG.debug( - "There are %(length)lu associated with the default storage " - "group for fast before removing volume %(volumeName)s.", - {'length': len(assocVolumeInstanceNames), - 'volumeName': volumeName}) - - self.provision.remove_device_from_storage_group( - conn, controllerConfigService, - defaultStorageGroupInstanceName, volumeInstanceName, - volumeName, extraSpecs) - - assocVolumeInstanceNames = self.get_devices_from_storage_group( - conn, defaultStorageGroupInstanceName) - LOG.debug( - "There are %(length)lu associated with the default storage " - "group %(sg)s after removing volume %(volumeName)s.", - {'length': len(assocVolumeInstanceNames), - 'sg': sgName, 'volumeName': volumeName}) - - do_remove_vol_from_sg(defaultStorageGroupInstanceName['ElementName']) - - # Required for unit tests. 
- emptyStorageGroupInstanceName = ( - self._wrap_get_storage_group_from_volume(conn, volumeInstanceName, - defaultSgName)) - - if emptyStorageGroupInstanceName is not None: - LOG.error( - "Failed to remove %(volumeName)s from the default storage " - "group for the FAST Policy.", - {'volumeName': volumeName}) - return failedRet - - return defaultStorageGroupInstanceName - - def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName, - defaultSgName): - """Wrapper for get_storage_group_from_volume. - - Needed for override in tests. - - :param conn: the connection to the ecom server - :param volumeInstanceName: the volume instance name - :param defaultSgName: the default storage group name - :returns: emptyStorageGroupInstanceName - """ - - return self.utils.get_storage_group_from_volume( - conn, volumeInstanceName, defaultSgName) - - def get_devices_from_storage_group( - self, conn, storageGroupInstanceName): - """Get the associated volume Instance names. - - Given the storage group instance name get the associated volume - Instance names. - - :param conn: connection to the ecom server - :param storageGroupInstanceName: the storage group instance name - :returns: list -- volumeInstanceNames list of volume instance names - """ - volumeInstanceNames = conn.AssociatorNames( - storageGroupInstanceName, - ResultClass='EMC_StorageVolume') - - return volumeInstanceNames - - def get_associated_masking_groups_from_device( - self, conn, volumeInstanceName): - """Get the associated storage groups from the volume Instance name. - - Given the volume instance name get the associated storage group - instance names. 
- - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :returns: list -- list of storage group instance names - """ - maskingGroupInstanceNames = conn.AssociatorNames( - volumeInstanceName, - ResultClass='CIM_DeviceMaskingGroup', - AssocClass='CIM_OrderedMemberOfCollection') - if len(maskingGroupInstanceNames) > 0: - return maskingGroupInstanceNames - else: - LOG.info("Volume %(volumeName)s not in any storage group.", - {'volumeName': volumeInstanceName}) - return None + serial_number, found_ig_name, host) def remove_and_reset_members( - self, conn, controllerConfigService, volumeInstance, - volumeName, extraSpecs, connector=None, reset=True): + self, serial_number, device_id, volume_name, extra_specs, + reset=True): """This is called on a delete, unmap device or rollback. - If the connector is not None get the associated SG and remove volume - from the storage group, otherwise it is a VMAX3 deletion. - - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param volumeInstance: the volume Instance - :param volumeName: the volume name - :param extraSpecs: additional info - :param connector: optional + :param serial_number: the array serial number + :param device_id: the volume device id + :param volume_name: the volume name + :param extra_specs: additional info :param reset: reset, return to original SG (optional) - :returns: storageGroupInstanceName """ - storageGroupInstanceName = None - if extraSpecs[ISV3]: - self._cleanup_deletion_v3( - conn, controllerConfigService, volumeInstance, extraSpecs) - else: - if connector: - storageGroupInstanceName = ( - self._get_sg_associated_with_connector( - conn, controllerConfigService, volumeInstance.path, - volumeName, connector)) - if storageGroupInstanceName: - self._remove_volume_from_sg( - conn, controllerConfigService, - storageGroupInstanceName, - volumeInstance, extraSpecs) - else: - LOG.warning("Cannot get 
storage from connector.") - + self._cleanup_deletion( + serial_number, device_id, volume_name, extra_specs) if reset: - self._return_back_to_default_sg( - conn, controllerConfigService, volumeInstance, volumeName, - extraSpecs) + self.return_volume_to_default_storage_group( + serial_number, device_id, volume_name, extra_specs) - return storageGroupInstanceName + def _cleanup_deletion( + self, serial_number, device_id, volume_name, extra_specs): + """Prepare a volume for a delete operation. - def _cleanup_deletion_v3( - self, conn, controllerConfigService, volumeInstance, extraSpecs): - """Pre cleanup before VMAX3 deletion operation - - :param conn: the ecom connection - :param controllerConfigService: storage system instance name - :param volumeInstance: the volume instance - :param extraSpecs: the extra specifications + :param serial_number: the array serial number + :param device_id: the volume device id + :param volume_name: the volume name + :param extra_specs: the extra specifications """ - storageGroupInstanceNames = ( - self.get_associated_masking_groups_from_device( - conn, volumeInstance.path)) + storagegroup_names = (self.rest.get_storage_groups_from_volume( + serial_number, device_id)) + if storagegroup_names: + for sg_name in storagegroup_names: + self.remove_volume_from_sg( + serial_number, device_id, volume_name, sg_name, + extra_specs) - if storageGroupInstanceNames: - sgNum = len(storageGroupInstanceNames) - if len(storageGroupInstanceNames) > 1: - LOG.warning("Volume %(volumeName)s is belong to %(sgNum)s " - "storage groups.", - {'volumeName': volumeInstance['ElementName'], - 'sgNum': sgNum}) - for storageGroupInstanceName in storageGroupInstanceNames: - self._remove_volume_from_sg( - conn, controllerConfigService, - storageGroupInstanceName, - volumeInstance, - extraSpecs) + def remove_volume_from_sg( + self, serial_number, device_id, vol_name, storagegroup_name, + extra_specs): + """Remove a volume from a storage group. 
- def _remove_volume_from_sg( - self, conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, extraSpecs): - """Remove volume from storage group - - :param conn: the ecom connection - :param controllerConfigService: storage system instance name - :param storageGroupInstanceName: the SG instance name - :param volumeInstance: the volume instance - :param extraSpecs: the extra specifications + :param serial_number: the array serial number + :param device_id: the volume device id + :param vol_name: the volume name + :param storagegroup_name: the storage group name + :param extra_specs: the extra specifications """ - instance = conn.GetInstance(storageGroupInstanceName, LocalOnly=False) - storageGroupName = instance['ElementName'] - mvInstanceNames = self.get_masking_view_from_storage_group( - conn, storageGroupInstanceName) - if not mvInstanceNames: - LOG.debug("Unable to get masking views from storage group.") + masking_list = self.rest.get_masking_views_from_storage_group( + serial_number, storagegroup_name) + if not masking_list: + LOG.debug("No masking views associated with storage group " + "%(sg_name)s" % {'sg_name': storagegroup_name}) - @coordination.synchronized("emc-sg-{storageGroup}") - def do_remove_volume_from_sg(storageGroup): - volumeInstanceNames = self.get_devices_from_storage_group( - conn, storageGroupInstanceName) - numVolInStorageGroup = len(volumeInstanceNames) - LOG.debug( - "There are %(numVol)d volumes in the storage group " - "%(maskingGroup)s.", - {'numVol': numVolInStorageGroup, - 'maskingGroup': storageGroup}) - - if numVolInStorageGroup == 1: - # Last volume in the storage group. 
- self._last_vol_in_SG( - conn, controllerConfigService, - storageGroupInstanceName, - storageGroupName, volumeInstance, - volumeInstance['ElementName'], extraSpecs) - else: - # Not the last volume so remove it from storage group - self._multiple_vols_in_SG( - conn, controllerConfigService, - storageGroupInstanceName, volumeInstance, - volumeInstance['ElementName'], - numVolInStorageGroup, extraSpecs) - - return do_remove_volume_from_sg(storageGroupName) - else: - for mvInstanceName in mvInstanceNames: - # need to lock masking view when we are locking the storage - # group to avoid possible deadlock situations from concurrent - # processes - maskingViewInstance = conn.GetInstance( - mvInstanceName, LocalOnly=False) - maskingViewName = maskingViewInstance['ElementName'] - - @coordination.synchronized("emc-mv-{maskingView}") - @coordination.synchronized("emc-sg-{storageGroup}") - def do_remove_volume_from_sg(maskingView, storageGroup): - volumeInstanceNames = self.get_devices_from_storage_group( - conn, storageGroupInstanceName) - numVolInStorageGroup = len(volumeInstanceNames) + @coordination.synchronized("emc-sg-{sg_name}") + def do_remove_volume_from_sg(sg_name): + # Make sure volume hasn't been recently removed from the sg + is_vol = self.rest.is_volume_in_storagegroup( + serial_number, device_id, sg_name) + if is_vol: + num_vol_in_sg = self.rest.get_num_vols_in_sg( + serial_number, sg_name) LOG.debug( - "There are %(numVol)d volumes in the storage group " - "%(maskingGroup)s associated with %(mvName)s", - {'numVol': numVolInStorageGroup, - 'maskingGroup': storageGroup, - 'mvName': maskingView}) + "There are %(num_vol)d volumes in the storage group " + "%(sg_name)s.", + {'num_vol': num_vol_in_sg, + 'sg_name': sg_name}) - if numVolInStorageGroup == 1: - # Last volume in the storage group. 
- self._last_vol_in_SG( - conn, controllerConfigService, - storageGroupInstanceName, - storageGroupName, volumeInstance, - volumeInstance['ElementName'], extraSpecs) + if num_vol_in_sg == 1: + # Last volume in the storage group - delete sg. + self._last_vol_in_sg( + serial_number, device_id, vol_name, sg_name, + extra_specs) else: # Not the last volume so remove it from storage group - self._multiple_vols_in_SG( - conn, controllerConfigService, - storageGroupInstanceName, - volumeInstance, volumeInstance['ElementName'], - numVolInStorageGroup, extraSpecs) - return do_remove_volume_from_sg(maskingViewName, - storageGroupName) + self._multiple_vols_in_sg( + serial_number, device_id, sg_name, vol_name, + extra_specs) + else: + LOG.info("Volume with device_id %(dev)s is no longer a " + "member of %(sg)s.", + {'dev': device_id, 'sg': sg_name}) - def _last_vol_in_SG( - self, conn, controllerConfigService, storageGroupInstanceName, - storageGroupName, volumeInstance, volumeName, extraSpecs): + return do_remove_volume_from_sg(storagegroup_name) + else: + # Need to lock masking view when we are locking the storage + # group to avoid possible deadlock situations from concurrent + # processes + masking_name = masking_list[0] + parent_sg_name = self.rest.get_element_from_masking_view( + serial_number, masking_name, storagegroup=True) + + @coordination.synchronized("emc-mv-{parent_name}") + @coordination.synchronized("emc-mv-{mv_name}") + @coordination.synchronized("emc-sg-{sg_name}") + def do_remove_volume_from_sg(mv_name, sg_name, parent_name): + # Make sure volume hasn't been recently removed from the sg + is_vol = self.rest.is_volume_in_storagegroup( + serial_number, device_id, sg_name) + if is_vol: + num_vol_in_sg = self.rest.get_num_vols_in_sg( + serial_number, sg_name) + LOG.debug( + "There are %(num_vol)d volumes in the storage group " + "%(sg_name)s associated with %(mv_name)s. 
Parent " + "storagegroup is %(parent)s.", + {'num_vol': num_vol_in_sg, 'sg_name': sg_name, + 'mv_name': mv_name, 'parent': parent_name}) + + if num_vol_in_sg == 1: + # Last volume in the storage group - delete sg. + self._last_vol_in_sg( + serial_number, device_id, vol_name, sg_name, + extra_specs) + else: + # Not the last volume so remove it from storage group + self._multiple_vols_in_sg( + serial_number, device_id, sg_name, vol_name, + extra_specs) + else: + LOG.info("Volume with device_id %(dev)s is no longer a " + "member of %(sg)s", + {'dev': device_id, 'sg': sg_name}) + + return do_remove_volume_from_sg(masking_name, storagegroup_name, + parent_sg_name) + + def _last_vol_in_sg(self, serial_number, device_id, volume_name, + storagegroup_name, extra_specs): """Steps if the volume is the last in a storage group. 1. Check if the volume is in a masking view. - 2. If it is in a masking view, delete the masking view, remove the - initiators from the initiator group and delete the initiator - group if there are no other masking views associated with the - initiator group, remove the volume from the storage group, and - delete the storage group. - 3. If it is not in a masking view, remove the volume from the - storage group and delete the storage group. - - :param conn: the ecom connection - :param controllerConfigService: storage system instance name - :param storageGroupInstanceName: the SG instance name - :param storageGroupName: the Storage group name (String) - :param volumeInstance: the volume instance - :param volumeName: the volume name - :param extraSpecs: the extra specifications + 2. If it is in a masking view, check if it is the last volume in the + masking view or just this child storage group. + 3. If it is last in the masking view, delete the masking view, + delete the initiator group if there are no other masking views + associated with it, and delete the both the current storage group + and its parent group. + 4. 
Otherwise, remove the volume and delete the child storage group. + 5. If it is not in a masking view, delete the storage group. + :param serial_number: array serial number + :param device_id: volume device id + :param volume_name: volume name + :param storagegroup_name: storage group name + :param extra_specs: extra specifications + :return: status -- bool """ - status = False LOG.debug("Only one volume remains in storage group " "%(sgname)s. Driver will attempt cleanup.", - {'sgname': storageGroupName}) - mvInstanceNames = self.get_masking_view_from_storage_group( - conn, storageGroupInstanceName) - if not mvInstanceNames: - # Remove the volume from the storage group and delete the SG. - self._remove_last_vol_and_delete_sg( - conn, controllerConfigService, - storageGroupInstanceName, - storageGroupName, volumeInstance.path, - volumeName, extraSpecs) - status = True + {'sgname': storagegroup_name}) + maskingview_list = self.rest.get_masking_views_from_storage_group( + serial_number, storagegroup_name) + if not bool(maskingview_list): + status = self._last_vol_no_masking_views( + serial_number, storagegroup_name, device_id, volume_name, + extra_specs) else: - mv_count = len(mvInstanceNames) - for mvInstanceName in mvInstanceNames: - maskingViewInstance = conn.GetInstance( - mvInstanceName, LocalOnly=False) - maskingViewName = maskingViewInstance['ElementName'] - - def do_delete_mv_ig_and_sg(): - return self._delete_mv_ig_and_sg( - conn, controllerConfigService, mvInstanceName, - maskingViewName, storageGroupInstanceName, - storageGroupName, volumeInstance, volumeName, - extraSpecs, mv_count) - do_delete_mv_ig_and_sg() - status = True - mv_count -= 1 + status = self._last_vol_masking_views( + serial_number, storagegroup_name, maskingview_list, + device_id, volume_name, extra_specs) return status - def _multiple_vols_in_SG( - self, conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, numVolsInSG, extraSpecs): - """If the volume is not 
the last in the storage group + def _last_vol_no_masking_views(self, serial_number, storagegroup_name, + device_id, volume_name, extra_specs): + """Remove the last vol from an sg not associated with an mv. - Remove the volume from the SG. - - :param conn: the ecom connection - :param controllerConfigService: storage system instance name - :param storageGroupInstanceName: the SG instance name - :param volumeInstance: the volume instance - :param volumeName: the volume name - :param numVolsInSG: the number of volumes in the SG - :param extraSpecs: the extra specifications + Helper function for removing the last vol from a storage group + which is not associated with a masking view. + :param serial_number: the array serial number + :param storagegroup_name: the storage group name + :param device_id: the device id + :param volume_name: the volume name + :param extra_specs: the extra specifications + :return: status -- bool """ + # Check if storage group is a child sg: + parent_sg = self.get_parent_sg_from_child( + serial_number, storagegroup_name) + # Delete the storage group. 
+ if parent_sg is None: + self.rest.delete_storage_group(serial_number, storagegroup_name) + status = True + else: + num_vols_parent = self.rest.get_num_vols_in_sg( + serial_number, parent_sg) + if num_vols_parent == 1: + self._delete_cascaded_storage_groups( + serial_number, storagegroup_name, parent_sg) + else: + self._remove_last_vol_and_delete_sg( + serial_number, device_id, volume_name, + storagegroup_name, extra_specs, parent_sg) + status = True + return status - LOG.debug("Start: number of volumes in masking storage group: " - "%(numVol)d", {'numVol': numVolsInSG}) - self.provision.remove_device_from_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance.path, volumeName, extraSpecs) + def _last_vol_masking_views( + self, serial_number, storagegroup_name, maskingview_list, + device_id, volume_name, extra_specs): + """Remove the last vol from an sg associated with masking views. + + Helper function for removing the last vol from a storage group + which is associated with one or more masking views. 
+ :param serial_number: the array serial number + :param storagegroup_name: the storage group name + :param maskingview_list: the liast of masking views + :param device_id: the device id + :param volume_name: the volume name + :param extra_specs: the extra specifications + :return: status -- bool + """ + status = False + for mv in maskingview_list: + num_vols_in_mv, parent_sg_name = ( + self._get_num_vols_from_mv(serial_number, mv)) + # If the volume is the last in the masking view, full cleanup + if num_vols_in_mv == 1: + def do_delete_mv_ig_and_sg(): + return self._delete_mv_ig_and_sg( + serial_number, mv, storagegroup_name, parent_sg_name) + + do_delete_mv_ig_and_sg() + else: + self._remove_last_vol_and_delete_sg( + serial_number, device_id, volume_name, + storagegroup_name, extra_specs, parent_sg_name) + status = True + return status + + def get_parent_sg_from_child(self, serial_number, storagegroup_name): + """Given a storage group name, get its parent storage group, if any. + + :param serial_number: the array serial number + :param storagegroup_name: the name of the storage group + :return: the parent storage group name, or None + """ + parent_sg_name = None + storagegroup = self.rest.get_storage_group( + serial_number, storagegroup_name) + if storagegroup and storagegroup.get('parent_storage_group'): + parent_sg_name = storagegroup['parent_storage_group'][0] + return parent_sg_name + + def _get_num_vols_from_mv(self, serial_number, maskingview_name): + """Get the total number of volumes associated with a masking view. 
+ + :param serial_number: the array serial number + :param maskingview_name: the name of the masking view + :return: num_vols, parent_sg_name + """ + parent_sg_name = self.rest.get_element_from_masking_view( + serial_number, maskingview_name, storagegroup=True) + num_vols = self.rest.get_num_vols_in_sg(serial_number, parent_sg_name) + return num_vols, parent_sg_name + + def _multiple_vols_in_sg(self, serial_number, device_id, storagegroup_name, + volume_name, extra_specs): + """Remove the volume from the SG. + + If the volume is not the last in the storage group, + remove the volume from the SG and leave the sg on the array. + :param serial_number: array serial number + :param device_id: volume device id + :param volume_name: volume name + :param storagegroup_name: storage group name + :param extra_specs: extra specifications + """ + self._remove_vol_from_storage_group( + serial_number, device_id, storagegroup_name, + volume_name, extra_specs) LOG.debug( - "RemoveMembers for volume %(volumeName)s completed " - "successfully.", {'volumeName': volumeName}) + "RemoveMembers for volume %(volume_name)s completed " + "successfully.", {'volume_name': volume_name}) - volumeInstanceNames = self.get_devices_from_storage_group( - conn, storageGroupInstanceName) - LOG.debug( - "End: number of volumes in masking storage group: %(numVol)d.", - {'numVol': len(volumeInstanceNames)}) + num_vol_in_sg = self.rest.get_num_vols_in_sg( + serial_number, storagegroup_name) + LOG.debug("There are %(num_vol)d volumes remaining in the storage " + "group %(sg_name)s.", + {'num_vol': num_vol_in_sg, + 'sg_name': storagegroup_name}) + + def _delete_cascaded_storage_groups(self, serial_number, child_sg_name, + parent_sg_name): + """Delete a child and parent storage groups. 
+ + :param serial_number: the array serial number + :param child_sg_name: the child storage group name + :param parent_sg_name: the parent storage group name + """ + self.rest.delete_storage_group(serial_number, parent_sg_name) + self.rest.delete_storage_group(serial_number, child_sg_name) + + LOG.debug("Storage Groups %(storagegroup_name)s and %(parent)s " + "successfully deleted.", + {'storagegroup_name': child_sg_name, + 'parent': parent_sg_name}) def _delete_mv_ig_and_sg( - self, conn, controllerConfigService, mvInstanceName, - maskingViewName, storageGroupInstanceName, storageGroupName, - volumeInstance, volumeName, extraSpecs, mv_count): - """Delete the Masking view, the storage Group and the initiator group. + self, serial_number, masking_view, storagegroup_name, + parent_sg_name): + """Delete the masking view, storage groups and initiator group. - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param mvInstanceName: masking view instance name - :param maskingViewName: masking view name - :param storageGroupInstanceName: storage group instance name - :param maskingViewName: masking view name - :param volumeInstance: the volume Instance - :param volumeName: the volume name - :param extraSpecs: extra specs - :param mv_count: number of masking views + :param serial_number: array serial number + :param masking_view: masking view name + :param storagegroup_name: storage group name + :param parent_sg_name: the parent storage group name """ - isV3 = extraSpecs[ISV3] - fastPolicyName = extraSpecs.get(FASTPOLICY, None) - host = maskingViewName.split("-")[1] + host = masking_view.split("-")[1] - storageSystemInstanceName = self.utils.find_storage_system( - conn, controllerConfigService) - initiatorGroupInstanceName = ( - self.get_initiator_group_from_masking_view(conn, mvInstanceName)) - self._last_volume_delete_masking_view( - conn, controllerConfigService, mvInstanceName, - maskingViewName, 
extraSpecs) - initiatorGroupInstance = conn.GetInstance(initiatorGroupInstanceName) - if initiatorGroupInstance: - initiatorGroupName = initiatorGroupInstance['ElementName'] + initiatorgroup = self.rest.get_element_from_masking_view( + serial_number, masking_view, host=True) + self._last_volume_delete_masking_view(serial_number, masking_view) + self._last_volume_delete_initiator_group( + serial_number, initiatorgroup, host) + self._delete_cascaded_storage_groups(serial_number, storagegroup_name, + parent_sg_name) - @coordination.synchronized('emc-ig-{initiatorGroupName}') - def inner_do_delete_initiator_group(initiatorGroupName): - self._last_volume_delete_initiator_group( - conn, controllerConfigService, - initiatorGroupInstanceName, extraSpecs, host) - inner_do_delete_initiator_group(initiatorGroupName) - if not isV3: - isTieringPolicySupported, tierPolicyServiceInstanceName = ( - self._get_tiering_info(conn, storageSystemInstanceName, - fastPolicyName)) - self._get_and_remove_rule_association( - conn, fastPolicyName, - isTieringPolicySupported, - tierPolicyServiceInstanceName, - storageSystemInstanceName['Name'], - storageGroupInstanceName, extraSpecs) - - if mv_count == 1: - if self._is_volume_in_storage_group( - conn, storageGroupInstanceName, - volumeInstance, storageGroupName): - self._remove_last_vol_and_delete_sg( - conn, controllerConfigService, storageGroupInstanceName, - storageGroupName, volumeInstance.path, volumeName, - extraSpecs) - - LOG.debug( - "Volume %(volumeName)s successfully removed from SG and " - "Storage Group %(storageGroupName)s successfully deleted. ", - {'volumeName': volumeName, - 'storageGroupName': storageGroupName}) - - def _return_back_to_default_sg( - self, conn, controllerConfigService, volumeInstance, volumeName, - extraSpecs): - """Return volume to default storage group - - Moving the volume to the default SG for VMAX3 and - FAST for VMAX2. 
- - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param volumeInstance: the volume Instance - :param volumeName: the volume name - :param extraSpecs: extra specs - """ - # Add it back to the default storage group. - if extraSpecs[ISV3]: - self.return_volume_to_default_storage_group_v3( - conn, controllerConfigService, - volumeInstance, volumeName, extraSpecs) - else: - # V2 if FAST POLICY enabled, move the volume to the default - # SG. - fastPolicyName = extraSpecs.get(FASTPOLICY, None) - storageSystemInstanceName = self.utils.find_storage_system( - conn, controllerConfigService) - isTieringPolicySupported, __ = ( - self._get_tiering_info(conn, storageSystemInstanceName, - fastPolicyName)) - if fastPolicyName is not None and isTieringPolicySupported: - self._cleanup_tiering( - conn, controllerConfigService, fastPolicyName, - volumeInstance, volumeName, extraSpecs) - - def _get_sg_associated_with_connector( - self, conn, controllerConfigService, volumeInstanceName, - volumeName, connector): - """Get storage group associated with connector. - - If the connector gets passed then extra logic required to - get storage group. - - :param conn: the ecom connection - :param controllerConfigService: storage system instance name - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name (String) - :param connector: the connector object - :returns: storageGroupInstanceName(can be None) - """ - return self._get_sg_or_mv_associated_with_initiator( - conn, controllerConfigService, volumeInstanceName, - volumeName, connector, True) - - def _get_tiering_info( - self, conn, storageSystemInstanceName, fastPolicyName): - """Get tiering specifics. 
- - :param conn: the ecom connection - :param storageSystemInstanceName: storage system instance name - :param fastPolicyName: - :returns: boolean -- isTieringPolicySupported - :returns: tierPolicyServiceInstanceName - """ - isTieringPolicySupported = False - tierPolicyServiceInstanceName = None - if fastPolicyName is not None: - tierPolicyServiceInstanceName = self.utils.get_tier_policy_service( - conn, storageSystemInstanceName) - - isTieringPolicySupported = self.fast.is_tiering_policy_enabled( - conn, tierPolicyServiceInstanceName) - LOG.debug( - "FAST policy enabled on %(storageSystem)s: %(isSupported)s", - {'storageSystem': storageSystemInstanceName, - 'isSupported': isTieringPolicySupported}) - - return isTieringPolicySupported, tierPolicyServiceInstanceName - - def _last_volume_delete_masking_view( - self, conn, controllerConfigService, mvInstanceName, - maskingViewName, extraSpecs): + def _last_volume_delete_masking_view(self, serial_number, masking_view): """Delete the masking view. Delete the masking view if the volume is the last one in the storage group. 
- - :param conn: the ecom connection - :param controllerConfigService: controller config service - :param mvInstanceName: masking view instance name - :param maskingViewName: masking view name - :param extraSpecs: extra specifications + :param serial_number: the array serial number + :param masking_view: masking view name """ - LOG.debug( - "Last volume in the storage group, deleting masking view " - "%(maskingViewName)s.", - {'maskingViewName': maskingViewName}) - self._delete_masking_view( - conn, controllerConfigService, maskingViewName, - mvInstanceName, extraSpecs) + LOG.debug("Last volume in the storage group, deleting masking view " + "%(maskingview_name)s.", {'maskingview_name': masking_view}) + self.rest.delete_masking_view(serial_number, masking_view) + LOG.info("Masking view %(maskingview)s successfully deleted.", + {'maskingview': masking_view}) - mvInstance = self.utils.get_existing_instance( - conn, mvInstanceName) - if mvInstance: - exceptionMessage = (_( - "Masking view %(maskingViewName)s " - "was not deleted successfully") % - {'maskingViewName': maskingViewName}) + def return_volume_to_default_storage_group( + self, serial_number, device_id, volume_name, extra_specs): + """Return volume to its default storage group. - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + :param serial_number: the array serial number + :param device_id: the volume device id + :param volume_name: the volume name + :param extra_specs: the extra specifications + """ + storagegroup_name = self.get_or_create_default_storage_group( + serial_number, extra_specs[utils.SRP], extra_specs[utils.SLO], + extra_specs[utils.WORKLOAD], extra_specs) + + self._check_adding_volume_to_storage_group( + serial_number, device_id, storagegroup_name, volume_name, + extra_specs) + + def get_or_create_default_storage_group( + self, serial_number, srp, slo, workload, extra_specs): + """Get or create a default storage group. 
+ + :param serial_number: the array serial number + :param srp: the SRP name + :param slo: the SLO + :param workload: the workload + :param extra_specs: extra specifications + :returns: storagegroup_name + :raises: VolumeBackendAPIException + """ + storagegroup, storagegroup_name = ( + self.rest.get_vmax_default_storage_group( + serial_number, srp, slo, workload)) + if storagegroup is None: + self.provision.create_storage_group( + serial_number, storagegroup_name, srp, slo, workload, + extra_specs) else: - LOG.info( - "Masking view %(maskingViewName)s successfully deleted.", - {'maskingViewName': maskingViewName}) - - def _get_and_remove_rule_association( - self, conn, fastPolicyName, isTieringPolicySupported, - tierPolicyServiceInstanceName, storageSystemName, - storageGroupInstanceName, extraSpecs): - """Remove the storage group from the policy rule. - - :param conn: the ecom connection - :param fastPolicyName: the fast policy name - :param isTieringPolicySupported: boolean - :param tierPolicyServiceInstanceName: the tier policy instance name - :param storageSystemName: storage system name - :param storageGroupInstanceName: the storage group instance name - :param extraSpecs: additional info - """ - # Disassociate storage group from FAST policy. 
- if fastPolicyName is not None and isTieringPolicySupported is True: - tierPolicyInstanceName = self.fast.get_tier_policy_by_name( - conn, storageSystemName, fastPolicyName) - - LOG.debug( - "Policy: %(policy)s, policy service:%(service)s, " - "masking group: %(maskingGroup)s.", - {'policy': tierPolicyInstanceName, - 'service': tierPolicyServiceInstanceName, - 'maskingGroup': storageGroupInstanceName}) - - self.fast.delete_storage_group_from_tier_policy_rule( - conn, tierPolicyServiceInstanceName, - storageGroupInstanceName, tierPolicyInstanceName, extraSpecs) - - def return_volume_to_default_storage_group_v3( - self, conn, controllerConfigurationService, - volumeInstance, volumeName, extraSpecs): - """Return volume to the default storage group in v3. - - :param conn: the ecom connection - :param controllerConfigurationService: controller config service - :param volumeInstance: volumeInstance - :param volumeName: the volume name - :param extraSpecs: additional info - :raises VolumeBackendAPIException: - """ - rep_enabled = self.utils.is_replication_enabled(extraSpecs) - isCompressionDisabled = self.utils.is_compression_disabled(extraSpecs) - storageGroupName = self.utils.get_v3_storage_group_name( - extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD], isCompressionDisabled, - rep_enabled) - storageGroupInstanceName = self.utils.find_storage_masking_group( - conn, controllerConfigurationService, storageGroupName) - - if not storageGroupInstanceName: - storageGroupInstanceName = ( - self.provisionv3.create_storage_group_v3( - conn, controllerConfigurationService, storageGroupName, - extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO], - extraSpecs[self.utils.WORKLOAD], extraSpecs, - isCompressionDisabled)) - if not storageGroupInstanceName: - errorMessage = (_("Failed to create storage group " - "%(storageGroupName)s.") % - {'storageGroupName': storageGroupName}) - LOG.error(errorMessage) - raise 
exception.VolumeBackendAPIException(data=errorMessage) - - self._add_volume_to_sg_and_verify( - conn, controllerConfigurationService, - storageGroupInstanceName, volumeInstance, volumeName, - storageGroupName, extraSpecs) - - def _cleanup_tiering( - self, conn, controllerConfigService, fastPolicyName, - volumeInstance, volumeName, extraSpecs): - """Clean up tiering. - - :param conn: the ecom connection - :param controllerConfigService: the controller configuration service - :param fastPolicyName: the fast policy name - :param volumeInstance: volume instance - :param volumeName: the volume name - :param extraSpecs: additional info - """ - defaultStorageGroupInstanceName = ( - self.fast.get_policy_default_storage_group( - conn, controllerConfigService, fastPolicyName)) - volumeInstanceNames = self.get_devices_from_storage_group( - conn, defaultStorageGroupInstanceName) - LOG.debug( - "Start: number of volumes in default storage group: %(numVol)d.", - {'numVol': len(volumeInstanceNames)}) - defaultStorageGroupInstanceName = ( - self.fast.add_volume_to_default_storage_group_for_fast_policy( - conn, controllerConfigService, volumeInstance, volumeName, - fastPolicyName, extraSpecs)) - # Check default storage group number of volumes. - volumeInstanceNames = self.get_devices_from_storage_group( - conn, defaultStorageGroupInstanceName) - LOG.debug( - "End: number of volumes in default storage group: %(numVol)d.", - {'numVol': len(volumeInstanceNames)}) - - def get_target_wwns(self, conn, mvInstanceName): - """Get the DA ports wwns. 
- - :param conn: the ecom connection - :param mvInstanceName: masking view instance name - :returns: list -- the list of target wwns for the masking view - """ - targetWwns = [] - targetPortInstanceNames = conn.AssociatorNames( - mvInstanceName, - ResultClass='Symm_FCSCSIProtocolEndpoint') - numberOfPorts = len(targetPortInstanceNames) - if numberOfPorts <= 0: - LOG.warning("No target ports found in " - "masking view %(maskingView)s.", - {'numPorts': len(targetPortInstanceNames), - 'maskingView': mvInstanceName}) - for targetPortInstanceName in targetPortInstanceNames: - targetWwns.append(targetPortInstanceName['Name']) - return targetWwns - - def get_masking_view_by_volume(self, conn, volumeInstance, connector): - """Given volume, retrieve the masking view instance name. - - :param conn: the ecom connection - :param volumeInstance: the volume instance - :param connector: the connector object - :returns: masking view instance name - """ - - storageSystemName = volumeInstance['SystemName'] - controllerConfigService = ( - self.utils.find_controller_configuration_service( - conn, storageSystemName)) - volumeName = volumeInstance['ElementName'] - mvInstanceName = ( - self._get_sg_or_mv_associated_with_initiator( - conn, controllerConfigService, volumeInstance.path, - volumeName, connector, False)) - return mvInstanceName - - def get_masking_views_by_port_group(self, conn, portGroupInstanceName): - """Given port group, retrieve the masking view instance name. - - :param conn: the ecom connection - :param portGroupInstanceName: the instance name of the port group - :returns: masking view instance names - """ - mvInstanceNames = conn.AssociatorNames( - portGroupInstanceName, ResultClass='Symm_LunMaskingView') - return mvInstanceNames - - def get_masking_views_by_initiator_group( - self, conn, initiatorGroupInstanceName): - """Given initiator group, retrieve the masking view instance name. 
- - Retrieve the list of masking view instances associated with the - initiator group instance name. - - :param conn: the ecom connection - :param initiatorGroupInstanceName: the instance name of the - initiator group - :returns: list of masking view instance names - """ - mvInstanceNames = conn.AssociatorNames( - initiatorGroupInstanceName, ResultClass='Symm_LunMaskingView') - return mvInstanceNames - - def get_port_group_from_masking_view(self, conn, maskingViewInstanceName): - """Get the port group in a masking view. - - :param conn: the ecom connection - :param maskingViewInstanceName: masking view instance name - :returns: portGroupInstanceName - """ - portGroupInstanceNames = conn.AssociatorNames( - maskingViewInstanceName, ResultClass='SE_TargetMaskingGroup') - if len(portGroupInstanceNames) > 0: - LOG.debug("Found port group %(pg)s in masking view %(mv)s.", - {'pg': portGroupInstanceNames[0], - 'mv': maskingViewInstanceName}) - return portGroupInstanceNames[0] - else: - LOG.warning("No port group found in masking view %(mv)s.", - {'mv': maskingViewInstanceName}) - - def get_initiator_group_from_masking_view( - self, conn, maskingViewInstanceName): - """Get initiator group in a masking view. 
- - :param conn: the ecom connection - :param maskingViewInstanceName: masking view instance name - :returns: initiatorGroupInstanceName or None if it is not found - """ - initiatorGroupInstanceNames = conn.AssociatorNames( - maskingViewInstanceName, ResultClass='SE_InitiatorMaskingGroup') - if len(initiatorGroupInstanceNames) > 0: - LOG.debug("Found initiator group %(ig)s in masking view %(mv)s.", - {'ig': initiatorGroupInstanceNames[0], - 'mv': maskingViewInstanceName}) - return initiatorGroupInstanceNames[0] - else: - LOG.warning("No Initiator group found in masking view " - "%(mv)s.", {'mv': maskingViewInstanceName}) - - def _get_sg_or_mv_associated_with_initiator( - self, conn, controllerConfigService, volumeInstanceName, - volumeName, connector, getSG=True): - """Get storage group or masking view associated with connector. - - If the connector gets passed then extra logic required to - get storage group. - - :param conn: the ecom connection - :param controllerConfigService: storage system instance name - :param volumeInstanceName: volume instance name - :param volumeName: volume element name - :param connector: the connector object - :param getSG: True if to get storage group; otherwise get masking - :returns: foundInstanceName(can be None) - """ - foundInstanceName = None - initiatorNames = self._find_initiator_names(conn, connector) - igInstanceNameFromConnector = self._find_initiator_masking_group( - conn, controllerConfigService, initiatorNames) - # Device can be shared by multi-SGs in a multi-host attach case. - storageGroupInstanceNames = ( - self.get_associated_masking_groups_from_device( - conn, volumeInstanceName)) - LOG.debug("Found storage groups volume " - "%(volumeName)s is in: %(storageGroups)s", - {'volumeName': volumeName, - 'storageGroups': storageGroupInstanceNames}) - if storageGroupInstanceNames: # not empty - # Get the SG by IGs. - for sgInstanceName in storageGroupInstanceNames: - # Get maskingview from storage group. 
- mvInstanceNames = self.get_masking_view_from_storage_group( - conn, sgInstanceName) - # Get initiator group from masking view. - for mvInstanceName in mvInstanceNames: - host = self.utils.get_host_short_name(connector['host']) - mvInstance = conn.GetInstance(mvInstanceName) - if host not in mvInstance['ElementName']: - LOG.info( - "Check 1: Connector host %(connHost)s " - "does not match mv host %(mvHost)s. Skipping...", - {'connHost': host, - 'mvHost': mvInstance['ElementName']}) - continue - LOG.debug("Found masking view associated with SG " - "%(storageGroup)s: %(maskingview)s", - {'maskingview': mvInstanceName, - 'storageGroup': sgInstanceName}) - igInstanceName = ( - self.get_initiator_group_from_masking_view( - conn, mvInstanceName)) - LOG.debug("Initiator Group in masking view %(ig)s: " - "IG associated with connector " - "%(igFromConnector)s.", - {'ig': igInstanceName, - 'igFromConnector': igInstanceNameFromConnector}) - if igInstanceName == igInstanceNameFromConnector: - if getSG is True: - foundInstanceName = sgInstanceName - LOG.debug("Found the storage group associated " - "with initiator %(initiator)s: " - "%(storageGroup)s", - {'initiator': initiatorNames, - 'storageGroup': foundInstanceName}) - else: - foundInstanceName = mvInstanceName - LOG.debug("Found the masking view associated with " - "initiator %(initiator)s: " - "%(maskingview)s.", - {'initiator': initiatorNames, - 'maskingview': foundInstanceName}) - - break - return foundInstanceName - - def _remove_last_vol_and_delete_sg(self, conn, controllerConfigService, - storageGroupInstanceName, - storageGroupName, volumeInstanceName, - volumeName, extraSpecs): - """Remove the last volume and delete the storage group - - :param conn: the ecom connection - :param controllerConfigService: controller config service - :param storageGroupInstanceName: storage group instance name - :param storageGroupName: storage group name - :param volumeInstanceName: volume instance name - :param volumeName: volume 
name - :param extrSpecs: additional info - """ - self.provision.remove_device_from_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstanceName, volumeName, extraSpecs) - - LOG.debug( - "Remove the last volume %(volumeName)s completed " - "successfully.", - {'volumeName': volumeName}) - - # Delete storage group. - self.delete_storage_group(conn, controllerConfigService, - storageGroupInstanceName, extraSpecs) - - def delete_storage_group(self, conn, controllerConfigService, - storageGroupInstanceName, extraSpecs): - """Delete a given storage group. - - :param conn: connection to the ecom server - :param controllerConfigService: controller config service - :param storageGroupInstanceName: the storage group instance - :param extraSpecs: the extra specifications - """ - # Delete storage group. - self._delete_storage_group(conn, controllerConfigService, - storageGroupInstanceName, extraSpecs) - storageGroupInstance = self.utils.get_existing_instance( - conn, storageGroupInstanceName) - if storageGroupInstance: - exceptionMessage = ( - _("Storage group %(storageGroupName)s " - "was not deleted successfully") - % {'storageGroupName': storageGroupInstanceName}) - - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - LOG.debug("Storage Group successfully deleted.") - - def _delete_storage_group(self, conn, controllerConfigService, - storageGroupInstanceName, extraSpecs): - """Delete empty storage group - - :param conn: the ecom connection - :param controllerConfigService: controller config service - :param storageGroupInstanceName: storage group instance name - :param extraSpecs: extra specifications - """ - rc, job = conn.InvokeMethod( - 'DeleteGroup', - controllerConfigService, - MaskingGroup=storageGroupInstanceName, - Force=True) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Deleting 
Group: %(storageGroupName)s. " - "Return code: %(rc)lu. Error: %(error)s") - % {'storageGroupName': storageGroupInstanceName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) + # Check that SG is not part of a masking view + LOG.info("Using existing default storage group") + masking_views = self.rest.get_masking_views_from_storage_group( + serial_number, storagegroup_name) + if masking_views: + exception_message = (_( + "Default storage group %(sg_name)s is part of masking " + "views %(mvs)s. Please remove it from all masking views") + % {'sg_name': storagegroup_name, 'mvs': masking_views}) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( - data=exceptionMessage) + data=exception_message) - def _delete_initiator_group(self, conn, controllerConfigService, - initiatorGroupInstanceName, initiatorGroupName, - extraSpecs): - """Delete an initiatorGroup. + return storagegroup_name - :param conn - connection to the ecom server - :param controllerConfigService - controller config service - :param initiatorGroupInstanceName - the initiator group instance name - :param initiatorGroupName - initiator group name - :param extraSpecs: extra specifications - """ + def _remove_last_vol_and_delete_sg( + self, serial_number, device_id, volume_name, + storagegroup_name, extra_specs, parent_sg_name=None): + """Remove the last volume and delete the storage group. - rc, job = conn.InvokeMethod( - 'DeleteGroup', - controllerConfigService, - MaskingGroup=initiatorGroupInstanceName, - Force=True) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Deleting Initiator Group: %(initiatorGroupName)s. " - "Return code: %(rc)lu. 
Error: %(error)s") - % {'initiatorGroupName': initiatorGroupName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - LOG.debug("Initiator group %(initiatorGroupName)s " - "is successfully deleted.", - {'initiatorGroupName': initiatorGroupName}) - else: - LOG.debug("Initiator group %(initiatorGroupName)s " - "is successfully deleted.", - {'initiatorGroupName': initiatorGroupName}) - - def _delete_storage_hardware_id(self, - conn, - hardwareIdManagementService, - hardwareIdPath): - """Delete given initiator path - - Delete the initiator. Do not rise exception or failure if deletion - fails due to any reasons. - - :param conn - connection to the ecom server - :param hardwareIdManagementService - hardware id management service - :param hardwareIdPath - The path of the initiator object + If the storage group is a child of another storage group, + it must be removed from the parent before deletion. + :param serial_number: the array serial number + :param device_id: the volume device id + :param volume_name: the volume name + :param storagegroup_name: the sg name + :param extra_specs: extra specifications + :param parent_sg_name: the parent sg name """ - ret = -1 - try: - ret = conn.InvokeMethod('DeleteStorageHardwareID', - hardwareIdManagementService, - HardwareID = hardwareIdPath) - except Exception: - pass - if ret == 0: - LOG.debug("Deletion of initiator path %(hardwareIdPath)s " - "is successful.", {'hardwareIdPath': hardwareIdPath}) - else: - LOG.debug("Deletion of initiator path %(hardwareIdPath)s " - "failed.", {'hardwareIdPath': hardwareIdPath}) + self._remove_vol_from_storage_group( + serial_number, device_id, storagegroup_name, volume_name, + extra_specs) - def _delete_initiators_from_initiator_group(self, conn, - controllerConfigService, - initiatorGroupInstanceName, - initiatorGroupName): - """Delete initiators + LOG.debug("Remove the last volume %(volumeName)s 
completed " + "successfully.", {'volumeName': volume_name}) + if parent_sg_name: + self.rest.remove_child_sg_from_parent_sg( + serial_number, storagegroup_name, parent_sg_name, + extra_specs) - Delete all initiators associated with the initiator group instance. - Cleanup whatever is possible. It will not return any failure or - rise exception if deletion fails due to any reasons. - - :param conn - connection to the ecom server - :param controllerConfigService - controller config service - :param initiatorGroupInstanceName - the initiator group instance name - """ - storageHardwareIdInstanceNames = ( - conn.AssociatorNames(initiatorGroupInstanceName, - ResultClass='SE_StorageHardwareID')) - if len(storageHardwareIdInstanceNames) == 0: - LOG.debug("No initiators found in Initiator group " - "%(initiatorGroupName)s.", - {'initiatorGroupName': initiatorGroupName}) - return - storageSystemName = controllerConfigService['SystemName'] - hardwareIdManagementService = ( - self.utils.find_storage_hardwareid_service(conn, - storageSystemName)) - for storageHardwareIdInstanceName in storageHardwareIdInstanceNames: - initiatorName = storageHardwareIdInstanceName['InstanceID'] - hardwareIdPath = storageHardwareIdInstanceName - LOG.debug("Initiator %(initiatorName)s " - "will be deleted from the Initiator group " - "%(initiatorGroupName)s. HardwareIdPath is " - "%(hardwareIdPath)s.", - {'initiatorName': initiatorName, - 'initiatorGroupName': initiatorGroupName, - 'hardwareIdPath': hardwareIdPath}) - self._delete_storage_hardware_id(conn, - hardwareIdManagementService, - hardwareIdPath) + self.rest.delete_storage_group(serial_number, storagegroup_name) def _last_volume_delete_initiator_group( - self, conn, controllerConfigService, - initiatorGroupInstanceName, extraSpecs, host=None): + self, serial_number, initiatorgroup_name, host): """Delete the initiator group. 
Delete the Initiator group if it has been created by the VMAX driver, and if there are no masking views associated with it. - - :param conn: the ecom connection - :param controllerConfigService: controller config service - :param igInstanceNames: initiator group instance name - :param extraSpecs: extra specifications + :param serial_number: the array serial number + :param initiatorgroup_name: initiator group name :param host: the short name of the host """ - defaultInitiatorGroupName = None - initiatorGroupInstance = conn.GetInstance(initiatorGroupInstanceName) - initiatorGroupName = initiatorGroupInstance['ElementName'] protocol = self.utils.get_short_protocol_type(self.protocol) - if host: - defaultInitiatorGroupName = (( - "OS-%(shortHostName)s-%(protocol)s-IG" - % {'shortHostName': host, - 'protocol': protocol})) - if initiatorGroupName == defaultInitiatorGroupName: - maskingViewInstanceNames = ( - self.get_masking_views_by_initiator_group( - conn, initiatorGroupInstanceName)) - if len(maskingViewInstanceNames) == 0: + default_ig_name = ("OS-%(shortHostName)s-%(protocol)s-IG" + % {'shortHostName': host, + 'protocol': protocol}) + + if initiatorgroup_name == default_ig_name: + maskingview_names = ( + self.rest.get_masking_views_by_initiator_group( + serial_number, initiatorgroup_name)) + if not maskingview_names: LOG.debug( "Last volume associated with the initiator group - " "deleting the associated initiator group " - "%(initiatorGroupName)s.", - {'initiatorGroupName': initiatorGroupName}) - self._delete_initiators_from_initiator_group( - conn, controllerConfigService, initiatorGroupInstanceName, - initiatorGroupName) - self._delete_initiator_group(conn, controllerConfigService, - initiatorGroupInstanceName, - initiatorGroupName, extraSpecs) + "%(initiatorgroup_name)s.", + {'initiatorgroup_name': initiatorgroup_name}) + self.rest.delete_initiator_group( + serial_number, initiatorgroup_name) else: - LOG.warning("Initiator group %(initiatorGroupName)s is " - 
"associated with masking views and can't be " - "deleted. Number of associated masking view " - "is: %(nmv)d.", - {'initiatorGroupName': initiatorGroupName, - 'nmv': len(maskingViewInstanceNames)}) + LOG.warning("Initiator group %(ig_name)s is associated with " + "masking views and can't be deleted. Number of " + "associated masking view is: %(nmv)d.", + {'ig_name': initiatorgroup_name, + 'nmv': len(maskingview_names)}) else: - LOG.warning("Initiator group %(initiatorGroupName)s was " + LOG.warning("Initiator group %(ig_name)s was " "not created by the VMAX driver so will " "not be deleted by the VMAX driver.", - {'initiatorGroupName': initiatorGroupName}) - - def _create_hardware_ids( - self, conn, initiatorNames, storageSystemName): - """Create hardwareIds for initiator(s). - - :param conn: the connection to the ecom server - :param initiatorNames: the list of initiator names - :param storageSystemName: the storage system name - :returns: list -- foundHardwareIDsInstanceNames - """ - foundHardwareIDsInstanceNames = [] - - hardwareIdManagementService = ( - self.utils.find_storage_hardwareid_service( - conn, storageSystemName)) - for initiatorName in initiatorNames: - hardwareIdInstanceName = ( - self.utils.create_storage_hardwareId_instance_name( - conn, hardwareIdManagementService, initiatorName)) - LOG.debug( - "Created hardwareId Instance: %(hardwareIdInstanceName)s.", - {'hardwareIdInstanceName': hardwareIdInstanceName}) - foundHardwareIDsInstanceNames.append(hardwareIdInstanceName) - - return foundHardwareIDsInstanceNames - - def _get_port_group_name_from_mv(self, conn, maskingViewName, - storageSystemName): - """Get the port group name from the masking view. 
- - :param conn: the connection to the ecom server - :param maskingViewName: the masking view name - :param storageSystemName: the storage system name - :returns: String - port group name - String - error message - """ - errorMessage = None - portGroupName = None - portGroupInstanceName = ( - self._get_port_group_from_masking_view( - conn, maskingViewName, storageSystemName)) - if portGroupInstanceName is None: - errorMessage = ("Cannot get port group from masking view: " - "%(maskingViewName)s." % - {'maskingViewName': maskingViewName}) - LOG.error(errorMessage) - else: - try: - portGroupInstance = ( - conn.GetInstance(portGroupInstanceName)) - portGroupName = ( - portGroupInstance['ElementName']) - except Exception: - errorMessage = ("Cannot get port group name.") - LOG.error(errorMessage) - return portGroupName, errorMessage - - @coordination.synchronized('emc-sg-' - '{storageGroupName}') - def remove_device_from_storage_group( - self, conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, storageGroupName, extraSpecs): - """Remove a device from a storage group. - - :param conn: the connection to the ecom server - :param controllerConfigService: the controller config service - :param storageGroupInstanceName: the sg instance - :param volumeInstance: the volume instance - :param extraSpecs: the extra specifications - """ - return self.provision.remove_device_from_storage_group( - conn, controllerConfigService, storageGroupInstanceName, - volumeInstance, volumeName, extraSpecs) + {'ig_name': initiatorgroup_name}) diff --git a/cinder/volume/drivers/dell_emc/vmax/provision.py b/cinder/volume/drivers/dell_emc/vmax/provision.py index 27cfb9b4bdd..bbdb02797b9 100644 --- a/cinder/volume/drivers/dell_emc/vmax/provision.py +++ b/cinder/volume/drivers/dell_emc/vmax/provision.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. +# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -12,1069 +12,359 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + import time from oslo_log import log as logging -import six from cinder import coordination -from cinder import exception -from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vmax import utils - LOG = logging.getLogger(__name__) -STORAGEGROUPTYPE = 4 -POSTGROUPTYPE = 3 - -EMC_ROOT = 'root/emc' -THINPROVISIONINGCOMPOSITE = 32768 -THINPROVISIONING = 5 -SYNC_CLONE_LOCAL = 10 -COPY_ON_WRITE = 6 -TF_CLONE = 8 - class VMAXProvision(object): - """Provisioning Class for SMI-S based EMC volume drivers. + """Provisioning Class for Dell EMC VMAX volume drivers. - This Provisioning class is for EMC volume drivers based on SMI-S. It supports VMAX arrays. """ - def __init__(self, prtcl): - self.protocol = prtcl - self.utils = utils.VMAXUtils(prtcl) + def __init__(self, rest): + self.utils = utils.VMAXUtils() + self.rest = rest - def delete_volume_from_pool( - self, conn, storageConfigservice, volumeInstanceName, volumeName, - extraSpecs): - """Given the volume instance remove it from the pool. + def create_storage_group( + self, array, storagegroup_name, srp, slo, workload, extra_specs): + """Create a new storage group. 
- :param conn: connection to the ecom server - :param storageConfigservice: volume created from job - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name (String) - :param extraSpecs: additional info - :returns: int -- return code - :raises: VolumeBackendAPIException + :param array: the array serial number + :param storagegroup_name: the group name (String) + :param srp: the SRP (String) + :param slo: the SLO (String) + :param workload: the workload (String) + :param extra_specs: additional info + :returns: storagegroup - storage group object """ - startTime = time.time() + start_time = time.time() - if isinstance(volumeInstanceName, list): - theElements = volumeInstanceName - volumeName = 'Bulk Delete' - else: - theElements = [volumeInstanceName] + @coordination.synchronized("emc-sg-{storage_group}") + def do_create_storage_group(storage_group): + storagegroup = self.rest.create_storage_group( + array, storage_group, srp, slo, workload, extra_specs) - rc, job = conn.InvokeMethod( - 'ReturnElementsToStoragePool', storageConfigservice, - TheElements=theElements) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Delete Volume: %(volumeName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod EMCReturnToStoragePool took: " - "%(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc - - def create_volume_from_pool( - self, conn, storageConfigService, volumeName, - poolInstanceName, volumeSize, extraSpecs): - """Create the volume in the specified pool. 
- - :param conn: the connection information to the ecom server - :param storageConfigService: the storage configuration service - :param volumeName: the volume name (String) - :param poolInstanceName: the pool instance name to create - the dummy volume in - :param volumeSize: volume size (String) - :param extraSpecs: additional info - :returns: dict -- the volume dict - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'CreateOrModifyElementFromStoragePool', - storageConfigService, ElementName=volumeName, - InPool=poolInstanceName, - ElementType=self.utils.get_num(THINPROVISIONING, '16'), - Size=self.utils.get_num(volumeSize, '64'), - EMCBindElements=False) - - LOG.debug("Create Volume: %(volumename)s Return code: %(rc)lu.", - {'volumename': volumeName, - 'rc': rc}) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Volume: %(volumeName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - # Find the newly created volume. - volumeDict = self.get_volume_dict_from_job(conn, job['Job']) - return volumeDict, rc - - def create_and_get_storage_group(self, conn, controllerConfigService, - storageGroupName, volumeInstanceName, - extraSpecs): - """Create a storage group and return it. 
- - :param conn: the connection information to the ecom server - :param controllerConfigService: the controller configuration service - :param storageGroupName: the storage group name (String - :param volumeInstanceName: the volume instance name - :param extraSpecs: additional info - :returns: foundStorageGroupInstanceName - instance name of the - default storage group - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - @coordination.synchronized("emc-sg-{storageGroupName}") - def do_create_storage_group(storageGroupName): - rc, job = conn.InvokeMethod( - 'CreateGroup', controllerConfigService, - GroupName=storageGroupName, - Type=self.utils.get_num(STORAGEGROUPTYPE, '16'), - Members=[volumeInstanceName]) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Group: %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'groupName': storageGroupName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateGroup " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, + LOG.debug("Create storage group took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, time.time())}) - foundStorageGroupInstanceName = self._find_new_storage_group( - conn, job, storageGroupName) + LOG.info("Storage group %(sg)s created successfully.", + {'sg': storagegroup_name}) + return storagegroup - return foundStorageGroupInstanceName - return do_create_storage_group(storageGroupName) + return do_create_storage_group(storagegroup_name) - def create_storage_group_no_members( - self, conn, controllerConfigService, groupName, extraSpecs): - """Create a new storage group that has no members. 
+ def create_volume_from_sg(self, array, volume_name, storagegroup_name, + volume_size, extra_specs): + """Create a new volume in the given storage group. - :param conn: connection to the ecom server - :param controllerConfigService: the controller configuration service - :param groupName: the proposed group name - :param extraSpecs: additional info - :returns: foundStorageGroupInstanceName - :raises: VolumeBackendAPIException + :param array: the array serial number + :param volume_name: the volume name (String) + :param storagegroup_name: the storage group name + :param volume_size: volume size (String) + :param extra_specs: the extra specifications + :returns: dict -- volume_dict - the volume dict """ - startTime = time.time() + @coordination.synchronized("emc-sg-{storage_group}") + def do_create_volume_from_sg(storage_group): + start_time = time.time() - rc, job = conn.InvokeMethod( - 'CreateGroup', controllerConfigService, GroupName=groupName, - Type=self.utils.get_num(STORAGEGROUPTYPE, '16'), - DeleteWhenBecomesUnassociated=False) + volume_dict = self.rest.create_volume_from_sg( + array, volume_name, storage_group, + volume_size, extra_specs) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Group: %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'groupName': groupName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateGroup " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - foundStorageGroupInstanceName = self._find_new_storage_group( - conn, job, groupName) - - return foundStorageGroupInstanceName - - def _find_new_storage_group( - self, conn, maskingGroupDict, storageGroupName): - """After creating a new storage group find it and return it. 
- - :param conn: connection to the ecom server - :param maskingGroupDict: the maskingGroupDict dict - :param storageGroupName: storage group name (String) - :returns: maskingGroupDict['MaskingGroup'] - """ - foundStorageGroupInstanceName = None - if 'MaskingGroup' in maskingGroupDict: - foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup'] - - return foundStorageGroupInstanceName - - def get_volume_dict_from_job(self, conn, jobInstance): - """Given the jobInstance determine the volume Instance. - - :param conn: the ecom connection - :param jobInstance: the instance of a job - :returns: dict -- volumeDict - an instance of a volume - """ - associators = conn.Associators( - jobInstance, - ResultClass='EMC_StorageVolume') - volpath = associators[0].path - volumeDict = {} - volumeDict['classname'] = volpath.classname - keys = {} - keys['CreationClassName'] = volpath['CreationClassName'] - keys['SystemName'] = volpath['SystemName'] - keys['DeviceID'] = volpath['DeviceID'] - keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] - volumeDict['keybindings'] = keys - - return volumeDict - - def remove_device_from_storage_group( - self, conn, controllerConfigService, sgInstanceName, - volumeInstanceName, volumeName, extraSpecs): - """Remove a volume from a storage group. 
- - :param conn: the connection to the ecom server - :param controllerConfigService: the controller configuration service - :param sgInstanceName: the instance name of the storage group - :param volumeInstanceName: the instance name of the volume - :param volumeName: the volume name (String) - :param extraSpecs: additional info - :returns: int -- the return code of the job - :raises: VolumeBackendAPIException - """ - - try: - storageGroupInstance = conn.GetInstance(sgInstanceName) - except Exception: - exceptionMessage = (_( - "Unable to get the name of the storage group.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - def do_remove_volume_from_sg(): - startTime = time.time() - rc, jobDict = conn.InvokeMethod('RemoveMembers', - controllerConfigService, - MaskingGroup=sgInstanceName, - Members=[volumeInstanceName]) - if rc != 0: - rc, errorDesc = self.utils.wait_for_job_complete(conn, jobDict, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error removing volume %(vol)s from %(sg)s. " - "Error is: %(error)s.") - % {'vol': volumeName, - 'sg': storageGroupInstance['ElementName'], - 'error': errorDesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod RemoveMembers " + LOG.debug("Create volume from storage group " "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, + {'delta': self.utils.get_time_delta(start_time, time.time())}) + return volume_dict + return do_create_volume_from_sg(storagegroup_name) - return rc - return do_remove_volume_from_sg() + def delete_volume_from_srp(self, array, device_id, volume_name): + """Delete a volume from the srp. - def add_members_to_masking_group( - self, conn, controllerConfigService, storageGroupInstanceName, - volumeInstanceName, volumeName, extraSpecs): - """Add a member to a masking group group. 
- - :param conn: the connection to the ecom server - :param controllerConfigService: the controller configuration service - :param storageGroupInstanceName: the instance name of the storage group - :param volumeInstanceName: the instance name of the volume - :param volumeName: the volume name (String) - :param extraSpecs: additional info - :raises: VolumeBackendAPIException + :param array: the array serial number + :param device_id: the volume device id + :param volume_name: the volume name """ + start_time = time.time() + LOG.debug("Delete volume %(volume_name)s from srp.", + {'volume_name': volume_name}) + self.rest.delete_volume(array, device_id) + LOG.debug("Delete volume took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta( + start_time, time.time())}) + + def create_volume_snapvx(self, array, source_device_id, + snap_name, extra_specs): + """Create a snapVx of a volume. + + :param array: the array serial number + :param source_device_id: source volume device id + :param snap_name: the snapshot name + :param extra_specs: the extra specifications + """ + start_time = time.time() + LOG.debug("Create Snap Vx snapshot of: %(source)s.", + {'source': source_device_id}) + self.rest.create_volume_snap( + array, snap_name, source_device_id, extra_specs) + LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + + def create_volume_replica( + self, array, source_device_id, target_device_id, + snap_name, extra_specs, create_snap=False): + """Create a snap vx of a source and copy to a target. 
+ + :param array: the array serial number + :param source_device_id: source volume device id + :param target_device_id: target volume device id + :param snap_name: the name for the snap shot + :param extra_specs: extra specifications + :param create_snap: Flag for create snapvx + """ + start_time = time.time() + if create_snap: + self.create_volume_snapvx(array, source_device_id, + snap_name, extra_specs) + # Link source to target + self.rest.modify_volume_snap( + array, source_device_id, target_device_id, snap_name, + extra_specs, link=True) + + LOG.debug("Create element replica took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, + time.time())}) + + def break_replication_relationship( + self, array, target_device_id, source_device_id, snap_name, + extra_specs, wait_for_sync=True): + """Unlink a snapshot from its target volume. + + :param array: the array serial number + :param source_device_id: source volume device id + :param target_device_id: target volume device id + :param snap_name: the name for the snap shot + :param extra_specs: extra specifications + :param wait_for_sync: flag for wait for sync + """ + LOG.debug("Break snap vx link relationship between: %(src)s " + "and: %(tgt)s.", + {'src': source_device_id, 'tgt': target_device_id}) + + if wait_for_sync: + self.rest.is_sync_complete(array, source_device_id, + target_device_id, snap_name, + extra_specs) try: - storageGroupInstance = conn.GetInstance(storageGroupInstanceName) - except Exception: - exceptionMessage = (_( - "Unable to get the name of the storage group.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + self.rest.modify_volume_snap( + array, source_device_id, target_device_id, snap_name, + extra_specs, unlink=True) + except Exception as e: + LOG.error( + "Error modifying volume snap. 
Exception received: %(e)s.", + {'e': e}) - @coordination.synchronized('emc-sg-' - '{storageGroupInstance[ElementName]}') - def do_add_volume_to_sg(storageGroupInstance): - startTime = time.time() - rc, job = conn.InvokeMethod( - 'AddMembers', controllerConfigService, - MaskingGroup=storageGroupInstanceName, - Members=[volumeInstanceName]) + def delete_volume_snap(self, array, snap_name, source_device_id): + """Delete a snapVx snapshot of a volume. - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error adding volume %(vol)s to %(sg)s. %(error)s.") - % {'vol': volumeName, - 'sg': storageGroupInstance['ElementName'], - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod AddMembers " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc - return do_add_volume_to_sg(storageGroupInstance) - - def unbind_volume_from_storage_pool( - self, conn, storageConfigService, - volumeInstanceName, volumeName, extraSpecs): - """Unbind a volume from a pool and return the unbound volume. 
- - :param conn: the connection information to the ecom server - :param storageConfigService: the storage configuration service - instance name - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name - :param extraSpecs: additional info - :returns: int -- return code - :returns: the job object - :raises: VolumeBackendAPIException + :param array: the array serial number + :param snap_name: the snapshot name + :param source_device_id: the source device id """ - startTime = time.time() + LOG.debug("Delete SnapVx: %(snap_name)s for volume %(vol)s.", + {'vol': source_device_id, 'snap_name': snap_name}) + self.rest.delete_volume_snap(array, snap_name, source_device_id) - rc, job = conn.InvokeMethod( - 'EMCUnBindElement', - storageConfigService, - TheElement=volumeInstanceName) + def delete_temp_volume_snap(self, array, snap_name, source_device_id): + """Delete the temporary snapshot created for clone operations. - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error unbinding volume %(vol)s from pool. %(error)s.") - % {'vol': volumeName, 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + There can be instances where the source and target both attempt to + delete a temp snapshot simultaneously, so we must lock the snap and + then double check it is on the array. 
+ :param array: the array serial number + :param snap_name: the snapshot name + :param source_device_id: the source device id + """ - LOG.debug("InvokeMethod EMCUnBindElement " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, + @coordination.synchronized("emc-snapvx-{snapvx_name}") + def do_delete_temp_snap(snapvx_name): + # Ensure snap has not been recently deleted + if self.rest.get_volume_snap( + array, source_device_id, snapvx_name): + self.delete_volume_snap(array, snapvx_name, source_device_id) + + do_delete_temp_snap(snap_name) + + def delete_volume_snap_check_for_links(self, array, snap_name, + source_device, extra_specs): + """Check if a snap has any links before deletion. + + If a snapshot has any links, break the replication relationship + before deletion. + :param array: the array serial number + :param snap_name: the snapshot name + :param source_device: the source device id + :param extra_specs: the extra specifications + """ + LOG.debug("Check for linked devices to SnapVx: %(snap_name)s " + "for volume %(vol)s.", + {'vol': source_device, 'snap_name': snap_name}) + linked_list = self.rest.get_snap_linked_device_list( + array, source_device, snap_name) + for link in linked_list: + target_device = link['targetDevice'] + self.break_replication_relationship( + array, target_device, source_device, snap_name, extra_specs) + self.delete_volume_snap(array, snap_name, source_device) + + def extend_volume(self, array, device_id, new_size, extra_specs): + """Extend a volume. 
+ + :param array: the array serial number + :param device_id: the volume device id + :param new_size: the new size (GB) + :param extra_specs: the extra specifications + :return: status_code + """ + start_time = time.time() + self.rest.extend_volume(array, device_id, new_size, extra_specs) + LOG.debug("Extend VMAX volume took: %(delta)s H:MM:SS.", + {'delta': self.utils.get_time_delta(start_time, time.time())}) - return rc, job + def get_srp_pool_stats(self, array, array_info): + """Get the srp capacity stats. - def modify_composite_volume( - self, conn, elementCompositionService, theVolumeInstanceName, - inVolumeInstanceName, extraSpecs): - - """Given a composite volume add a storage volume to it. - - :param conn: the connection to the ecom - :param elementCompositionService: the element composition service - :param theVolumeInstanceName: the existing composite volume - :param inVolumeInstanceName: the volume you wish to add to the - composite volume - :param extraSpecs: additional info - :returns: int -- rc - return code - :returns: the job object - :raises: VolumeBackendAPIException + :param array: the array serial number + :param array_info: the array dict + :returns: total_capacity_gb + :returns: remaining_capacity_gb + :returns: subscribed_capacity_gb + :returns: array_reserve_percent + :returns: wlp_enabled """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'CreateOrModifyCompositeElement', - elementCompositionService, - TheElement=theVolumeInstanceName, - InElements=[inVolumeInstanceName]) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error adding volume to composite volume. 
" - "Error is: %(error)s.") - % {'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateOrModifyCompositeElement " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc, job - - def create_composite_volume( - self, conn, elementCompositionService, volumeSize, volumeName, - poolInstanceName, compositeType, numMembers, extraSpecs): - """Create a new volume using the auto meta feature. - - :param conn: connection to the ecom server - :param elementCompositionService: the element composition service - :param volumeSize: the size of the volume - :param volumeName: user friendly name - :param poolInstanceName: the pool to bind the composite volume to - :param compositeType: the proposed composite type of the volume - e.g striped/concatenated - :param numMembers: the number of meta members to make up the composite. - If it is 1 then a non composite is created - :param extraSpecs: additional info - :returns: dict -- volumeDict - :returns: int -- return code - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - newMembers = 2 + total_capacity_gb = 0 + remaining_capacity_gb = 0 + allocated_capacity_gb = None + subscribed_capacity_gb = 0 + array_reserve_percent = 0 + wlp_enabled = False + srp = array_info['srpName'] LOG.debug( - "Parameters for CreateOrModifyCompositeElement: " - "elementCompositionService: %(elementCompositionService)s " - "provisioning: %(provisioning)lu " - "volumeSize: %(volumeSize)s " - "newMembers: %(newMembers)lu " - "poolInstanceName: %(poolInstanceName)s " - "compositeType: %(compositeType)lu " - "numMembers: %(numMembers)s.", - {'elementCompositionService': elementCompositionService, - 'provisioning': THINPROVISIONINGCOMPOSITE, - 'volumeSize': volumeSize, - 'newMembers': newMembers, - 'poolInstanceName': poolInstanceName, - 'compositeType': compositeType, - 'numMembers': 
numMembers}) + "Retrieving capacity for srp %(srpName)s on array %(array)s.", + {'srpName': srp, 'array': array}) - rc, job = conn.InvokeMethod( - 'CreateOrModifyCompositeElement', elementCompositionService, - ElementName=volumeName, - ElementType=self.utils.get_num(THINPROVISIONINGCOMPOSITE, '16'), - Size=self.utils.get_num(volumeSize, '64'), - ElementSource=self.utils.get_num(newMembers, '16'), - EMCInPools=[poolInstanceName], - CompositeType=self.utils.get_num(compositeType, '16'), - EMCNumberOfMembers=self.utils.get_num(numMembers, '32')) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Volume: %(volumename)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumename': volumeName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateOrModifyCompositeElement " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - # Find the newly created volume. - volumeDict = self.get_volume_dict_from_job(conn, job['Job']) - - return volumeDict, rc - - def create_new_composite_volume( - self, conn, elementCompositionService, compositeHeadInstanceName, - compositeMemberInstanceName, compositeType, extraSpecs): - """Creates a new composite volume. - - Given a bound composite head and an unbound composite member - create a new composite volume. - - :param conn: connection to the ecom server - :param elementCompositionService: the element composition service - :param compositeHeadInstanceName: the composite head. This can be bound - :param compositeMemberInstanceName: the composite member. 
This must be - unbound - :param compositeType: the composite type e.g striped or concatenated - :param extraSpecs: additional info - :returns: int -- return code - :returns: the job object - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'CreateOrModifyCompositeElement', elementCompositionService, - ElementType=self.utils.get_num('2', '16'), - InElements=( - [compositeHeadInstanceName, compositeMemberInstanceName]), - CompositeType=self.utils.get_num(compositeType, '16')) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Creating new composite Volume Return code: " - "%(rc)lu. Error: %(error)s.") - % {'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateOrModifyCompositeElement " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc, job - - def _migrate_volume( - self, conn, storageRelocationServiceInstanceName, - volumeInstanceName, targetPoolInstanceName, extraSpecs): - """Migrate a volume to another pool. 
- - :param conn: the connection to the ecom server - :param storageRelocationServiceInstanceName: the storage relocation - service - :param volumeInstanceName: the volume to be migrated - :param targetPoolInstanceName: the target pool to migrate the volume to - :param extraSpecs: additional info - :returns: int -- return code - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'RelocateStorageVolumesToStoragePool', - storageRelocationServiceInstanceName, - TheElements=[volumeInstanceName], - TargetPool=targetPoolInstanceName) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Migrating volume from one pool to another. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - LOG.debug("InvokeMethod RelocateStorageVolumesToStoragePool " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc - - def migrate_volume_to_storage_pool( - self, conn, storageRelocationServiceInstanceName, - volumeInstanceName, targetPoolInstanceName, extraSpecs): - """Given the storage system name, get the storage relocation service. - - :param conn: the connection to the ecom server - :param storageRelocationServiceInstanceName: the storage relocation - service - :param volumeInstanceName: the volume to be migrated - :param targetPoolInstanceName: the target pool to migrate the - volume to. - :param extraSpecs: additional info - :returns: int -- rc, return code - :raises: VolumeBackendAPIException - """ - LOG.debug( - "Volume instance name is %(volumeInstanceName)s. " - "Pool instance name is : %(targetPoolInstanceName)s. 
", - {'volumeInstanceName': volumeInstanceName, - 'targetPoolInstanceName': targetPoolInstanceName}) - rc = -1 + srp_details = self.rest.get_srp_by_name(array, srp) + if not srp_details: + LOG.error("Unable to retrieve srp instance of %(srpName)s on " + "array %(array)s.", + {'srpName': srp, 'array': array}) + return 0, 0, 0, 0, False try: - rc = self._migrate_volume( - conn, storageRelocationServiceInstanceName, - volumeInstanceName, targetPoolInstanceName, extraSpecs) - except Exception as ex: - if 'source of a migration session' in six.text_type(ex): - try: - rc = self._terminate_migrate_session( - conn, volumeInstanceName, extraSpecs) - except Exception: - exceptionMessage = (_( - "Failed to terminate migrate session.")) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - try: - rc = self._migrate_volume( - conn, storageRelocationServiceInstanceName, - volumeInstanceName, targetPoolInstanceName, - extraSpecs) - except Exception: - exceptionMessage = (_( - "Failed to migrate volume for the second time.")) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + total_capacity_gb = srp_details['total_usable_cap_gb'] + allocated_capacity_gb = srp_details['total_allocated_cap_gb'] + subscribed_capacity_gb = srp_details['total_subscribed_cap_gb'] + remaining_capacity_gb = float( + total_capacity_gb - allocated_capacity_gb) + array_reserve_percent = srp_details['reserved_cap_percent'] + except KeyError: + pass - else: - exceptionMessage = (_( - "Failed to migrate volume for the first time.")) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - return rc - - def _terminate_migrate_session(self, conn, volumeInstanceName, - extraSpecs): - """Given the volume instance terminate a migrate session. 
- - :param conn: the connection to the ecom server - :param volumeInstanceName: the volume to be migrated - :param extraSpecs: additional info - :returns: int -- return code - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'RequestStateChange', volumeInstanceName, - RequestedState=self.utils.get_num(32769, '16')) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Terminating migrate session. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod RequestStateChange " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc - - def create_element_replica( - self, conn, repServiceInstanceName, cloneName, - sourceName, sourceInstance, targetInstance, extraSpecs, - copyOnWrite=False): - """Make SMI-S call to create replica for source element. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: replication service - :param cloneName: replica name - :param sourceName: source volume name - :param sourceInstance: source volume instance - :param targetInstance: the target instance - :param extraSpecs: additional info - :param copyOnWrite: optional - :returns: int -- return code - :returns: job object of the replica creation operation - :raises: VolumeBackendAPIException - """ - if copyOnWrite: - startTime = time.time() - # ReplicationType 10 - Synchronous Clone Local. - # Set DesiredCopyMethodology to Copy-On-Write (6). - rsdInstance = self.utils.set_copy_methodology_in_rsd( - conn, repServiceInstanceName, SYNC_CLONE_LOCAL, - COPY_ON_WRITE, extraSpecs) - - # SyncType 8 - Clone. - # ReplicationSettingData.DesiredCopyMethodology Copy-On-Write (6). 
- rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=self.utils.get_num(TF_CLONE, '16'), - ReplicationSettingData=rsdInstance, - SourceElement=sourceInstance.path) + total_slo_capacity = ( + self._get_remaining_slo_capacity_wlp( + array, srp, array_info)) + if total_slo_capacity != -1 and allocated_capacity_gb: + remaining_capacity_gb = float( + total_slo_capacity - allocated_capacity_gb) + wlp_enabled = True else: - startTime = time.time() - if targetInstance is None: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=self.utils.get_num(TF_CLONE, '16'), - SourceElement=sourceInstance.path) - else: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=self.utils.get_num(TF_CLONE, '16'), - SourceElement=sourceInstance.path, - TargetElement=targetInstance.path) + LOG.debug( + "Remaining capacity %(remaining_capacity_gb)s " + "GBs is determined from SRP capacity " + "and not the SLO capacity. Performance may " + "not be what you expect.", + {'remaining_capacity_gb': remaining_capacity_gb}) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Cloned Volume: " - "Volume: %(cloneName)s Source Volume:" - "%(sourceName)s. Return code: %(rc)lu. 
" - "Error: %(error)s.") - % {'cloneName': cloneName, - 'sourceName': sourceName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + return (total_capacity_gb, remaining_capacity_gb, + subscribed_capacity_gb, array_reserve_percent, wlp_enabled) - LOG.debug("InvokeMethod CreateElementReplica " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc, job + def _get_remaining_slo_capacity_wlp(self, array, srp, array_info): + """Get the remaining capacity of the SLO/ workload combination. - def delete_clone_relationship( - self, conn, repServiceInstanceName, syncInstanceName, extraSpecs, - force=False): - """Deletes the relationship between the clone and source volume. - - Makes an SMI-S call to break clone relationship between the clone - volume and the source. - 8/Detach - Delete the synchronization between two storage objects. - Treat the objects as independent after the synchronization is deleted. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: instance name of the replication service - :param syncInstanceName: instance name of the - SE_StorageSynchronized_SV_SV object - :param extraSpecs: additional info - :param force: optional param - :returns: int -- return code - :returns: job object of the replica creation operation - :raises: VolumeBackendAPIException + This is derived from the WLP portion of Unisphere. Please + see the UniSphere doc and the readme doc for details. 
+ :param array: the array serial number + :param srp: the srp name + :param array_info: array info dict + :return: remaining_capacity """ - startTime = time.time() + remaining_capacity = -1 + if array_info['SLO']: + headroom_capacity = self.rest.get_headroom_capacity( + array, srp, array_info['SLO'], array_info['Workload']) + if headroom_capacity: + remaining_capacity = headroom_capacity + LOG.debug("Received remaining SLO Capacity %(remaining)s GBs " + "for SLO %(SLO)s and workload %(workload)s.", + {'remaining': remaining_capacity, + 'SLO': array_info['SLO'], + 'workload': array_info['Workload']}) + return remaining_capacity - rc, job = conn.InvokeMethod( - 'ModifyReplicaSynchronization', repServiceInstanceName, - Operation=self.utils.get_num(8, '16'), - Synchronization=syncInstanceName, - Force=force) + def verify_slo_workload(self, array, slo, workload, srp): + """Check if SLO and workload values are valid. - LOG.debug("Delete clone relationship: Sync Name: %(syncName)s " - "Return code: %(rc)lu.", - {'syncName': syncInstanceName, - 'rc': rc}) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error break clone relationship: " - "Sync Name: %(syncName)s " - "Return code: %(rc)lu. Error: %(error)s.") - % {'syncName': syncInstanceName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod ModifyReplicaSynchronization " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc, job - - def create_consistency_group( - self, conn, replicationService, consistencyGroupName, extraSpecs): - """Create a new consistency group. 
- - :param conn: the connection to the ecom server - :param replicationService: the replication Service - :param consistencyGroupName: the CG group name - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object - :raises: VolumeBackendAPIException + :param array: the array serial number + :param slo: Service Level Object e.g bronze + :param workload: workload e.g DSS + :param srp: the storage resource pool name + :returns: boolean """ - startTime = time.time() + is_valid_slo, is_valid_workload = False, False - rc, job = conn.InvokeMethod( - 'CreateGroup', - replicationService, - GroupName=consistencyGroupName) + if workload: + if workload.lower() == 'none': + workload = None - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Failed to create consistency group: " - "%(consistencyGroupName)s " - "Return code: %(rc)lu. Error: %(error)s.") - % {'consistencyGroupName': consistencyGroupName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) + if not workload: + is_valid_workload = True - LOG.debug("InvokeMethod CreateGroup " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) + valid_slos = self.rest.get_slo_list(array) + valid_workloads = self.rest.get_workload_settings(array) + for valid_slo in valid_slos: + if slo == valid_slo: + is_valid_slo = True + break - return rc, job + for valid_workload in valid_workloads: + if workload == valid_workload: + is_valid_workload = True + break - def delete_consistency_group( - self, conn, replicationService, cgInstanceName, - consistencyGroupName, extraSpecs): + if not slo: + is_valid_slo = True + if workload: + is_valid_workload = False - """Delete a consistency group. + if not is_valid_slo: + LOG.error( + "SLO: %(slo)s is not valid. 
Valid values are: " + "%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos}) - :param conn: the connection to the ecom server - :param replicationService: the replication Service - :param cgInstanceName: the CG instance name - :param consistencyGroupName: the CG group name - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object - :raises: VolumeBackendAPIException - """ - startTime = time.time() + if not is_valid_workload: + LOG.error( + "Workload: %(workload)s is not valid. Valid values are " + "%(valid_workloads)s. Note you cannot " + "set a workload without an SLO.", + {'workload': workload, 'valid_workloads': valid_workloads}) - rc, job = conn.InvokeMethod( - 'DeleteGroup', - replicationService, - ReplicationGroup=cgInstanceName, - RemoveElements=True) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Failed to delete consistency group: " - "%(consistencyGroupName)s " - "Return code: %(rc)lu. Error: %(error)s.") - % {'consistencyGroupName': consistencyGroupName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod DeleteGroup " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc, job - - def add_volume_to_cg( - self, conn, replicationService, cgInstanceName, - volumeInstanceName, cgName, volumeName, extraSpecs): - """Add a volume to a consistency group. 
- - :param conn: the connection to the ecom server - :param replicationService: the replication Service - :param cgInstanceName: the CG instance name - :param volumeInstanceName: the volume instance name - :param cgName: the CG group name - :param volumeName: the volume name - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - if isinstance(volumeInstanceName, list): - theElements = volumeInstanceName - volumeName = 'Bulk Add' - else: - theElements = [volumeInstanceName] - - rc, job = conn.InvokeMethod( - 'AddMembers', - replicationService, - Members=theElements, - ReplicationGroup=cgInstanceName) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Failed to add volume %(volumeName)s " - "to consistency group %(cgName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'cgName': cgName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod AddMembers " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc, job - - def remove_volume_from_cg( - self, conn, replicationService, cgInstanceName, - volumeInstanceName, cgName, volumeName, extraSpecs): - """Remove a volume from a consistency group. 
- - :param conn: the connection to the ecom server - :param replicationService: the replication Service - :param cgInstanceName: the CG instance name - :param volumeInstanceName: the volume instance name - :param cgName: the CG group name - :param volumeName: the volume name - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - if isinstance(volumeInstanceName, list): - theElements = volumeInstanceName - volumeName = 'Bulk Remove' - else: - theElements = [volumeInstanceName] - - rc, job = conn.InvokeMethod( - 'RemoveMembers', - replicationService, - Members=theElements, - ReplicationGroup=cgInstanceName) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Failed to remove volume %(volumeName)s " - "from consistency group %(cgName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'cgName': cgName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod RemoveMembers " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc, job - - def create_group_replica( - self, conn, replicationService, - srcGroupInstanceName, tgtGroupInstanceName, relationName, - extraSpecs): - """Make SMI-S call to create replica for source group. 
- - :param conn: the connection to the ecom server - :param replicationService: replication service - :param srcGroupInstanceName: source group instance name - :param tgtGroupInstanceName: target group instance name - :param relationName: relation name - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object of the replica creation operation - :raises: VolumeBackendAPIException - """ - LOG.debug( - "Parameters for CreateGroupReplica: " - "replicationService: %(replicationService)s " - "RelationName: %(relationName)s " - "sourceGroup: %(srcGroup)s " - "targetGroup: %(tgtGroup)s.", - {'replicationService': replicationService, - 'relationName': relationName, - 'srcGroup': srcGroupInstanceName, - 'tgtGroup': tgtGroupInstanceName}) - # 8 for clone. - rc, job = conn.InvokeMethod( - 'CreateGroupReplica', - replicationService, - RelationshipName=relationName, - SourceGroup=srcGroupInstanceName, - TargetGroup=tgtGroupInstanceName, - SyncType=self.utils.get_num(8, '16'), - WaitForCopyState=self.utils.get_num(4, '16')) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMsg = (_("Error CreateGroupReplica: " - "source: %(source)s target: %(target)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'source': srcGroupInstanceName, - 'target': tgtGroupInstanceName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMsg) - raise exception.VolumeBackendAPIException(data=exceptionMsg) - return rc, job + return is_valid_slo, is_valid_workload diff --git a/cinder/volume/drivers/dell_emc/vmax/provision_v3.py b/cinder/volume/drivers/dell_emc/vmax/provision_v3.py deleted file mode 100644 index 6c312b19671..00000000000 --- a/cinder/volume/drivers/dell_emc/vmax/provision_v3.py +++ /dev/null @@ -1,1063 +0,0 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from oslo_log import log as logging -import six - -from cinder import coordination -from cinder import exception -from cinder.i18n import _ -from cinder.volume.drivers.dell_emc.vmax import utils - -LOG = logging.getLogger(__name__) - -STORAGEGROUPTYPE = 4 -POSTGROUPTYPE = 3 - -EMC_ROOT = 'root/emc' -THINPROVISIONINGCOMPOSITE = 32768 -THINPROVISIONING = 5 -INFO_SRC_V3 = 3 -ACTIVATESNAPVX = 4 -DEACTIVATESNAPVX = 19 -SNAPSYNCTYPE = 7 -RDF_FAILOVER = 10 -RDF_FAILBACK = 11 -RDF_RESYNC = 14 -RDF_SYNC_MODE = 2 -RDF_SYNCHRONIZED = 6 -RDF_FAILEDOVER = 12 - - -class VMAXProvisionV3(object): - """Provisioning Class for SMI-S based EMC volume drivers. - - This Provisioning class is for EMC volume drivers based on SMI-S. - It supports VMAX arrays. - """ - def __init__(self, prtcl): - self.protocol = prtcl - self.utils = utils.VMAXUtils(prtcl) - - def delete_volume_from_pool( - self, conn, storageConfigservice, volumeInstanceName, volumeName, - extraSpecs): - """Given the volume instance remove it from the pool. 
- - :param conn: connection to the ecom server - :param storageConfigservice: volume created from job - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name (String) - :param extraSpecs: additional info - :returns: int -- return code - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - if isinstance(volumeInstanceName, list): - theElements = volumeInstanceName - volumeName = 'Bulk Delete' - else: - theElements = [volumeInstanceName] - - rc, job = conn.InvokeMethod( - 'ReturnElementsToStoragePool', storageConfigservice, - TheElements=theElements) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Delete Volume: %(volumeName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod ReturnElementsToStoragePool took: " - "%(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc - - def create_volume_from_sg( - self, conn, storageConfigService, volumeName, - sgInstanceName, volumeSize, extraSpecs): - """Create the volume and associate it with a storage group. - - We use EMCCollections parameter to supply a Device Masking Group - to contain a newly created storage volume. 
- - :param conn: the connection information to the ecom server - :param storageConfigService: the storage configuration service - :param volumeName: the volume name (String) - :param sgInstanceName: the storage group instance name - associated with an SLO - :param volumeSize: volume size (String) - :param extraSpecs: additional info - :returns: dict -- volumeDict - the volume dict - :returns: int -- return code - :raises: VolumeBackendAPIException - """ - try: - storageGroupInstance = conn.GetInstance(sgInstanceName) - except Exception: - exceptionMessage = (_( - "Unable to get the name of the storage group")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - sgName = storageGroupInstance['ElementName'] - - @coordination.synchronized("emc-sg-{storageGroup}") - def do_create_volume_from_sg(storageGroup): - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'CreateOrModifyElementFromStoragePool', - storageConfigService, ElementName=volumeName, - EMCCollections=[sgInstanceName], - ElementType=self.utils.get_num(THINPROVISIONING, '16'), - Size=self.utils.get_num(volumeSize, '64')) - - LOG.debug("Create Volume: %(volumename)s. Return code: %(rc)lu.", - {'volumename': volumeName, - 'rc': rc}) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Volume: %(volumeName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - # Find the newly created volume. 
- volumeDict = self.get_volume_dict_from_job(conn, job['Job']) - return volumeDict, rc - - return do_create_volume_from_sg(sgName) - - def _find_new_storage_group( - self, conn, maskingGroupDict, storageGroupName): - """After creating an new storage group find it and return it. - - :param conn: connection to the ecom server - :param maskingGroupDict: the maskingGroupDict dict - :param storageGroupName: storage group name (String) - :returns: maskingGroupDict['MaskingGroup'] or None - """ - foundStorageGroupInstanceName = None - if 'MaskingGroup' in maskingGroupDict: - foundStorageGroupInstanceName = maskingGroupDict['MaskingGroup'] - - return foundStorageGroupInstanceName - - def get_volume_dict_from_job(self, conn, jobInstance): - """Given the jobInstance determine the volume Instance. - - :param conn: the ecom connection - :param jobInstance: the instance of a job - :returns: dict -- volumeDict - an instance of a volume - """ - associators = conn.Associators( - jobInstance, - ResultClass='EMC_StorageVolume') - if len(associators) > 0: - return self.create_volume_dict(associators[0].path) - else: - exceptionMessage = (_( - "Unable to get storage volume from job.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - def get_volume_from_job(self, conn, jobInstance): - """Given the jobInstance determine the volume Instance. 
- - :param conn: the ecom connection - :param jobInstance: the instance of a job - :returns: dict -- volumeDict - an instance of a volume - """ - associators = conn.Associators( - jobInstance, - ResultClass='EMC_StorageVolume') - if len(associators) > 0: - return associators[0] - else: - exceptionMessage = (_( - "Unable to get storage volume from job.")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - def create_volume_dict(self, volumeInstanceName): - """Create volume dictionary - - :param volumeInstanceName: the instance of a job - :returns: dict -- volumeDict - an instance of a volume - """ - volpath = volumeInstanceName - volumeDict = {} - volumeDict['classname'] = volpath.classname - keys = {} - keys['CreationClassName'] = volpath['CreationClassName'] - keys['SystemName'] = volpath['SystemName'] - keys['DeviceID'] = volpath['DeviceID'] - keys['SystemCreationClassName'] = volpath['SystemCreationClassName'] - volumeDict['keybindings'] = keys - - return volumeDict - - def get_or_create_default_sg(self, conn, extraSpecs, storageSystemName, - doDisableCompression): - """Get or create default storage group for a replica. 
- - :param conn: the connection to the ecom server - :param extraSpecs: the extra specifications - :param storageSystemName: the storage system name - :param doDisableCompression: flag for compression - :returns: sgInstanceName, instance of storage group - """ - pool = extraSpecs[self.utils.POOL] - slo = extraSpecs[self.utils.SLO] - workload = extraSpecs[self.utils.WORKLOAD] - storageGroupName, controllerConfigService, sgInstanceName = ( - self.utils.get_v3_default_sg_instance_name( - conn, pool, slo, workload, storageSystemName, - doDisableCompression)) - if sgInstanceName is None: - sgInstanceName = self.create_storage_group_v3( - conn, controllerConfigService, storageGroupName, - pool, slo, workload, extraSpecs, doDisableCompression) - return sgInstanceName - - def create_element_replica( - self, conn, repServiceInstanceName, - cloneName, syncType, sourceInstance, extraSpecs, - targetInstance=None, rsdInstance=None, copyState=None): - """Make SMI-S call to create replica for source element. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: replication service - :param cloneName: clone volume name - :param syncType: 7=snapshot, 8=clone - :param sourceInstance: source volume instance - :param extraSpecs: additional info - :param targetInstance: Target volume instance. Default None - :param rsdInstance: replication settingdata instance. 
Default None - :returns: int -- rc - return code - :returns: job - job object of the replica creation operation - :raises: VolumeBackendAPIException - """ - startTime = time.time() - LOG.debug("Create replica: %(clone)s " - "syncType: %(syncType)s Source: %(source)s.", - {'clone': cloneName, - 'syncType': syncType, - 'source': sourceInstance.path}) - storageSystemName = sourceInstance['SystemName'] - doDisableCompression = self.utils.is_compression_disabled(extraSpecs) - sgInstanceName = ( - self.get_or_create_default_sg( - conn, extraSpecs, storageSystemName, doDisableCompression)) - try: - storageGroupInstance = conn.GetInstance(sgInstanceName) - except Exception: - exceptionMessage = (_( - "Unable to get the name of the storage group")) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - @coordination.synchronized("emc-sg-{storageGroupName}") - def do_create_element_replica(storageGroupName): - if targetInstance is None and rsdInstance is None: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=self.utils.get_num(syncType, '16'), - SourceElement=sourceInstance.path, - Collections=[sgInstanceName]) - else: - rc, job = self._create_element_replica_extra_params( - conn, repServiceInstanceName, cloneName, syncType, - sourceInstance, targetInstance, rsdInstance, - sgInstanceName, copyState=copyState) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Create Cloned Volume: %(cloneName)s " - "Return code: %(rc)lu. 
Error: %(error)s.") - % {'cloneName': cloneName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateElementReplica " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc, job - return do_create_element_replica(storageGroupInstance['ElementName']) - - def create_remote_element_replica( - self, conn, repServiceInstanceName, cloneName, syncType, - sourceInstance, targetInstance, rdfGroupInstance, extraSpecs): - """Create a replication relationship between source and target. - - :param conn: the ecom connection - :param repServiceInstanceName: the replication service - :param cloneName: the name of the target volume - :param syncType: the synchronization type - :param sourceInstance: the source volume instance - :param targetInstance: the target volume instance - :param rdfGroupInstance: the rdf group instance - :param extraSpecs: additional info - :return: rc, job - """ - startTime = time.time() - LOG.debug("Setup replication relationship: %(source)s " - "syncType: %(syncType)s Source: %(target)s.", - {'source': sourceInstance.path, - 'syncType': syncType, - 'target': targetInstance.path}) - rc, job = self._create_element_replica_extra_params( - conn, repServiceInstanceName, cloneName, syncType, - sourceInstance, targetInstance, None, None, rdfGroupInstance) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = ( - _("Error Create Cloned Volume: %(cloneName)s " - "Return code: %(rc)lu. 
Error: %(error)s.") - % {'cloneName': cloneName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateElementReplica " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - return rc, job - - def _create_element_replica_extra_params( - self, conn, repServiceInstanceName, cloneName, syncType, - sourceInstance, targetInstance, rsdInstance, sgInstanceName, - rdfGroupInstance=None, copyState=None): - """CreateElementReplica using extra parameters. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: replication service - :param cloneName: clone volume name - :param syncType: 7=snapshot, 8=clone - :param sourceInstance: source volume instance - :param targetInstance: Target volume instance. Default None - :param rsdInstance: replication settingdata instance. Default None - :param sgInstanceName: pool instance name - :returns: int -- rc - return code - :returns: job - job object of the replica creation operation - """ - syncType = self.utils.get_num(syncType, '16') - modeType = self.utils.get_num(RDF_SYNC_MODE, '16') - if targetInstance and rsdInstance: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=syncType, - SourceElement=sourceInstance.path, - TargetElement=targetInstance.path, - ReplicationSettingData=rsdInstance) - elif targetInstance and rdfGroupInstance: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - SyncType=syncType, - Mode=modeType, - SourceElement=sourceInstance.path, - TargetElement=targetInstance.path, - ConnectivityCollection=rdfGroupInstance) - elif rsdInstance: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=syncType, - SourceElement=sourceInstance.path, - ReplicationSettingData=rsdInstance, - 
Collections=[sgInstanceName], - WaitForCopyState=copyState) - elif targetInstance and copyState: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=syncType, - SourceElement=sourceInstance.path, - TargetElement=targetInstance.path, - WaitForCopyState=copyState) - elif targetInstance: - rc, job = conn.InvokeMethod( - 'CreateElementReplica', repServiceInstanceName, - ElementName=cloneName, - SyncType=syncType, - SourceElement=sourceInstance.path, - TargetElement=targetInstance.path) - return rc, job - - def break_replication_relationship( - self, conn, repServiceInstanceName, syncInstanceName, - operation, extraSpecs, force=False): - """Deletes the relationship between the clone/snap and source volume. - - Makes an SMI-S call to break clone relationship between the clone - volume and the source. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: instance name of the replication service - :param syncInstanceName: instance name of the - SE_StorageSynchronized_SV_SV object - :param operation: operation code - :param extraSpecs: additional info - :param force: force to break replication relationship if True - :returns: rc - return code - :returns: job - job object of the replica creation operation - """ - LOG.debug("Break replication relationship: %(sv)s " - "operation: %(operation)s.", - {'sv': syncInstanceName, 'operation': operation}) - - return self._modify_replica_synchronization( - conn, repServiceInstanceName, syncInstanceName, operation, - extraSpecs, force) - - def create_storage_group_v3(self, conn, controllerConfigService, - groupName, srp, slo, workload, extraSpecs, - doDisableCompression): - """Create the volume in the specified pool. 
- - :param conn: the connection information to the ecom server - :param controllerConfigService: the controller configuration service - :param groupName: the group name (String) - :param srp: the SRP (String) - :param slo: the SLO (String) - :param workload: the workload (String) - :param extraSpecs: additional info - :param doDisableCompression: disable compression flag - :returns: storageGroupInstanceName - storage group instance name - """ - startTime = time.time() - - @coordination.synchronized("emc-sg-{sgGroupName}") - def do_create_storage_group_v3(sgGroupName): - if doDisableCompression: - if slo and workload: - rc, job = conn.InvokeMethod( - 'CreateGroup', - controllerConfigService, - GroupName=groupName, - Type=self.utils.get_num(4, '16'), - EMCSRP=srp, - EMCSLO=slo, - EMCWorkload=workload, - EMCDisableCompression=True) - else: - if slo and workload: - rc, job = conn.InvokeMethod( - 'CreateGroup', - controllerConfigService, - GroupName=groupName, - Type=self.utils.get_num(4, '16'), - EMCSRP=srp, - EMCSLO=slo, - EMCWorkload=workload) - else: - rc, job = conn.InvokeMethod( - 'CreateGroup', - controllerConfigService, - GroupName=groupName, - Type=self.utils.get_num(4, '16')) - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete( - conn, job, extraSpecs) - if rc != 0: - LOG.error( - "Error Create Group: %(groupName)s. " - "Return code: %(rc)lu. Error: %(error)s.", - {'groupName': groupName, - 'rc': rc, - 'error': errordesc}) - raise - - LOG.debug("InvokeMethod CreateGroup " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - foundStorageGroupInstanceName = self._find_new_storage_group( - conn, job, groupName) - return foundStorageGroupInstanceName - - return do_create_storage_group_v3(groupName) - - def get_storage_pool_capability(self, conn, poolInstanceName): - """Get the pool capability. 
- - :param conn: the connection information to the ecom server - :param poolInstanceName: the pool instance - :returns: the storage pool capability instance. None if not found - """ - storagePoolCapability = None - - associators = ( - conn.AssociatorNames(poolInstanceName, - ResultClass='Symm_StoragePoolCapabilities')) - - if len(associators) > 0: - storagePoolCapability = associators[0] - - return storagePoolCapability - - def get_storage_pool_setting( - self, conn, storagePoolCapability, slo, workload): - """Get the pool setting for pool capability. - - :param conn: the connection information to the ecom server - :param storagePoolCapability: the storage pool capability instance - :param slo: the slo string e.g Bronze - :param workload: the workload string e.g DSS_REP - :returns: the storage pool setting instance - """ - - foundStoragePoolSetting = None - storagePoolSettings = ( - conn.AssociatorNames(storagePoolCapability, - ResultClass='CIM_storageSetting')) - - for storagePoolSetting in storagePoolSettings: - settingInstanceID = storagePoolSetting['InstanceID'] - matchString = ("%(slo)s:%(workload)s" - % {'slo': slo, - 'workload': workload}) - if matchString in settingInstanceID: - foundStoragePoolSetting = storagePoolSetting - break - if foundStoragePoolSetting is None: - exceptionMessage = (_( - "The array does not support the storage pool setting " - "for SLO %(slo)s and workload %(workload)s. Please " - "check the array for valid SLOs and workloads.") - % {'slo': slo, - 'workload': workload}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - return foundStoragePoolSetting - - def _get_supported_size_range_for_SLO( - self, conn, storageConfigService, - srpPoolInstanceName, storagePoolSettingInstanceName, extraSpecs): - """Gets available performance capacity per SLO. 
- - :param conn: the connection information to the ecom server - :param storageConfigService: the storage configuration service instance - :param srpPoolInstanceName: the SRP storage pool instance - :param storagePoolSettingInstanceName: the SLO type, e.g Bronze - :param extraSpecs: additional info - :returns: dict -- supportedSizeDict - the supported size dict - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, supportedSizeDict = conn.InvokeMethod( - 'GetSupportedSizeRange', - srpPoolInstanceName, - ElementType=self.utils.get_num(3, '16'), - Goal=storagePoolSettingInstanceName) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete( - conn, supportedSizeDict, extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Cannot get supported size range for %(sps)s " - "Return code: %(rc)lu. Error: %(error)s.") - % {'sps': storagePoolSettingInstanceName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod GetSupportedSizeRange " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return supportedSizeDict - - def get_volume_range( - self, conn, storageConfigService, poolInstanceName, slo, workload, - extraSpecs): - """Get upper and lower range for volume for slo/workload combination. 
- - :param conn: the connection information to the ecom server - :param storageConfigService: the storage config service - :param poolInstanceName: the pool instance - :param slo: slo string e.g Bronze - :param workload: workload string e.g DSS - :param extraSpecs: additional info - :returns: supportedSizeDict - """ - supportedSizeDict = {} - storagePoolCapabilityInstanceName = self.get_storage_pool_capability( - conn, poolInstanceName) - if storagePoolCapabilityInstanceName: - storagePoolSettingInstanceName = self.get_storage_pool_setting( - conn, storagePoolCapabilityInstanceName, slo, workload) - supportedSizeDict = self._get_supported_size_range_for_SLO( - conn, storageConfigService, poolInstanceName, - storagePoolSettingInstanceName, extraSpecs) - return supportedSizeDict - - def activate_snap_relationship( - self, conn, repServiceInstanceName, syncInstanceName, extraSpecs): - """Activate snap relationship and start copy operation. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: instance name of the replication service - :param syncInstanceName: instance name of the - SE_StorageSynchronized_SV_SV object - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object of the replica creation operation - """ - # Operation 4: activate the snapVx. - operation = ACTIVATESNAPVX - - LOG.debug("Activate snap: %(sv)s operation: %(operation)s.", - {'sv': syncInstanceName, 'operation': operation}) - - return self._modify_replica_synchronization( - conn, repServiceInstanceName, syncInstanceName, operation, - extraSpecs) - - def return_to_resource_pool(self, conn, repServiceInstanceName, - syncInstanceName, extraSpecs): - """Return the snap target resources back to the pool. 
- - :param conn: the connection to the ecom server - :param repServiceInstanceName: instance name of the replication service - :param syncInstanceName: instance name of the - :param extraSpecs: additional info - :returns: rc - return code - :returns: job object of the replica creation operation - """ - # Operation 4: activate the snapVx. - operation = DEACTIVATESNAPVX - - LOG.debug("Return snap resource back to pool: " - "%(sv)s operation: %(operation)s.", - {'sv': syncInstanceName, 'operation': operation}) - - return self._modify_replica_synchronization( - conn, repServiceInstanceName, syncInstanceName, operation, - extraSpecs) - - def _modify_replica_synchronization( - self, conn, repServiceInstanceName, syncInstanceName, - operation, extraSpecs, force=False): - """Modify the relationship between the clone/snap and source volume. - - Helper function that makes an SMI-S call to break clone relationship - between the clone volume and the source. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: instance name of the replication service - :param syncInstanceName: instance name of the - SE_StorageSynchronized_SV_SV object - :param operation: operation code - :param extraSpecs: additional info - :param force: force to modify replication synchronization if True - :returns: int -- return code - :returns: job object of the replica creation operation - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'ModifyReplicaSynchronization', repServiceInstanceName, - Operation=self.utils.get_num(operation, '16'), - Synchronization=syncInstanceName, - Force=force) - - LOG.debug("_modify_replica_synchronization: %(sv)s " - "operation: %(operation)s Return code: %(rc)lu.", - {'sv': syncInstanceName, 'operation': operation, 'rc': rc}) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error modify replica 
synchronization: %(sv)s " - "operation: %(operation)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'sv': syncInstanceName, 'operation': operation, - 'rc': rc, 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod ModifyReplicaSynchronization " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - return rc, job - - def create_group_replica( - self, conn, replicationService, - srcGroupInstanceName, tgtGroupInstanceName, relationName, - extraSpecs): - """Make SMI-S call to create replica for source group. - - :param conn: the connection to the ecom server - :param replicationService: replication service - :param srcGroupInstanceName: source group instance name - :param tgtGroupInstanceName: target group instance name - :param relationName: replica relationship name - :param extraSpecs: additional info - :returns: int -- return code - :returns: job object of the replica creation operation - :raises: VolumeBackendAPIException - """ - LOG.debug( - "Creating CreateGroupReplica V3: " - "replicationService: %(replicationService)s " - "RelationName: %(relationName)s " - "sourceGroup: %(srcGroup)s " - "targetGroup: %(tgtGroup)s.", - {'replicationService': replicationService, - 'relationName': relationName, - 'srcGroup': srcGroupInstanceName, - 'tgtGroup': tgtGroupInstanceName}) - rc, job = conn.InvokeMethod( - 'CreateGroupReplica', - replicationService, - RelationshipName=relationName, - SourceGroup=srcGroupInstanceName, - TargetGroup=tgtGroupInstanceName, - SyncType=self.utils.get_num(SNAPSYNCTYPE, '16'), - WaitForCopyState=self.utils.get_num(4, '16')) - - if rc != 0: - rc, errordesc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMsg = (_("Error CreateGroupReplica: " - "source: %(source)s target: %(target)s. " - "Return code: %(rc)lu. 
Error: %(error)s.") - % {'source': srcGroupInstanceName, - 'target': tgtGroupInstanceName, - 'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMsg) - raise exception.VolumeBackendAPIException(data=exceptionMsg) - return rc, job - - def get_srp_pool_stats(self, conn, arrayInfo): - """Get the totalManagedSpace, remainingManagedSpace. - - :param conn: the connection to the ecom server - :param arrayInfo: the array dict - :returns: totalCapacityGb - :returns: remainingCapacityGb - :returns: subscribedCapacityGb - :returns: array_reserve_percent - :returns: wlpEnabled - """ - totalCapacityGb = -1 - remainingCapacityGb = -1 - subscribedCapacityGb = -1 - array_reserve_percent = -1 - wlpEnabled = False - storageSystemInstanceName = self.utils.find_storageSystem( - conn, arrayInfo['SerialNumber']) - - srpPoolInstanceNames = conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='Symm_SRPStoragePool') - - for srpPoolInstanceName in srpPoolInstanceNames: - poolnameStr = self.utils.get_pool_name(conn, srpPoolInstanceName) - - if six.text_type(arrayInfo['PoolName']) == ( - six.text_type(poolnameStr)): - try: - # Check that pool hasn't suddently been deleted. 
- srpPoolInstance = conn.GetInstance(srpPoolInstanceName) - propertiesList = srpPoolInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'TotalManagedSpace': - cimProperties = properties[1] - totalManagedSpace = cimProperties.value - totalCapacityGb = self.utils.convert_bits_to_gbs( - totalManagedSpace) - elif properties[0] == 'RemainingManagedSpace': - cimProperties = properties[1] - remainingManagedSpace = cimProperties.value - remainingCapacityGb = ( - self.utils.convert_bits_to_gbs( - remainingManagedSpace)) - elif properties[0] == 'EMCSubscribedCapacity': - cimProperties = properties[1] - subscribedManagedSpace = cimProperties.value - subscribedCapacityGb = ( - self.utils.convert_bits_to_gbs( - subscribedManagedSpace)) - elif properties[0] == 'EMCPercentReservedCapacity': - cimProperties = properties[1] - array_reserve_percent = int(cimProperties.value) - except Exception: - pass - remainingSLOCapacityGb = ( - self._get_remaining_slo_capacity_wlp( - conn, srpPoolInstanceName, arrayInfo, - storageSystemInstanceName['Name'])) - if remainingSLOCapacityGb != -1: - remainingCapacityGb = remainingSLOCapacityGb - wlpEnabled = True - else: - LOG.warning( - "Remaining capacity %(remainingCapacityGb)s " - "GBs is determined from SRP pool capacity " - "and not the SLO capacity. Performance may " - "not be what you expect.", - {'remainingCapacityGb': remainingCapacityGb}) - - return (totalCapacityGb, remainingCapacityGb, subscribedCapacityGb, - array_reserve_percent, wlpEnabled) - - def _get_remaining_slo_capacity_wlp(self, conn, srpPoolInstanceName, - arrayInfo, systemName): - """Get the remaining SLO capacity. - - This is derived from the WLP portion of Unisphere. Please - see the SMIProvider doc and the readme doc for details. 
- - :param conn: the connection to the ecom server - :param srpPoolInstanceName: SRP instance name - :param arrayInfo: the array dict - :param systemName: the system name - :returns: remainingCapacityGb - """ - remainingCapacityGb = -1 - if arrayInfo['SLO']: - storageConfigService = ( - self.utils.find_storage_configuration_service( - conn, systemName)) - - supportedSizeDict = ( - self.get_volume_range( - conn, storageConfigService, srpPoolInstanceName, - arrayInfo['SLO'], arrayInfo['Workload'], - None)) - try: - if supportedSizeDict['EMCInformationSource'] == INFO_SRC_V3: - remainingCapacityGb = self.utils.convert_bits_to_gbs( - supportedSizeDict['EMCRemainingSLOCapacity']) - LOG.debug("Received remaining SLO Capacity " - "%(remainingCapacityGb)s GBs for SLO " - "%(SLO)s and workload %(workload)s.", - {'remainingCapacityGb': remainingCapacityGb, - 'SLO': arrayInfo['SLO'], - 'workload': arrayInfo['Workload']}) - except KeyError: - pass - return remainingCapacityGb - - def extend_volume_in_SG( - self, conn, storageConfigService, volumeInstanceName, - volumeName, volumeSize, extraSpecs): - """Extend a volume instance. - - :param conn: connection to the ecom server - :param storageConfigService: the storage configuration service - :param volumeInstanceName: the volume instance name - :param volumeName: the volume name (String) - :param volumeSize: the volume size - :param extraSpecs: additional info - :returns: volumeDict - :returns: int -- return code - :raises: VolumeBackendAPIException - """ - startTime = time.time() - - rc, job = conn.InvokeMethod( - 'CreateOrModifyElementFromStoragePool', - storageConfigService, TheElement=volumeInstanceName, - Size=self.utils.get_num(volumeSize, '64')) - - LOG.debug("Extend Volume: %(volumename)s. 
Return code: %(rc)lu.", - {'volumename': volumeName, - 'rc': rc}) - - if rc != 0: - rc, error_desc = self.utils.wait_for_job_complete(conn, job, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error Extend Volume: %(volumeName)s. " - "Return code: %(rc)lu. Error: %(error)s.") - % {'volumeName': volumeName, - 'rc': rc, - 'error': error_desc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - LOG.debug("InvokeMethod CreateOrModifyElementFromStoragePool " - "took: %(delta)s H:MM:SS.", - {'delta': self.utils.get_time_delta(startTime, - time.time())}) - - # Find the newly created volume. - volumeDict = self.get_volume_dict_from_job(conn, job['Job']) - return volumeDict, rc - - def get_rdf_group_instance(self, conn, repServiceInstanceName, - RDFGroupName): - """Get the SRDF group instance. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: the replication service - :param RDFGroupName: the element name of the RDF group - :return: foundRDFGroupInstanceName - """ - foundRDFGroupInstanceName = None - - RDFGroupInstances = ( - conn.Associators(repServiceInstanceName, - ResultClass='CIM_ConnectivityCollection')) - - for RDFGroupInstance in RDFGroupInstances: - - if RDFGroupName == ( - six.text_type(RDFGroupInstance['ElementName'])): - # Check that it has not been deleted recently. - instance = self.utils.get_existing_instance( - conn, RDFGroupInstance.path) - if instance is None: - # SRDF group not found. - foundRDFGroupInstanceName = None - else: - foundRDFGroupInstanceName = ( - RDFGroupInstance.path) - break - return foundRDFGroupInstanceName - - def failover_volume(self, conn, repServiceInstanceName, - storageSynchronizationSv, - extraSpecs): - """Failover a volume to its target device. 
- - :param conn: the connection to the ecom server - :param repServiceInstanceName: the replication service - :param storageSynchronizationSv: the storage synchronized object - :param extraSpecs: the extra specifications - """ - operation = RDF_FAILOVER - # check if volume already in failover state - syncState = self._check_sync_state(conn, storageSynchronizationSv) - if syncState == RDF_FAILEDOVER: - return - - else: - LOG.debug("Failover: %(sv)s operation: %(operation)s.", - {'sv': storageSynchronizationSv, 'operation': operation}) - - return self._modify_replica_synchronization( - conn, repServiceInstanceName, storageSynchronizationSv, - operation, extraSpecs) - - def failback_volume(self, conn, repServiceInstanceName, - storageSynchronizationSv, - extraSpecs): - """Failback a volume to the source device. - - :param conn: the connection to the ecom server - :param repServiceInstanceName: the replication service - :param storageSynchronizationSv: the storage synchronized object - :param extraSpecs: the extra specifications - """ - failback_operation = RDF_FAILBACK - # check if volume already in failback state - syncState = self._check_sync_state(conn, storageSynchronizationSv) - if syncState == RDF_SYNCHRONIZED: - return - - else: - LOG.debug("Failback: %(sv)s operation: %(operation)s.", - {'sv': storageSynchronizationSv, - 'operation': failback_operation}) - - return self._modify_replica_synchronization( - conn, repServiceInstanceName, storageSynchronizationSv, - failback_operation, extraSpecs) - - def _check_sync_state(self, conn, syncName): - """Get the copy state of a sync name. 
- - :param conn: the connection to the ecom server - :param syncName: the storage sync sv name - :return: the copy state - """ - try: - syncInstance = conn.GetInstance(syncName, - LocalOnly=False) - syncState = syncInstance['syncState'] - LOG.debug("syncState is %(syncState)lu.", - {'syncState': syncState}) - return syncState - except Exception as ex: - exceptionMessage = ( - _("Getting sync instance failed with: %(ex)s.") - % {'ex': six.text_type(ex)}) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) diff --git a/cinder/volume/drivers/dell_emc/vmax/rest.py b/cinder/volume/drivers/dell_emc/vmax/rest.py new file mode 100644 index 00000000000..451db018c1a --- /dev/null +++ b/cinder/volume/drivers/dell_emc/vmax/rest.py @@ -0,0 +1,1594 @@ +# Copyright (c) 2017 Dell Inc. or its subsidiaries. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
def set_rest_credentials(self, array_info):
    """Configure the REST server connection details from the array record.

    Stores the credentials/certificate settings on the instance, builds
    the base URI for the Unisphere REST API and opens a session.

    :param array_info: record
    """
    self.user = array_info['RestUserName']
    self.passwd = array_info['RestPassword']
    self.cert = array_info['SSLCert']
    ssl_verify = array_info['SSLVerify']
    # The config value is a string; only the literal 'false' (any case)
    # disables certificate verification.
    if ssl_verify and ssl_verify.lower() == 'false':
        ssl_verify = False
    self.verify = ssl_verify
    host_port = "%(ip)s:%(port)s" % {
        'ip': array_info['RestServerIp'],
        'port': array_info['RestServerPort']}
    self.base_uri = ("https://%(ip_port)s/univmax/restapi"
                     % {'ip_port': host_port})
    self.session = self._establish_rest_session()
+ + :returns: requests.session() -- session, the rest session + """ + session = requests.session() + session.headers = {'content-type': 'application/json', + 'accept': 'application/json', + 'Application-Type': 'openstack'} + session.auth = requests.auth.HTTPBasicAuth(self.user, self.passwd) + if self.verify is not None: + session.verify = self.verify + if self.cert: + session.cert = self.cert + + return session + + def request(self, target_uri, method, params=None, request_object=None): + """Sends a request (GET, POST, PUT, DELETE) to the target api. + + :param target_uri: target uri (string) + :param method: The method (GET, POST, PUT, or DELETE) + :param params: Additional URL parameters + :param request_object: request payload (dict) + :return: server response object (dict) + :raises: VolumeBackendAPIException + """ + message, status_code = None, None + if not self.session: + self.session = self._establish_rest_session() + url = ("%(self.base_uri)s%(target_uri)s" % + {'self.base_uri': self.base_uri, + 'target_uri': target_uri}) + try: + if request_object: + response = self.session.request( + method=method, url=url, + data=json.dumps(request_object, sort_keys=True, + indent=4)) + elif params: + response = self.session.request(method=method, url=url, + params=params) + else: + response = self.session.request(method=method, url=url) + status_code = response.status_code + try: + message = response.json() + except ValueError: + LOG.debug("No response received from API. Status code " + "received is: %(status_code)s", + {'status_code': status_code}) + message = None + LOG.debug("%(method)s request to %(url)s has returned with " + "a status code of: %(status_code)s.", + {'method': method, 'url': url, + 'status_code': status_code}) + + except requests.Timeout: + LOG.error("The %(method)s request to URL %(url)s timed-out, " + "but may have been successful. 
Please check the array.", + {'method': method, 'url': url}) + except Exception as e: + exception_message = (_("The %(method)s request to URL %(url)s " + "failed with exception %(e)s") + % {'method': method, 'url': url, + 'e': six.text_type(e)}) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException(data=exception_message) + + return status_code, message + + def wait_for_job_complete(self, job, extra_specs): + """Given the job wait for it to complete. + + :param job: the job dict + :param extra_specs: the extra_specs dict. + :return rc -- int, result -- string, status -- string, + task -- list of dicts detailing tasks in the job + :raises: VolumeBackendAPIException + """ + res, tasks = None, None + if job['status'].lower == CREATED: + try: + res, tasks = job['result'], job['task'] + except KeyError: + pass + return 0, res, job['status'], tasks + + def _wait_for_job_complete(): + result = None + # Called at an interval until the job is finished. + retries = kwargs['retries'] + try: + kwargs['retries'] = retries + 1 + if not kwargs['wait_for_job_called']: + is_complete, result, rc, status, task = ( + self._is_job_finished(job_id)) + if is_complete is True: + kwargs['wait_for_job_called'] = True + kwargs['rc'], kwargs['status'] = rc, status + kwargs['result'], kwargs['task'] = result, task + except Exception: + exception_message = (_("Issue encountered waiting for job.")) + LOG.exception(exception_message) + raise exception.VolumeBackendAPIException( + data=exception_message) + + if retries > int(extra_specs[utils.RETRIES]): + LOG.error("_wait_for_job_complete failed after " + "%(retries)d tries.", {'retries': retries}) + kwargs['rc'], kwargs['result'] = -1, result + + raise loopingcall.LoopingCallDone() + if kwargs['wait_for_job_called']: + raise loopingcall.LoopingCallDone() + + job_id = job['jobId'] + kwargs = {'retries': 0, 'wait_for_job_called': False, + 'rc': 0, 'result': None} + + timer = 
loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) + timer.start(interval=int(extra_specs[utils.INTERVAL])).wait() + LOG.debug("Return code is: %(rc)lu. Result is %(res)s.", + {'rc': kwargs['rc'], 'res': kwargs['result']}) + return (kwargs['rc'], kwargs['result'], + kwargs['status'], kwargs['task']) + + def _is_job_finished(self, job_id): + """Check if the job is finished. + + :param job_id: the id of the job + :returns: complete -- bool, result -- string, + rc -- int, status -- string, task -- list of dicts + """ + complete, rc, status, result, task = False, 0, None, None, None + job_url = "/%s/system/job/%s" % (U4V_VERSION, job_id) + job = self._get_request(job_url, 'job') + if job: + status = job['status'] + try: + result, task = job['result'], job['task'] + except KeyError: + pass + if status.lower() == SUCCEEDED: + complete = True + elif status.lower() in INCOMPLETE_LIST: + complete = False + else: + rc, complete = -1, True + return complete, result, rc, status, task + + @staticmethod + def check_status_code_success(operation, status_code, message): + """Check if a status code indicates success. + + :param operation: the operation + :param status_code: the status code + :param message: the server response + :raises: VolumeBackendAPIException + """ + if status_code not in [STATUS_200, STATUS_201, + STATUS_202, STATUS_204]: + exception_message = ( + _('Error %(operation)s. The status code received ' + 'is %(sc)s and the message is %(message)s.') + % {'operation': operation, + 'sc': status_code, 'message': message}) + raise exception.VolumeBackendAPIException( + data=exception_message) + + def wait_for_job(self, operation, status_code, job, extra_specs): + """Check if call is async, wait for it to complete. 
@staticmethod
def _build_uri(array, category, resource_type,
               resource_name=None, private=''):
    """Assemble the target url for a REST request.

    :param array: the array serial number
    :param category: the resource category e.g. sloprovisioning
    :param resource_type: the resource type e.g. maskingview
    :param resource_name: the name of a specific resource
    :param private: empty string or '/private' if private url
    :returns: target url, string
    """
    base = ('%(private)s/%(version)s/%(category)s/symmetrix/'
            '%(array)s/%(resource_type)s'
            % {'private': private, 'version': U4V_VERSION,
               'category': category, 'array': array,
               'resource_type': resource_type})
    if not resource_name:
        return base
    return base + ('/%(resource_name)s'
                   % {'resource_name': resource_name})
def get_resource(self, array, category, resource_type,
                 resource_name=None, params=None, private=''):
    """Fetch the details of a resource from the array.

    :param array: the array serial number
    :param category: the resource category e.g. sloprovisioning
    :param resource_type: the resource type e.g. maskingview
    :param resource_name: the name of a specific resource
    :param params: query parameters
    :param private: empty string or '/private' if private url
    :returns: resource object -- dict or None
    """
    uri = self._build_uri(
        array, category, resource_type, resource_name, private)
    return self._get_request(uri, resource_type, params)


def create_resource(self, array, category, resource_type, payload,
                    private=''):
    """Create a provisioning resource on the array.

    :param array: the array serial number
    :param category: the category
    :param resource_type: the resource type
    :param payload: the payload
    :param private: empty string or '/private' if private url
    :returns: status_code -- int, message -- string, server response
    """
    uri = self._build_uri(array, category, resource_type, None, private)
    status_code, message = self.request(uri, POST, request_object=payload)
    # Raises VolumeBackendAPIException on a non-2xx status.
    self.check_status_code_success(
        'Create %(res)s resource' % {'res': resource_type},
        status_code, message)
    return status_code, message
def delete_resource(
        self, array, category, resource_type, resource_name,
        payload=None, private=''):
    """Delete a provisioning resource on the array.

    :param array: the array serial number
    :param category: the resource category e.g. sloprovisioning
    :param resource_type: the type of resource to be deleted
    :param resource_name: the name of the resource to be deleted
    :param payload: the payload, optional
    :param private: empty string or '/private' if private url
    """
    uri = self._build_uri(array, category, resource_type,
                          resource_name, private)
    sc, message = self.request(uri, DELETE, request_object=payload)
    # Raises VolumeBackendAPIException on a non-2xx status.
    self.check_status_code_success(
        'delete %(res)s resource' % {'res': resource_type}, sc, message)


def get_array_serial(self, array):
    """Look up an array by its serial number.

    :param array: the array serial number
    :returns: array_details -- dict or None
    """
    details = self._get_request(
        '/%s/system/symmetrix/%s' % (U4V_VERSION, array), 'system')
    if not details:
        LOG.error("Cannot connect to array %(array)s.",
                  {'array': array})
    return details
def get_slo_list(self, array):
    """Return the service levels associated with an srp.

    :param array: the array serial number
    :returns: slo_list -- list of service level names
    """
    slo_dict = self.get_resource(array, SLOPROVISIONING, 'slo')
    return slo_dict['sloId'] if slo_dict else []


def get_workload_settings(self, array):
    """Return the valid workload options from the array.

    :param array: the array serial number
    :returns: workload_setting -- list of workload names
    """
    wl_details = self.get_resource(array, SLOPROVISIONING, 'workloadtype')
    return wl_details['workloadId'] if wl_details else []


def get_headroom_capacity(self, array, srp, slo, workload):
    """Get the capacity for a given slo/workload combination.

    :param array: the array serial number
    :param srp: the storage resource srp
    :param slo: the service level
    :param workload: the workload
    :returns: remaining_capacity -- string, or None
    """
    try:
        headroom = self.get_resource(
            array, 'wlp', 'headroom',
            params={'srp': srp, 'slo': slo, 'workloadtype': workload})
        return headroom['headroom'][0]['headroomCapacity']
    except (KeyError, TypeError):
        # Missing or malformed headroom data -- treat as unknown.
        return None
    def get_storage_group(self, array, storage_group_name):
        """Given a name, return storage group details.

        :param array: the array serial number
        :param storage_group_name: the name of the storage group
        :return: storage group dict or None
        """
        return self.get_resource(
            array, SLOPROVISIONING, 'storagegroup',
            resource_name=storage_group_name)

    def get_storage_group_list(self, array, params=None):
        """Return a list of storage groups.

        :param array: the array serial number
        :param params: optional filter parameters
        :return: storage group list
        """
        sg_list = []
        sg_details = self.get_resource(array, SLOPROVISIONING,
                                       'storagegroup', params=params)
        if sg_details:
            sg_list = sg_details['storageGroupId']
        return sg_list

    def get_num_vols_in_sg(self, array, storage_group_name):
        """Get the number of volumes in a storage group.

        :param array: the array serial number
        :param storage_group_name: the storage group name
        :return: num_vols -- int
        """
        num_vols = 0
        storagegroup = self.get_storage_group(array, storage_group_name)
        try:
            num_vols = int(storagegroup['num_of_vols'])
        except (KeyError, TypeError):
            # Group not found or count missing: report zero volumes.
            pass
        return num_vols

    def is_child_sg_in_parent_sg(self, array, child_name, parent_name):
        """Check if a child storage group is a member of a parent group.

        :param array: the array serial number
        :param child_name: the child sg name
        :param parent_name: the parent sg name
        :return: bool
        """
        parent_sg = self.get_storage_group(array, parent_name)
        if parent_sg and parent_sg.get('child_storage_group'):
            child_sg_list = parent_sg['child_storage_group']
            if child_name in child_sg_list:
                return True
        return False

    def add_child_sg_to_parent_sg(
            self, array, child_sg, parent_sg, extra_specs):
        """Add a storage group to a parent storage group.

        This method adds an existing storage group to another storage
        group, i.e. cascaded storage groups.
        :param array: the array serial number
        :param child_sg: the name of the child sg
        :param parent_sg: the name of the parent sg
        :param extra_specs: the extra specifications
        """
        payload = {"editStorageGroupActionParam": {
            "expandStorageGroupParam": {
                "addExistingStorageGroupParam": {
                    "storageGroupId": [child_sg]}}}}
        sc, job = self.modify_storage_group(array, parent_sg, payload)
        self.wait_for_job('Add child sg to parent sg', sc, job, extra_specs)

    def remove_child_sg_from_parent_sg(
            self, array, child_sg, parent_sg, extra_specs):
        """Remove a storage group from its parent storage group.

        This method removes a child storage group from its parent group.
        :param array: the array serial number
        :param child_sg: the name of the child sg
        :param parent_sg: the name of the parent sg
        :param extra_specs: the extra specifications
        """
        payload = {"editStorageGroupActionParam": {
            "removeStorageGroupParam": {
                "storageGroupId": [child_sg], "force": 'true'}}}
        status_code, job = self.modify_storage_group(
            array, parent_sg, payload)
        self.wait_for_job(
            'Remove child sg from parent sg', status_code, job, extra_specs)

    def _create_storagegroup(self, array, payload):
        """Create a storage group.

        :param array: the array serial number
        :param payload: the payload -- dict
        :return: status_code -- int, message -- string, server response
        """
        return self.create_resource(
            array, SLOPROVISIONING, 'storagegroup', payload)
    def create_storage_group(self, array, storagegroup_name,
                             srp, slo, workload, extra_specs):
        """Create a storage group with the given srp/slo/workload.

        :param array: the array serial number
        :param storagegroup_name: the group name (String)
        :param srp: the SRP (String)
        :param slo: the SLO (String)
        :param workload: the workload (String)
        :param extra_specs: additional info
        :returns: storagegroup_name - string
        """
        # A group with no service level has no srp either.
        srp_id = srp if slo else "None"
        payload = ({"srpId": srp_id,
                    "storageGroupId": storagegroup_name,
                    "emulation": "FBA",
                    "create_empty_storage_group": "true"})

        if slo:
            slo_param = {"num_of_vols": 0,
                         "sloId": slo,
                         "workloadSelection": workload,
                         "volumeAttribute": {
                             "volume_size": "0",
                             "capacityUnit": "GB"}}
            payload.update({"sloBasedStorageGroupParam": [slo_param]})

        status_code, job = self._create_storagegroup(array, payload)
        self.wait_for_job('Create storage group', status_code,
                          job, extra_specs)
        return storagegroup_name

    def modify_storage_group(self, array, storagegroup, payload):
        """Modify a storage group (PUT operation).

        :param array: the array serial number
        :param storagegroup: storage group name
        :param payload: the request payload
        :return: status_code -- int, message -- string, server response
        """
        return self.modify_resource(
            array, SLOPROVISIONING, 'storagegroup', payload,
            resource_name=storagegroup)

    def create_volume_from_sg(self, array, volume_name, storagegroup_name,
                              volume_size, extra_specs):
        """Create a new volume in the given storage group.

        :param array: the array serial number
        :param volume_name: the volume name (String)
        :param storagegroup_name: the storage group name
        :param volume_size: volume size (String)
        :param extra_specs: the extra specifications
        :returns: dict -- volume_dict - the volume dict
        :raises: VolumeBackendAPIException
        """
        payload = (
            {"executionOption": "ASYNCHRONOUS",
             "editStorageGroupActionParam": {
                 "expandStorageGroupParam": {
                     "addVolumeParam": {
                         "num_of_vols": 1,
                         "emulation": "FBA",
                         "volumeIdentifier": {
                             "identifier_name": volume_name,
                             "volumeIdentifierChoice": "identifier_name"},
                         "volumeAttribute": {
                             "volume_size": volume_size,
                             "capacityUnit": "GB"}}}}})
        status_code, job = self.modify_storage_group(
            array, storagegroup_name, payload)

        LOG.debug("Create Volume: %(volumename)s. Status code: %(sc)lu.",
                  {'volumename': volume_name,
                   'sc': status_code})

        task = self.wait_for_job('Create volume', status_code,
                                 job, extra_specs)

        # Find the newly created volume by parsing the job description,
        # e.g. '... Creating new Volumes for <sg> ... [00001] ...'.
        device_id = None
        if task:
            for t in task:
                try:
                    desc = t["description"]
                    if CREATE_VOL_STRING in desc:
                        t_list = desc.split()
                        # Device id is the last token, wrapped in brackets.
                        device_id = t_list[(len(t_list) - 1)]
                        device_id = device_id[1:-1]
                        break
                    # NOTE(review): device_id is only set immediately before
                    # the break above, so this validation looks unreachable
                    # in practice -- confirm intended placement.
                    if device_id:
                        self.get_volume(array, device_id)
                except Exception as e:
                    LOG.info("Could not retrieve device id from job. "
                             "Exception received was %(e)s. Attempting "
                             "retrieval by volume_identifier.",
                             {'e': e})

        if not device_id:
            # Fall back to a filtered volume lookup on the identifier.
            device_id = self.find_volume_device_id(array, volume_name)

        volume_dict = {'array': array, 'device_id': device_id}
        return volume_dict
    def add_vol_to_sg(self, array, storagegroup_name, device_id, extra_specs):
        """Add a volume to a storage group.

        :param array: the array serial number
        :param storagegroup_name: storage group name
        :param device_id: the device id
        :param extra_specs: extra specifications
        """
        # The REST payload expects a list of device ids.
        if not isinstance(device_id, list):
            device_id = [device_id]
        payload = ({"executionOption": "ASYNCHRONOUS",
                    "editStorageGroupActionParam": {
                        "expandStorageGroupParam": {
                            "addSpecificVolumeParam": {
                                "volumeId": device_id}}}})
        status_code, job = self.modify_storage_group(
            array, storagegroup_name, payload)

        self.wait_for_job('Add volume to sg', status_code, job, extra_specs)

    @retry(retry_exc_tuple, interval=2, retries=3)
    def remove_vol_from_sg(self, array, storagegroup_name,
                           device_id, extra_specs):
        """Remove a volume from a storage group.

        :param array: the array serial number
        :param storagegroup_name: storage group name
        :param device_id: the device id
        :param extra_specs: the extra specifications
        """
        if not isinstance(device_id, list):
            device_id = [device_id]
        payload = ({"executionOption": "ASYNCHRONOUS",
                    "editStorageGroupActionParam": {
                        "removeVolumeParam": {
                            "volumeId": device_id}}})
        status_code, job = self.modify_storage_group(
            array, storagegroup_name, payload)

        self.wait_for_job('Remove vol from sg', status_code, job, extra_specs)

    def get_vmax_default_storage_group(self, array, srp, slo, workload):
        """Get the default storage group.

        :param array: the array serial number
        :param srp: the pool name
        :param slo: the SLO
        :param workload: the workload
        :returns: the storage group dict (or None), the storage group name
        """
        storagegroup_name = self.utils.get_default_storage_group_name(
            srp, slo, workload)
        storagegroup = self.get_storage_group(array, storagegroup_name)
        return storagegroup, storagegroup_name

    def delete_storage_group(self, array, storagegroup_name):
        """Delete a storage group.

        :param array: the array serial number
        :param storagegroup_name: storage group name
        """
        self.delete_resource(
            array, SLOPROVISIONING, 'storagegroup', storagegroup_name)
        LOG.debug("Storage Group successfully deleted.")

    def get_volume(self, array, device_id):
        """Get a VMAX volume from array.

        :param array: the array serial number
        :param device_id: the volume device id
        :return: volume dict
        :raises: VolumeBackendAPIException
        """
        volume_dict = self.get_resource(
            array, SLOPROVISIONING, 'volume', resource_name=device_id)
        if not volume_dict:
            exception_message = (_("Volume %(deviceID)s not found.")
                                 % {'deviceID': device_id})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)
        return volume_dict

    def _get_private_volume(self, array, device_id):
        """Get a more detailed list of attributes of a volume.

        Uses the private volume endpoint, keyed on the volume wwn.
        :param array: the array serial number
        :param device_id: the volume device id
        :return: volume dict
        :raises: VolumeBackendAPIException
        """
        try:
            wwn = (self.get_volume(array, device_id))['wwn']
            params = {'wwn': wwn}
            volume_info = self.get_resource(
                array, SLOPROVISIONING, 'volume', params=params,
                private='/private')
            volume_dict = volume_info['resultList']['result'][0]
        except KeyError:
            exception_message = (_("Volume %(deviceID)s not found.")
                                 % {'deviceID': device_id})
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)
        return volume_dict
+ :param array: the array serial number + :param params: filter parameters + :return: device_ids -- list + """ + device_ids = [] + volumes = self.get_resource( + array, SLOPROVISIONING, 'volume', params=params) + try: + volume_dict_list = volumes['resultList']['result'] + for vol_dict in volume_dict_list: + device_id = vol_dict['volumeId'] + device_ids.append(device_id) + except (KeyError, TypeError): + pass + return device_ids + + def _modify_volume(self, array, device_id, payload): + """Modify a volume (PUT operation). + + :param array: the array serial number + :param device_id: volume device id + :param payload: the request payload + """ + return self.modify_resource(array, SLOPROVISIONING, 'volume', + payload, resource_name=device_id) + + def extend_volume(self, array, device_id, new_size, extra_specs): + """Extend a VMAX volume. + + :param array: the array serial number + :param device_id: volume device id + :param new_size: the new required size for the device + :param extra_specs: the extra specifications + """ + extend_vol_payload = {"executionOption": "ASYNCHRONOUS", + "editVolumeActionParam": { + "expandVolumeParam": { + "volumeAttribute": { + "volume_size": new_size, + "capacityUnit": "GB"}}}} + + status_code, job = self._modify_volume( + array, device_id, extend_vol_payload) + LOG.debug("Extend Device: %(device_id)s. Status code: %(sc)lu.", + {'device_id': device_id, 'sc': status_code}) + self.wait_for_job('Extending volume', status_code, job, extra_specs) + + def rename_volume(self, array, device_id, new_name): + """Rename a volume. 
+ + :param array: the array serial number + :param device_id: the volume device id + :param new_name: the new name for the volume + """ + rename_vol_payload = {"editVolumeActionParam": { + "modifyVolumeIdentifierParam": { + "volumeIdentifier": { + "identifier_name": new_name, + "volumeIdentifierChoice": "identifier_name"}}}} + self._modify_volume(array, device_id, rename_vol_payload) + + def delete_volume(self, array, device_id): + """Deallocate and delete a volume. + + :param array: the array serial number + :param device_id: volume device id + """ + # Deallocate volume + payload = {"editVolumeActionParam": { + "freeVolumeParam": {"free_volume": 'true'}}} + try: + self._modify_volume(array, device_id, payload) + except Exception as e: + LOG.warning('Deallocate volume failed with %(e)s.' + 'Attempting delete.', {'e': e}) + # Delete volume + self.delete_resource(array, SLOPROVISIONING, "volume", device_id) + + def find_mv_connections_for_vol(self, array, maskingview, device_id): + """Find the host_lun_id for a volume in a masking view. 
+ + :param array: the array serial number + :param maskingview: the masking view name + :param device_id: the device ID + :return: host_lun_id -- int + """ + host_lun_id = None + resource_name = ('%(maskingview)s/connections' + % {'maskingview': maskingview}) + params = {'volume_id': device_id} + connection_info = self.get_resource( + array, SLOPROVISIONING, 'maskingview', + resource_name=resource_name, params=params) + if not connection_info: + LOG.error('Cannot retrive masking view connection information ' + 'for %(mv)s.', {'mv': maskingview}) + else: + try: + host_lun_id = (connection_info['maskingViewConnection'] + [0]['host_lun_address']) + host_lun_id = int(host_lun_id, 16) + except Exception as e: + LOG.error("Unable to retrieve connection information " + "for volume %(vol)s in masking view %(mv)s" + "Exception received: %(e)s.", + {'vol': device_id, 'mv': maskingview, + 'e': e}) + return host_lun_id + + def get_storage_groups_from_volume(self, array, device_id): + """Returns all the storage groups for a particular volume. + + :param array: the array serial number + :param device_id: the volume device id + :return: storagegroup_list + """ + sg_list = [] + vol = self.get_volume(array, device_id) + if vol and vol.get('storageGroupId'): + sg_list = vol['storageGroupId'] + num_storage_groups = len(sg_list) + LOG.debug("There are %(num)d storage groups associated " + "with volume %(deviceId)s.", + {'num': num_storage_groups, 'deviceId': device_id}) + return sg_list + + def is_volume_in_storagegroup(self, array, device_id, storagegroup): + """See if a volume is a member of the given storage group. 
+ + :param array: the array serial number + :param device_id: the device id + :param storagegroup: the storage group name + :return: bool + """ + is_vol_in_sg = False + sg_list = self.get_storage_groups_from_volume(array, device_id) + if storagegroup in sg_list: + is_vol_in_sg = True + return is_vol_in_sg + + def find_volume_device_id(self, array, volume_name): + """Given a volume identifier, find the corresponding device_id. + + :param array: the array serial number + :param volume_name: the volume name (OS-) + :return: device_id + """ + device_id = None + params = {"volume_identifier": volume_name} + + volume_list = self.get_volume_list(array, params) + if not volume_list: + LOG.debug("Cannot find record for volume %(volumeId)s.", + {'volumeId': volume_name}) + else: + device_id = volume_list[0] + return device_id + + def find_volume_identifier(self, array, device_id): + """Get the volume identifier of a VMAX volume. + + :param array: array serial number + :param device_id: the device id + :return: the volume identifier -- string + """ + vol = self.get_volume(array, device_id) + return vol['volume_identifier'] + + def get_size_of_device_on_array(self, array, device_id): + """Get the size of the volume from the array. + + :param array: the array serial number + :param device_id: the volume device id + :return: size -- or None + """ + cap = None + try: + vol = self.get_volume(array, device_id) + cap = vol['cap_gb'] + except Exception as e: + LOG.error("Error retrieving size of volume %(vol)s. " + "Exception received was %(e)s.", + {'vol': device_id, 'e': e}) + return cap + + def get_portgroup(self, array, portgroup): + """Get a portgroup from the array. + + :param array: array serial number + :param portgroup: the portgroup name + :return: portgroup dict or None + """ + return self.get_resource( + array, SLOPROVISIONING, 'portgroup', resource_name=portgroup) + + def get_port_ids(self, array, portgroup): + """Get a list of port identifiers from a port group. 
    def get_port_ids(self, array, portgroup):
        """Get a list of port identifiers from a port group.

        :param array: the array serial number
        :param portgroup: the name of the portgroup
        :return: list of port ids, e.g. ['FA-3D:35', 'FA-4D:32']
        """
        portlist = []
        portgroup_info = self.get_portgroup(array, portgroup)
        if portgroup_info:
            port_key = portgroup_info["symmetrixPortKey"]
            for key in port_key:
                port = key['portId']
                portlist.append(port)
        return portlist

    def get_port(self, array, port_id):
        """Get director port details.

        :param array: the array serial number
        :param port_id: the port id, in '<director>:<port>' format
        :return: port dict, or None
        """
        dir_id = port_id.split(':')[0]
        port_no = port_id.split(':')[1]

        resource_name = ('%(directorId)s/port/%(port_number)s'
                         % {'directorId': dir_id, 'port_number': port_no})
        return self.get_resource(array, SLOPROVISIONING, 'director',
                                 resource_name=resource_name)

    def get_iscsi_ip_address_and_iqn(self, array, port_id):
        """Get the IPv4Address from the director port.

        :param array: the array serial number
        :param port_id: the director port identifier
        :return: (list of ip_addresses, iqn)
        """
        ip_addresses, iqn = None, None
        port_details = self.get_port(array, port_id)
        if port_details:
            ip_addresses = port_details['symmetrixPort']['ip_addresses']
            iqn = port_details['symmetrixPort']['identifier']
        return ip_addresses, iqn

    def get_target_wwns(self, array, portgroup):
        """Get the director ports' wwns.

        :param array: the array serial number
        :param portgroup: portgroup
        :returns: target_wwns -- the list of target wwns for the masking view
        """
        target_wwns = []
        port_ids = self.get_port_ids(array, portgroup)
        for port in port_ids:
            port_info = self.get_port(array, port)
            if port_info:
                wwn = port_info['symmetrixPort']['identifier']
                target_wwns.append(wwn)
            else:
                LOG.error("Error retrieving port %(port)s "
                          "from portgroup %(portgroup)s.",
                          {'port': port, 'portgroup': portgroup})
        return target_wwns

    def get_initiator_group(self, array, initiator_group=None, params=None):
        """Retrieve initiator group details from the array.

        :param array: the array serial number
        :param initiator_group: the initiator group name
        :param params: optional filter parameters
        :return: initiator group dict, or None
        """
        return self.get_resource(
            array, SLOPROVISIONING, 'host',
            resource_name=initiator_group, params=params)

    def get_initiator(self, array, initiator_id):
        """Retrieve initiator details from the array.

        :param array: the array serial number
        :param initiator_id: the initiator id
        :return: initiator dict, or None
        """
        return self.get_resource(
            array, SLOPROVISIONING, 'initiator',
            resource_name=initiator_id)

    def get_initiator_list(self, array, params=None):
        """Retrieve initiator list from the array.

        :param array: the array serial number
        :param params: dict of optional params
        :return: list of initiators
        """
        init_dict = self.get_resource(
            array, SLOPROVISIONING, 'initiator', params=params)
        try:
            init_list = init_dict['initiatorId']
        except KeyError:
            init_list = []
        return init_list
    def get_in_use_initiator_list_from_array(self, array):
        """Get the list of initiators which are in-use from the array.

        Gets the list of initiators from the array which are in
        hosts/ initiator groups.
        :param array: the array serial number
        :return: init_list
        """
        params = {'in_a_host': 'true'}
        return self.get_initiator_list(array, params)

    def get_initiator_group_from_initiator(self, array, initiator):
        """Given an initiator, get its corresponding initiator group, if any.

        :param array: the array serial number
        :param initiator: the initiator id
        :return: found_init_group_name -- string
        """
        found_init_group_name = None
        init_details = self.get_initiator(array, initiator)
        if init_details:
            found_init_group_name = init_details.get('host')
        else:
            LOG.error("Unable to retrieve initiator details for "
                      "%(init)s.", {'init': initiator})
        return found_init_group_name

    def create_initiator_group(self, array, init_group_name,
                               init_list, extra_specs):
        """Create a new initiator group containing the given initiators.

        :param array: the array serial number
        :param init_group_name: the initiator group name
        :param init_list: the list of initiators
        :param extra_specs: extra specifications
        """
        new_ig_data = ({"executionOption": "ASYNCHRONOUS",
                        "hostId": init_group_name, "initiatorId": init_list})
        sc, job = self.create_resource(array, SLOPROVISIONING,
                                       'host', new_ig_data)
        self.wait_for_job('create initiator group', sc, job, extra_specs)

    def delete_initiator_group(self, array, initiatorgroup_name):
        """Delete an initiator group.

        :param array: the array serial number
        :param initiatorgroup_name: initiator group name
        """
        self.delete_resource(
            array, SLOPROVISIONING, 'host', initiatorgroup_name)
        LOG.debug("Initiator Group successfully deleted.")

    def get_masking_view(self, array, masking_view_name):
        """Get details of a masking view.

        :param array: array serial number
        :param masking_view_name: the masking view name
        :return: masking view dict
        """
        return self.get_resource(
            array, SLOPROVISIONING, 'maskingview', masking_view_name)

    def get_masking_view_list(self, array, params):
        """Get a list of masking views from the array.

        :param array: array serial number
        :param params: optional GET parameters
        :return: masking view list
        """
        masking_view_list = []
        masking_view_details = self.get_resource(
            array, SLOPROVISIONING, 'maskingview', params=params)
        try:
            masking_view_list = masking_view_details['maskingViewId']
        except (KeyError, TypeError):
            # No masking views found: return an empty list.
            pass
        return masking_view_list

    def get_masking_views_from_storage_group(self, array, storagegroup):
        """Return any masking views associated with a storage group.

        :param array: the array serial number
        :param storagegroup: the storage group name
        :return: masking view list
        """
        maskingviewlist = []
        storagegroup = self.get_storage_group(array, storagegroup)
        if storagegroup and storagegroup.get('maskingview'):
            maskingviewlist = storagegroup['maskingview']
        return maskingviewlist
    def get_masking_views_by_initiator_group(
            self, array, initiatorgroup_name):
        """Given initiator group, retrieve the masking view instance name.

        Retrieve the list of masking view instances associated with the
        given initiator group.
        :param array: the array serial number
        :param initiatorgroup_name: the name of the initiator group
        :returns: list of masking view names
        """
        masking_view_list = []
        ig_details = self.get_initiator_group(
            array, initiatorgroup_name)
        if ig_details:
            if ig_details.get('maskingview'):
                masking_view_list = ig_details['maskingview']
        else:
            LOG.error("Error retrieving initiator group %(ig_name)s",
                      {'ig_name': initiatorgroup_name})
        return masking_view_list

    def get_element_from_masking_view(
            self, array, maskingview_name, portgroup=False, host=False,
            storagegroup=False):
        """Return the name of the specified element from a masking view.

        Exactly one of the portgroup/host/storagegroup flags selects which
        element name to return.
        :param array: the array serial number
        :param maskingview_name: the masking view name
        :param portgroup: the port group name - optional
        :param host: the host name - optional
        :param storagegroup: the storage group name - optional
        :return: name of the specified element -- string
        :raises: VolumeBackendAPIException
        """
        element = None
        masking_view_details = self.get_masking_view(array, maskingview_name)
        if masking_view_details:
            if portgroup:
                element = masking_view_details['portGroupId']
            elif host:
                element = masking_view_details['hostId']
            elif storagegroup:
                element = masking_view_details['storageGroupId']
        else:
            exception_message = (_("Error retrieving masking group."))
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(data=exception_message)
        return element

    def get_common_masking_views(self, array, portgroup_name, ig_name):
        """Get common masking views for a given portgroup and initiator group.

        :param array: the array serial number
        :param portgroup_name: the port group name
        :param ig_name: the initiator group name
        :return: masking view list
        """
        params = {'port_group_name': portgroup_name,
                  'host_or_host_group_name': ig_name}
        masking_view_list = self.get_masking_view_list(array, params)
        if not masking_view_list:
            LOG.info("No common masking views found for %(pg_name)s "
                     "and %(ig_name)s.",
                     {'pg_name': portgroup_name, 'ig_name': ig_name})
        return masking_view_list

    def create_masking_view(self, array, maskingview_name, storagegroup_name,
                            port_group_name, init_group_name, extra_specs):
        """Create a new masking view.

        :param array: the array serial number
        :param maskingview_name: the masking view name
        :param storagegroup_name: the storage group name
        :param port_group_name: the port group
        :param init_group_name: the initiator group
        :param extra_specs: extra specifications
        """
        payload = ({"executionOption": "ASYNCHRONOUS",
                    "portGroupSelection": {
                        "useExistingPortGroupParam": {
                            "portGroupId": port_group_name}},
                    "maskingViewId": maskingview_name,
                    "hostOrHostGroupSelection": {
                        "useExistingHostParam": {
                            "hostId": init_group_name}},
                    "storageGroupSelection": {
                        "useExistingStorageGroupParam": {
                            "storageGroupId": storagegroup_name}}})

        status_code, job = self.create_resource(
            array, SLOPROVISIONING, 'maskingview', payload)

        self.wait_for_job('Create masking view', status_code, job, extra_specs)

    def delete_masking_view(self, array, maskingview_name):
        """Delete a masking view.

        :param array: the array serial number
        :param maskingview_name: the masking view name
        """
        return self.delete_resource(
            array, SLOPROVISIONING, 'maskingview', maskingview_name)
    def get_replication_capabilities(self, array):
        """Check what replication features are licensed and enabled.

        Example return value for this method:
        {"symmetrixId": "000197800128",
         "snapVxCapable": true,
         "rdfCapable": true}
        :param: array
        :returns: capabilities dict for the given array
        """
        array_capabilities = None
        target_uri = ("/%s/replication/capabilities/symmetrix"
                      % U4V_VERSION)
        capabilities = self._get_request(
            target_uri, 'replication capabilities')
        if capabilities:
            symm_list = capabilities['symmetrixCapability']
            for symm in symm_list:
                if symm['symmetrixId'] == array:
                    array_capabilities = symm
                    break
        return array_capabilities

    def is_snapvx_licensed(self, array):
        """Check if the snapVx feature is licensed and enabled.

        :param array: the array serial number
        :returns: True if licensed and enabled; False otherwise.
        """
        snap_capability = False
        capabilities = self.get_replication_capabilities(array)
        if capabilities:
            snap_capability = capabilities['snapVxCapable']
        else:
            LOG.error("Cannot access replication capabilities "
                      "for array %(array)s", {'array': array})
        return snap_capability

    def create_volume_snap(self, array, snap_name, device_id, extra_specs):
        """Create a snapVx snapshot of a volume.

        :param array: the array serial number
        :param snap_name: the name of the snapshot
        :param device_id: the source device id
        :param extra_specs: the extra specifications
        """
        payload = {"deviceNameListSource": [{"name": device_id}],
                   "bothSides": 'false', "star": 'false',
                   "force": 'false'}
        resource_type = 'snapshot/%(snap)s' % {'snap': snap_name}
        status_code, job = self.create_resource(
            array, REPLICATION, resource_type,
            payload, private='/private')
        self.wait_for_job('Create volume snapVx', status_code,
                          job, extra_specs)

    def modify_volume_snap(self, array, source_id, target_id, snap_name,
                           extra_specs, link=False, unlink=False):
        """Link or unlink a snapVx to or from a target volume.

        If neither flag is set, this is a no-op.
        :param array: the array serial number
        :param source_id: the source device id
        :param target_id: the target device id
        :param snap_name: the snapshot name
        :param extra_specs: extra specifications
        :param link: Flag to indicate action = Link
        :param unlink: Flag to indicate action = Unlink
        """
        action = ''
        if link:
            action = "Link"
        elif unlink:
            action = "Unlink"
        if action:
            payload = {"deviceNameListSource": [{"name": source_id}],
                       "deviceNameListTarget": [
                           {"name": target_id}],
                       "copy": 'true', "action": action,
                       "star": 'false', "force": 'false',
                       "exact": 'false', "remote": 'false',
                       "symforce": 'false', "nocopy": 'false'}
            status_code, job = self.modify_resource(
                array, REPLICATION, 'snapshot', payload,
                resource_name=snap_name, private='/private')

            self.wait_for_job('Modify snapVx relationship to target',
                              status_code, job, extra_specs)

    def delete_volume_snap(self, array, snap_name, source_device_id):
        """Delete the snapshot of a volume.

        :param array: the array serial number
        :param snap_name: the name of the snapshot
        :param source_device_id: the source device id
        """
        payload = {"deviceNameListSource": [{"name": source_device_id}]}
        return self.delete_resource(
            array, REPLICATION, 'snapshot', snap_name, payload=payload,
            private='/private')

    def get_volume_snap_info(self, array, source_device_id):
        """Get snapVx information associated with a volume.

        :param array: the array serial number
        :param source_device_id: the source volume device ID
        :return: message -- dict, or None
        """
        resource_name = ("%(device_id)s/snapshot"
                         % {'device_id': source_device_id})
        return self.get_resource(array, REPLICATION, 'volume',
                                 resource_name, private='/private')
    def get_volume_snap(self, array, device_id, snap_name):
        """Given a volume snap info, retrieve the snapVx object.

        :param array: the array serial number
        :param device_id: the source volume device id
        :param snap_name: the name of the snapshot
        :return: snapshot dict, or None
        """
        snapshot = None
        snap_info = self.get_volume_snap_info(array, device_id)
        if snap_info:
            if (snap_info.get('snapshotSrcs') and
                    bool(snap_info['snapshotSrcs'])):
                for snap in snap_info['snapshotSrcs']:
                    if snap['snapshotName'] == snap_name:
                        snapshot = snap
        return snapshot

    def get_volume_snapshot_list(self, array, source_device_id):
        """Get a list of snapshot details for a particular volume.

        :param array: the array serial number
        :param source_device_id: the source device id
        :return: snapshot list or None
        """
        snapshot_list = []
        snap_info = self.get_volume_snap_info(array, source_device_id)
        if snap_info:
            if bool(snap_info['snapshotSrcs']):
                snapshot_list = snap_info['snapshotSrcs']
        return snapshot_list

    def is_vol_in_rep_session(self, array, device_id):
        """Check if a volume is in a replication session.

        :param array: the array serial number
        :param device_id: the device id
        :return: snapvx_tgt -- bool, snapvx_src -- bool,
                 rdf_grp -- list or None
        """
        snapvx_src = False
        snapvx_tgt = False
        rdf_grp = None
        volume_details = self.get_volume(array, device_id)
        if volume_details:
            # The REST API reports these flags as the strings 'true'/'false'.
            if volume_details.get('snapvx_target'):
                snap_target = volume_details['snapvx_target']
                snapvx_tgt = True if snap_target == 'true' else False
            if volume_details.get('snapvx_source'):
                snap_source = volume_details['snapvx_source']
                snapvx_src = True if snap_source == 'true' else False
            if volume_details.get('rdfGroupId'):
                rdf_grp = volume_details['rdfGroupId']
        return snapvx_tgt, snapvx_src, rdf_grp

    def is_sync_complete(self, array, source_device_id,
                         target_device_id, snap_name, extra_specs):
        """Check if a sync session is complete.

        Polls at a fixed interval until the session is defined or the
        retry count is exceeded.
        :param array: the array serial number
        :param source_device_id: source device id
        :param target_device_id: target device id
        :param snap_name: snapshot name
        :param extra_specs: extra specifications
        :return: bool
        """

        def _wait_for_sync():
            """Called at an interval until the synchronization is finished.

            :raises: loopingcall.LoopingCallDone
            :raises: VolumeBackendAPIException
            """
            retries = kwargs['retries']
            try:
                kwargs['retries'] = retries + 1
                if not kwargs['wait_for_sync_called']:
                    if self._is_sync_complete(
                            array, source_device_id, snap_name,
                            target_device_id):
                        kwargs['wait_for_sync_called'] = True
            except Exception:
                exception_message = (_("Issue encountered waiting for "
                                       "synchronization."))
                LOG.exception(exception_message)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

            if kwargs['retries'] > int(extra_specs[utils.RETRIES]):
                LOG.error("_wait_for_sync failed after %(retries)d "
                          "tries.", {'retries': retries})
                raise loopingcall.LoopingCallDone(
                    retvalue=int(extra_specs[utils.RETRIES]))
            if kwargs['wait_for_sync_called']:
                raise loopingcall.LoopingCallDone()

        # Mutable state shared with the closure above.
        kwargs = {'retries': 0,
                  'wait_for_sync_called': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync)
        rc = timer.start(interval=int(
            extra_specs[utils.INTERVAL])).wait()
        return rc

    def _is_sync_complete(self, array, source_device_id, snap_name,
                          target_device_id):
        """Helper function to check if snapVx sync session is complete.

        :param array: the array serial number
        :param source_device_id: source device id
        :param snap_name: the snapshot name
        :param target_device_id: the target device id
        :return: defined -- bool
        """
        defined = True
        session = self._get_sync_session(
            array, source_device_id, snap_name, target_device_id)
        if session:
            defined = session['defined']
        return defined

    def _get_sync_session(self, array, source_device_id, snap_name,
                          target_device_id):
        """Get a particular sync session.

        :param array: the array serial number
        :param source_device_id: source device id
        :param snap_name: the snapshot name
        :param target_device_id: the target device id
        :return: sync session -- dict, or None
        """
        session = None
        linked_device_list = self.get_snap_linked_device_list(
            array, source_device_id, snap_name)
        for target in linked_device_list:
            if target_device_id == target['targetDevice']:
                session = target
        return session

    def _find_snap_vx_source_sessions(self, array, source_device_id):
        """Find all snap sessions for a given source volume.

        :param array: the array serial number
        :param source_device_id: the source device id
        :return: list of snapshot dicts
        """
        snap_dict_list = []
        snapshots = self.get_volume_snapshot_list(array, source_device_id)
        for snapshot in snapshots:
            if bool(snapshot['linkedDevices']):
                link_info = {'linked_vols': snapshot['linkedDevices'],
                             'snap_name': snapshot['snapshotName']}
                snap_dict_list.append(link_info)
        return snap_dict_list

    def get_snap_linked_device_list(self, array, source_device_id, snap_name):
        """Get the list of linked devices for a particular snapVx snapshot.
+ + :param array: the array serial number + :param source_device_id: source device id + :param snap_name: the snapshot name + :return: linked_device_list + """ + linked_device_list = [] + snap_list = self._find_snap_vx_source_sessions(array, source_device_id) + for snap in snap_list: + if snap['snap_name'] == snap_name: + linked_device_list = snap['linked_vols'] + return linked_device_list + + def find_snap_vx_sessions(self, array, device_id, tgt_only=False): + """Find all snapVX sessions for a device (source and target). + + :param array: the array serial number + :param device_id: the device id + :param tgt_only: Flag - return only sessions where device is target + :return: list of snapshot dicts + """ + snap_dict_list, sessions = [], [] + vol_details = self._get_private_volume(array, device_id) + snap_vx_info = vol_details['timeFinderInfo'] + is_snap_src = snap_vx_info['snapVXSrc'] + is_snap_tgt = snap_vx_info['snapVXTgt'] + if snap_vx_info.get('snapVXSession'): + sessions = snap_vx_info['snapVXSession'] + if is_snap_src and not tgt_only: + for session in sessions: + if session.get('srcSnapshotGenInfo'): + src_list = session['srcSnapshotGenInfo'] + for src in src_list: + snap_name = src['snapshotHeader']['snapshotName'] + target_list, target_dict = [], {} + if src.get('lnkSnapshotGenInfo'): + target_dict = src['lnkSnapshotGenInfo'] + for tgt in target_dict: + target_list.append(tgt['targetDevice']) + link_info = {'target_vol_list': target_list, + 'snap_name': snap_name, + 'source_vol': device_id} + snap_dict_list.append(link_info) + if is_snap_tgt: + for session in sessions: + if session.get('tgtSrcSnapshotGenInfo'): + tgt = session['tgtSrcSnapshotGenInfo'] + snap_name = tgt['snapshotName'] + target_list = [tgt['targetDevice']] + source_vol = tgt['sourceDevice'] + link_info = {'target_vol_list': target_list, + 'snap_name': snap_name, + 'source_vol': source_vol} + snap_dict_list.append(link_info) + return snap_dict_list diff --git 
a/cinder/volume/drivers/dell_emc/vmax/utils.py b/cinder/volume/drivers/dell_emc/vmax/utils.py index 37d7a626863..ebbf78e75d4 100644 --- a/cinder/volume/drivers/dell_emc/vmax/utils.py +++ b/cinder/volume/drivers/dell_emc/vmax/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 - 2015 EMC Corporation. +# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -13,1128 +13,98 @@ # License for the specific language governing permissions and limitations # under the License. -import ast import datetime import hashlib -import os import random import re -import tempfile -import time from xml.dom import minidom from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_service import loopingcall -from oslo_utils import units import six -from cinder import context from cinder import exception from cinder.i18n import _ -from cinder.objects import fields from cinder.volume import volume_types LOG = logging.getLogger(__name__) - -try: - import pywbem - pywbemAvailable = True -except ImportError: - pywbemAvailable = False - -STORAGEGROUPTYPE = 4 -POSTGROUPTYPE = 3 -CLONE_REPLICATION_TYPE = 10 -SYNC_SNAPSHOT_LOCAL = 6 -ASYNC_SNAPSHOT_LOCAL = 7 -MAX_POOL_LENGTH = 16 -MAX_FASTPOLICY_LENGTH = 14 - -EMC_ROOT = 'root/emc' -CONCATENATED = 'concatenated' -CINDER_EMC_CONFIG_FILE_PREFIX = '/etc/cinder/cinder_emc_config_' -CINDER_EMC_CONFIG_FILE_POSTFIX = '.xml' -LIVE_MIGRATION_FILE = tempfile.gettempdir() + '/livemigrationarray' +# SHARED CONSTANTS ISCSI = 'iscsi' FC = 'fc' -JOB_RETRIES = 60 -INTERVAL_10_SEC = 10 -INTERVAL = 'storagetype:interval' -RETRIES = 'storagetype:retries' -CIM_ERR_NOT_FOUND = 6 +INTERVAL = 'interval' +RETRIES = 'retries' VOLUME_ELEMENT_NAME_PREFIX = 'OS-' -SYNCHRONIZED = 4 -RDF_FAILOVER = 10 -SMI_VERSION_83 = 830 -IS_RE = 'replication_enabled' -REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER +MAX_SRP_LENGTH = 16 +TRUNCATE_5 = 5 
+TRUNCATE_27 = 27 + +ARRAY = 'array' +SLO = 'slo' +WORKLOAD = 'workload' +SRP = 'srp' +PORTGROUPNAME = 'port_group_name' +DEVICE_ID = 'device_id' +INITIATOR_CHECK = 'initiator_check' +SG_NAME = 'storagegroup_name' +MV_NAME = 'maskingview_name' +IG_NAME = 'init_group_name' +PARENT_SG_NAME = 'parent_sg_name' +CONNECTOR = 'connector' +VOL_NAME = 'volume_name' +EXTRA_SPECS = 'extra_specs' class VMAXUtils(object): - """Utility class for SMI-S based EMC volume drivers. + """Utility class for Rest based VMAX volume drivers. - This Utility class is for EMC volume drivers based on SMI-S. - It supports VMAX arrays. + This Utility class is for VMAX volume drivers based on Unisphere Rest API. """ - SLO = 'storagetype:slo' - WORKLOAD = 'storagetype:workload' - POOL = 'storagetype:pool' - ARRAY = 'storagetype:array' - DISABLECOMPRESSION = 'storagetype:disablecompression' - def __init__(self, prtcl): - if not pywbemAvailable: - LOG.info( - "Module PyWBEM not installed. " - "Install PyWBEM using the python-pywbem package.") - self.protocol = prtcl + def __init__(self): + """Utility class for Rest based VMAX volume drivers.""" - def _find_service(self, conn, instance_name, - service_type, storageSystemName): - """Get service with given storage system name. 
- - :param conn: connection to the ecom server - :param instance_name: instance name - :param service_type: message into the log - :param storageSystemName: the storage system name - :returns: foundConfigService - :raises: VolumeBackendAPIException - """ - foundService = None - services = conn.EnumerateInstanceNames(instance_name) - for service in services: - if storageSystemName == service['SystemName']: - foundService = service - LOG.debug("Found %(service_type)s Service: %(service)s.", - {'service_type': service_type, - 'service': service}) - break - - if foundService is None: - exceptionMessage = (_("%(service_type)s not found on %(storage)s.") - % {'service_type': service_type, - 'storage': storageSystemName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return foundService - - def find_storage_configuration_service(self, conn, storageSystemName): - # Get storage configuration service with given storage system name. - return self._find_service(conn, 'EMC_StorageConfigurationService', - 'Storage Configuration Service', - storageSystemName) - - def find_controller_configuration_service(self, conn, storageSystemName): - # Get the controller config by using the storage service name. - return self._find_service(conn, 'EMC_ControllerConfigurationService', - 'Controller Configuration Service', - storageSystemName) - - def find_element_composition_service(self, conn, storageSystemName): - # Given the storage system name, get the element composition service. - return self._find_service(conn, 'Symm_ElementCompositionService', - 'Element Composition Service', - storageSystemName) - - def find_storage_relocation_service(self, conn, storageSystemName): - # Given the storage system name, get the storage relocation service. 
- return self._find_service(conn, 'Symm_StorageRelocationService', - 'Element Composition Service', - storageSystemName) - - def find_storage_hardwareid_service(self, conn, storageSystemName): - # Given the storage system name, get the storage hardware service. - return self._find_service(conn, - 'EMC_StorageHardwareIDManagementService', - 'Storage Hardware ID Management Service', - storageSystemName) - - def find_replication_service(self, conn, storageSystemName): - # Given the storage system name, get the replication service. - return self._find_service(conn, 'EMC_ReplicationService', - 'Replication Service', storageSystemName) - - def get_tier_policy_service(self, conn, storageSystemInstanceName): - """Gets the tier policy service for a given storage system instance. - - Given the storage system instance name, get the existing tier - policy service. - - :param conn: the connection information to the ecom server - :param storageSystemInstanceName: the storageSystem instance Name - :returns: foundTierPolicyService - the tier policy - service instance name - :raises: VolumeBackendAPIException - """ - foundTierPolicyService = None - groups = conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='Symm_TierPolicyService', - AssocClass='CIM_HostedService') - - if len(groups) > 0: - foundTierPolicyService = groups[0] - if foundTierPolicyService is None: - exceptionMessage = (_( - "Tier Policy Service not found " - "for %(storageSystemName)s.") - % {'storageSystemName': storageSystemInstanceName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return foundTierPolicyService - - def wait_for_job_complete(self, conn, job, extraSpecs=None): - """Given the job wait for it to complete. - - :param conn: connection to the ecom server - :param job: the job dict - :param extraSpecs: the extraSpecs dict. 
Defaults to None - :raises: VolumeBackendAPIException - """ - - def _wait_for_job_complete(): - # Called at an interval until the job is finished. - retries = kwargs['retries'] - try: - kwargs['retries'] = retries + 1 - if not kwargs['wait_for_job_called']: - if self._is_job_finished(conn, job): - kwargs['rc'], kwargs['errordesc'] = ( - self._verify_job_state(conn, job)) - kwargs['wait_for_job_called'] = True - except Exception: - exceptionMessage = (_("Issue encountered waiting for job.")) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(exceptionMessage) - - if retries > maxJobRetries: - kwargs['rc'], kwargs['errordesc'] = ( - self._verify_job_state(conn, job)) - LOG.error("_wait_for_job_complete failed after %(retries)d " - "tries.", - {'retries': retries}) - - raise loopingcall.LoopingCallDone() - if kwargs['wait_for_job_called']: - raise loopingcall.LoopingCallDone() - maxJobRetries = self._get_max_job_retries(extraSpecs) - kwargs = {'retries': 0, - 'wait_for_job_called': False, - 'rc': 0, - 'errordesc': None} - - intervalInSecs = self._get_interval_in_secs(extraSpecs) - - timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) - timer.start(interval=intervalInSecs).wait() - LOG.debug("Return code is: %(rc)lu. 
" - "Error Description is: %(errordesc)s.", - {'rc': kwargs['rc'], - 'errordesc': kwargs['errordesc']}) - return kwargs['rc'], kwargs['errordesc'] - - def _get_max_job_retries(self, extraSpecs): - """Get max job retries either default or user defined - - :param extraSpecs: extraSpecs dict - :returns: JOB_RETRIES or user defined - """ - if extraSpecs and RETRIES in extraSpecs: - jobRetries = extraSpecs[RETRIES] - else: - jobRetries = JOB_RETRIES - return int(jobRetries) - - def _get_interval_in_secs(self, extraSpecs): - """Get interval in secs, either default or user defined - - :param extraSpecs: extraSpecs dict - :returns: INTERVAL_10_SEC or user defined - """ - if extraSpecs and INTERVAL in extraSpecs: - intervalInSecs = extraSpecs[INTERVAL] - else: - intervalInSecs = INTERVAL_10_SEC - return int(intervalInSecs) - - def _is_job_finished(self, conn, job): - """Check if the job is finished. - - :param conn: connection to the ecom server - :param job: the job dict - :returns: boolean -- True if finished; False if not finished; - """ - - jobInstanceName = job['Job'] - jobinstance = conn.GetInstance(jobInstanceName, - LocalOnly=False) - jobstate = jobinstance['JobState'] - # From ValueMap of JobState in CIM_ConcreteJob - # 2=New, 3=Starting, 4=Running, 32767=Queue Pending - # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, - # 32768..65535"), - # Values("New, Starting, Running, Suspended, Shutting Down, - # Completed, Terminated, Killed, Exception, Service, - # Query Pending, DMTF Reserved, Vendor Reserved")] - if jobstate in [2, 3, 4, 32767]: - return False - else: - return True - - def _verify_job_state(self, conn, job): - """Check if the job is finished. 
- - :param conn: connection to the ecom server - :param job: the job dict - :returns: boolean -- True if finished; False if not finished; - """ - jobstatedict = {2: 'New', - 3: 'Starting', - 4: 'Running', - 5: 'Suspended', - 6: 'Shutting Down', - 7: 'Completed', - 8: 'Terminated', - 9: 'Killed', - 10: 'Exception', - 11: 'Service', - 32767: 'Queue Pending', - 32768: 'DMTF Reserved', - 65535: 'Vendor Reserved'} - jobInstanceName = job['Job'] - jobinstance = conn.GetInstance(jobInstanceName, - LocalOnly=False) - operationalstatus = jobinstance['OperationalStatus'] - if not operationalstatus: - jobstate = jobinstance['JobState'] - errordescription = (_( - "The job has not completed and is in a %(state)s " - "state.") - % {'state': jobstatedict[int(jobstate)]}) - LOG.error(errordescription) - errorcode = -1 - else: - errordescription = jobinstance['ErrorDescription'] - errorcode = jobinstance['ErrorCode'] - return errorcode, errordescription - - def wait_for_sync(self, conn, syncName, extraSpecs=None): - """Given the sync name wait for it to fully synchronize. - - :param conn: connection to the ecom server - :param syncName: the syncName - :param extraSpecs: extra specifications - :raises: loopingcall.LoopingCallDone - :raises: VolumeBackendAPIException - """ - - def _wait_for_sync(): - """Called at an interval until the synchronization is finished. 
- - :raises: loopingcall.LoopingCallDone - :raises: VolumeBackendAPIException - """ - retries = kwargs['retries'] - try: - kwargs['retries'] = retries + 1 - if not kwargs['wait_for_sync_called']: - if self._is_sync_complete(conn, syncName): - kwargs['wait_for_sync_called'] = True - except Exception: - exceptionMessage = (_("Issue encountered waiting for " - "synchronization.")) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException(exceptionMessage) - - if kwargs['retries'] > maxJobRetries: - LOG.error("_wait_for_sync failed after %(retries)d tries.", - {'retries': retries}) - raise loopingcall.LoopingCallDone(retvalue=maxJobRetries) - if kwargs['wait_for_sync_called']: - raise loopingcall.LoopingCallDone() - - maxJobRetries = self._get_max_job_retries(extraSpecs) - kwargs = {'retries': 0, - 'wait_for_sync_called': False} - intervalInSecs = self._get_interval_in_secs(extraSpecs) - timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync) - rc = timer.start(interval=intervalInSecs).wait() - return rc - - def _is_sync_complete(self, conn, syncName): - """Check if the job is finished. - - :param conn: connection to the ecom server - :param syncName: the sync name - :returns: True if fully synchronized; False if not; - """ - syncInstance = conn.GetInstance(syncName, - LocalOnly=False) - copyState = syncInstance['CopyState'] - LOG.debug("CopyState is %(copyState)lu.", - {'copyState': copyState}) - - return copyState == SYNCHRONIZED - - def get_num(self, numStr, datatype): - """Get the ecom int from the number. 
- - :param numStr: the number in string format - :param datatype: the type to convert it to - :returns: result - """ - try: - result = { - '8': pywbem.Uint8(numStr), - '16': pywbem.Uint16(numStr), - '32': pywbem.Uint32(numStr), - '64': pywbem.Uint64(numStr) - } - result = result.get(datatype, numStr) - except NameError: - result = numStr - - return result - - def find_storage_system(self, conn, configService): - """Finds the storage system for a particular config service. - - Given the storage configuration service get the CIM_StorageSystem - from it. - - :param conn: the connection to the ecom server - :param configService: the storage configuration service - :returns: int -- rc - the return code of the job - :returns: dict -- jobDict - the job dict - """ - foundStorageSystemInstanceName = None - groups = conn.AssociatorNames( - configService, - AssocClass='CIM_HostedService') - - if len(groups) > 0: - foundStorageSystemInstanceName = groups[0] - else: - LOG.error("Cannot get storage system.") - raise - - return foundStorageSystemInstanceName - - def get_storage_group_from_volume(self, conn, volumeInstanceName, sgName): - """Returns the storage group for a particular volume. - - Given the volume instance name get the associated storage group if it - is belong to one. - - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :param sgName: the storage group name - :returns: foundStorageGroupInstanceName - """ - foundStorageGroupInstanceName = None - - storageGroupInstanceNames = conn.AssociatorNames( - volumeInstanceName, - ResultClass='CIM_DeviceMaskingGroup') - - if len(storageGroupInstanceNames) > 1: - LOG.info( - "The volume belongs to more than one storage group. 
" - "Returning storage group %(sgName)s.", - {'sgName': sgName}) - for storageGroupInstanceName in storageGroupInstanceNames: - instance = self.get_existing_instance( - conn, storageGroupInstanceName) - if instance and sgName == instance['ElementName']: - foundStorageGroupInstanceName = storageGroupInstanceName - break - - return foundStorageGroupInstanceName - - def get_storage_groups_from_volume(self, conn, volumeInstanceName): - """Returns all the storage group for a particular volume. - - Given the volume instance name get all the associated storage groups. - - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :returns: foundStorageGroupInstanceName - """ - storageGroupInstanceNames = conn.AssociatorNames( - volumeInstanceName, - ResultClass='CIM_DeviceMaskingGroup') - - if storageGroupInstanceNames: - LOG.debug("There are %(len)d storage groups associated " - "with volume %(volumeInstanceName)s.", - {'len': len(storageGroupInstanceNames), - 'volumeInstanceName': volumeInstanceName}) - else: - LOG.debug("There are no storage groups associated " - "with volume %(volumeInstanceName)s.", - {'volumeInstanceName': volumeInstanceName}) - - return storageGroupInstanceNames - - def wrap_get_storage_group_from_volume(self, conn, volumeInstanceName, - sgName): - """Unit test aid""" - return self.get_storage_group_from_volume(conn, volumeInstanceName, - sgName) - - def find_storage_masking_group(self, conn, controllerConfigService, - storageGroupName): - """Given the storage group name get the storage group. 
- - :param conn: connection to the ecom server - :param controllerConfigService: the controllerConfigService - :param storageGroupName: the name of the storage group you are getting - :returns: foundStorageMaskingGroupInstanceName - """ - foundStorageMaskingGroupInstanceName = None - - storageMaskingGroupInstances = ( - conn.Associators(controllerConfigService, - ResultClass='CIM_DeviceMaskingGroup')) - - for storageMaskingGroupInstance in storageMaskingGroupInstances: - - if storageGroupName == storageMaskingGroupInstance['ElementName']: - # Check that it has not been deleted recently. - instance = self.get_existing_instance( - conn, storageMaskingGroupInstance.path) - if instance is None: - # Storage group not found. - foundStorageMaskingGroupInstanceName = None - else: - foundStorageMaskingGroupInstanceName = ( - storageMaskingGroupInstance.path) - - break - return foundStorageMaskingGroupInstanceName - - def find_storage_system_name_from_service(self, configService): - """Given any service get the storage system name from it. - - :param configService: the configuration service - :returns: string -- configService['SystemName'] - storage system name - """ - return configService['SystemName'] - - def find_volume_instance(self, conn, volumeDict, volumeName): - """Given the volumeDict get the instance from it. 
- - :param conn: connection to the ecom server - :param volumeDict: the volume Dict - :param volumeName: the user friendly name of the volume - :returns: foundVolumeInstance - the found volume instance - """ - volumeInstanceName = self.get_instance_name(volumeDict['classname'], - volumeDict['keybindings']) - foundVolumeInstance = conn.GetInstance(volumeInstanceName) - - if foundVolumeInstance is None: - LOG.debug("Volume %(volumeName)s not found on the array.", - {'volumeName': volumeName}) - else: - LOG.debug("Volume name: %(volumeName)s Volume instance: " - "%(vol_instance)s.", - {'volumeName': volumeName, - 'vol_instance': foundVolumeInstance.path}) - - return foundVolumeInstance - - def get_host_short_name(self, hostName): + def get_host_short_name(self, host_name): """Returns the short name for a given qualified host name. Checks the host name to see if it is the fully qualified host name - and returns part before the dot. If there is no dot in the hostName - the full hostName is returned. - - :param hostName: the fully qualified host name () - :returns: string -- the short hostName + and returns part before the dot. If there is no dot in the host name + the full host name is returned. + :param host_name: the fully qualified host name + :returns: string -- the short host_name """ - shortHostName = None - - hostArray = hostName.split('.') - if len(hostArray) > 1: - shortHostName = hostArray[0] + host_array = host_name.split('.') + if len(host_array) > 1: + short_host_name = host_array[0] else: - shortHostName = hostName + short_host_name = host_name - return self.generate_unique_trunc_host(shortHostName) + return self.generate_unique_trunc_host(short_host_name) - def get_instance_name(self, classname, bindings): - """Get the instance from the classname and bindings. 
- - :param classname: class name for the volume instance - :param bindings: volume created from job - :returns: pywbem.CIMInstanceName -- instanceName - """ - instanceName = None - try: - instanceName = pywbem.CIMInstanceName( - classname, - namespace=EMC_ROOT, - keybindings=bindings) - except NameError: - instanceName = None - - return instanceName - - def parse_pool_instance_id(self, poolInstanceId): - """Given the instance Id parse the pool name and system name from it. - - Example of pool InstanceId: Symmetrix+0001233455555+U+Pool 0 - - :param poolInstanceId: the path and name of the file - :returns: string -- poolName - the pool name - :returns: string -- systemName - the system name - """ - poolName = None - systemName = None - endp = poolInstanceId.rfind('+') - if endp > -1: - poolName = poolInstanceId[endp + 1:] - - idarray = poolInstanceId.split('+') - if len(idarray) > 2: - systemName = self._format_system_name(idarray[0], idarray[1], '+') - - LOG.debug("Pool name: %(poolName)s System name: %(systemName)s.", - {'poolName': poolName, 'systemName': systemName}) - return poolName, systemName - - def _format_system_name(self, part1, part2, sep): - """Join to make up system name - - :param part1: the prefix - :param sep: the separator - :param part2: the postfix - :returns: systemName - """ - return ("%(part1)s%(sep)s%(part2)s" - % {'part1': part1, - 'sep': sep, - 'part2': part2}) - - def parse_pool_instance_id_v3(self, poolInstanceId): - """Given the instance Id parse the pool name and system name from it. 
- - Example of pool InstanceId: Symmetrix+0001233455555+U+Pool 0 - - :param poolInstanceId: the path and name of the file - :returns: poolName - the pool name - :returns: systemName - the system name - """ - poolName = None - systemName = None - endp = poolInstanceId.rfind('-+-') - if endp > -1: - poolName = poolInstanceId[endp + 3:] - - idarray = poolInstanceId.split('-+-') - if len(idarray) > 2: - systemName = ( - self._format_system_name(idarray[0], idarray[1], '-+-')) - - LOG.debug("Pool name: %(poolName)s System name: %(systemName)s.", - {'poolName': poolName, 'systemName': systemName}) - return poolName, systemName - - def convert_gb_to_bits(self, strGbSize): - """Convert GB(string) to bytes(string). - - :param strGbSize: string -- The size in GB - :returns: string -- The size in bytes - """ - strBitsSize = six.text_type(int(strGbSize) * units.Gi) - - LOG.debug("Converted %(strGbSize)s GBs to %(strBitsSize)s Bits.", - {'strGbSize': strGbSize, 'strBitsSize': strBitsSize}) - - return strBitsSize - - def check_if_volume_is_composite(self, conn, volumeInstance): - """Check if the volume is composite. 
- - :param conn: the connection information to the ecom server - :param volumeInstance: the volume Instance - :returns: string -- 'True', 'False' or 'Undetermined' - """ - propertiesList = volumeInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'IsComposite': - cimProperties = properties[1] - - if 'True' in six.text_type(cimProperties.value): - return 'True' - elif 'False' in six.text_type(cimProperties.value): - return 'False' - else: - return 'Undetermined' - return 'Undetermined' - - def get_assoc_pool_from_volume(self, conn, volumeInstanceName): - """Give the volume instance get the associated pool instance - - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :returns: foundPoolInstanceName - """ - foundPoolInstanceName = None - foundPoolInstanceNames = ( - conn.AssociatorNames(volumeInstanceName, - ResultClass='EMC_VirtualProvisioningPool')) - if len(foundPoolInstanceNames) > 0: - foundPoolInstanceName = foundPoolInstanceNames[0] - return foundPoolInstanceName - - def get_assoc_v2_pool_from_volume(self, conn, volumeInstanceName): - """Give the volume instance get the associated pool instance. 
- - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :returns: foundPoolInstanceName - """ - foundPoolInstanceName = None - foundPoolInstanceNames = ( - conn.AssociatorNames(volumeInstanceName, - ResultClass='EMC_VirtualProvisioningPool')) - - if not foundPoolInstanceNames: - deviceID = volumeInstanceName['DeviceID'] - LOG.debug("Volume %(deviceId)s not in V2 pool", - {'deviceId': deviceID}) - else: - LOG.debug("Retrieved pool: %(foundPoolInstanceNames)s", - {'foundPoolInstanceNames': foundPoolInstanceNames}) - - if foundPoolInstanceNames and len(foundPoolInstanceNames) > 0: - foundPoolInstanceName = foundPoolInstanceNames[0] - - return foundPoolInstanceName - - def get_assoc_v3_pool_from_volume(self, conn, volumeInstanceName): - """Give the volume instance get the associated pool instance. - - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :returns: foundPoolInstanceName - """ - foundPoolInstanceName = None - foundPoolInstanceNames = ( - conn.AssociatorNames(volumeInstanceName, - ResultClass='Symm_SRPStoragePool')) - - if not foundPoolInstanceNames: - deviceID = volumeInstanceName['DeviceID'] - LOG.debug("Volume %(deviceId)s not in V3 SRP", - {'deviceId': deviceID}) - exceptionMessage = ("Unable to locate volume %(deviceId)s", - {'deviceId': deviceID}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - LOG.debug("Retrieved pool: %(foundPoolInstanceNames)s", - {'foundPoolInstanceNames': foundPoolInstanceNames}) - - if foundPoolInstanceNames and len(foundPoolInstanceNames) > 0: - foundPoolInstanceName = foundPoolInstanceNames[0] - - return foundPoolInstanceName - - def check_if_volume_is_extendable(self, conn, volumeInstance): - """Checks if a volume is extendable or not. - - Check underlying CIM_StorageExtent to see if the volume is - concatenated or not. 
- If isConcatenated is true then it is a concatenated and - extendable. - If isConcatenated is False and isVolumeComposite is True then - it is striped and not extendable. - If isConcatenated is False and isVolumeComposite is False then - it has one member only but is still extendable. - - :param conn: the connection information to the ecom server - :param volumeInstance: the volume instance - :returns: string -- 'True', 'False' or 'Undetermined' - """ - isConcatenated = None - - isVolumeComposite = self.check_if_volume_is_composite( - conn, volumeInstance) - - storageExtentInstances = conn.Associators( - volumeInstance.path, - ResultClass='CIM_StorageExtent') - - if len(storageExtentInstances) > 0: - storageExtentInstance = storageExtentInstances[0] - propertiesList = storageExtentInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'IsConcatenated': - cimProperties = properties[1] - isConcatenated = six.text_type(cimProperties.value) - - if isConcatenated is not None: - break - - if 'True' in isConcatenated: - return 'True' - elif 'False' in isConcatenated and 'True' in isVolumeComposite: - return 'False' - elif 'False' in isConcatenated and 'False' in isVolumeComposite: - return 'True' - else: - return 'Undetermined' - - def get_composite_type(self, compositeTypeStr): - """Get the int value of composite type. - - The default is '2' concatenated. - - :param compositeTypeStr: 'concatenated' or 'striped'. Cannot be None - :returns: int -- compositeType = 2 for concatenated, or 3 for striped - """ - compositeType = 2 - stripedStr = 'striped' - try: - if compositeTypeStr.lower() == stripedStr.lower(): - compositeType = 3 - except KeyError: - # Default to concatenated if not defined. - pass - - return compositeType - - def is_volume_bound_to_pool(self, conn, volumeInstance): - """Check if volume is bound to a pool. 
- - :param conn: the connection information to the ecom server - :param volumeInstance: the volume instance - :returns: string -- 'True' 'False' or 'Undetermined' - """ - propertiesList = volumeInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'EMCIsBound': - cimProperties = properties[1] - - if 'True' in six.text_type(cimProperties.value): - return 'True' - elif 'False' in six.text_type(cimProperties.value): - return 'False' - else: - return 'Undetermined' - return 'Undetermined' - - def get_space_consumed(self, conn, volumeInstance): - """Check the space consumed of a volume. - - :param conn: the connection information to the ecom server - :param volumeInstance: the volume Instance - :returns: spaceConsumed - """ - foundSpaceConsumed = None - unitnames = conn.References( - volumeInstance, ResultClass='CIM_AllocatedFromStoragePool', - Role='Dependent') - - for unitname in unitnames: - propertiesList = unitname.properties.items() - for properties in propertiesList: - if properties[0] == 'SpaceConsumed': - cimProperties = properties[1] - foundSpaceConsumed = cimProperties.value - break - if foundSpaceConsumed is not None: - break - - return foundSpaceConsumed - - def get_volume_size(self, conn, volumeInstance): - """Get the volume size which is ConsumableBlocks * BlockSize. 
- - :param conn: the connection information to the ecom server - :param volumeInstance: the volume Instance - :returns: string -- volumeSizeOut - """ - volumeSizeOut = 'Undetermined' - numBlocks = 0 - blockSize = 0 - - propertiesList = volumeInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'ConsumableBlocks': - cimProperties = properties[1] - numBlocks = int(cimProperties.value) - if properties[0] == 'BlockSize': - cimProperties = properties[1] - blockSize = int(cimProperties.value) - if blockSize > 0 and numBlocks > 0: - break - if blockSize > 0 and numBlocks > 0: - volumeSizeOut = six.text_type(numBlocks * blockSize) - - return volumeSizeOut - - def determine_member_count(self, sizeStr, memberCount, compositeType): - """Determines how many members a volume should contain. - - Based on the size of the proposed volume, the compositeType and the - memberCount, determine (or validate) how many meta members there - should be in a volume. - - :param sizeStr: the size in GBs of the proposed volume - :param memberCount: the initial member count - :param compositeType: the composite type - :returns: string -- memberCount - :returns: string -- errorDesc - the error description - """ - errorDesc = None - if compositeType in 'concatenated' and int(sizeStr) > 240: - newMemberCount = int(sizeStr) // 240 - modular = int(sizeStr) % 240 - if modular > 0: - newMemberCount += 1 - memberCount = six.text_type(newMemberCount) - - if compositeType in 'striped': - metaSize = int(sizeStr) / int(memberCount) - modular = int(sizeStr) % int(memberCount) - metaSize = metaSize + modular - if metaSize > 240: - errorDesc = ('Meta Size is greater than maximum allowed meta ' - 'size') - - return memberCount, errorDesc - - def get_extra_specs_by_volume_type_name(self, volumeTypeName): + @staticmethod + def get_volumetype_extra_specs(volume, volume_type_id=None): """Gets the extra specs associated with a volume type. 
- Given the string value of the volume type name, get the extra specs - object associated with the volume type. - - :param volumeTypeName: string value of the volume type name - :returns: extra_specs - extra specs object + :param volume: the volume dictionary + :param volume_type_id: Optional override for volume.volume_type_id + :returns: dict -- extra_specs - the extra specs + :raises: VolumeBackendAPIException """ - ctxt = context.get_admin_context() - volume_type = volume_types.get_volume_type_by_name( - ctxt, volumeTypeName) - extra_specs = volume_type['extra_specs'] + extra_specs = {} + + try: + if volume_type_id: + type_id = volume_type_id + else: + type_id = volume.volume_type_id + if type_id is not None: + extra_specs = volume_types.get_volume_type_extra_specs(type_id) + except Exception as e: + LOG.debug('Exception getting volume type extra specs: %(e)s', + {'e': six.text_type(e)}) return extra_specs - def get_pool_capacities(self, conn, poolName, storageSystemName): - """Get the total and remaining capacity in GB for a storage pool. - - Given the storage pool name, get the total capacity and remaining - capacity in GB. 
- - :param conn: connection to the ecom server - :param poolName: string value of the storage pool name - :param storageSystemName: the storage system name - :returns: tuple -- (total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb) - """ - LOG.debug( - "Retrieving capacity for pool %(poolName)s on array %(array)s.", - {'poolName': poolName, - 'array': storageSystemName}) - - poolInstanceName = self.get_pool_by_name( - conn, poolName, storageSystemName) - if poolInstanceName is None: - LOG.error( - "Unable to retrieve pool instance of %(poolName)s on " - "array %(array)s.", - {'poolName': poolName, 'array': storageSystemName}) - return (0, 0) - storagePoolInstance = conn.GetInstance( - poolInstanceName, LocalOnly=False) - total_capacity_gb = self.convert_bits_to_gbs( - storagePoolInstance['TotalManagedSpace']) - provisioned_capacity_gb = self.convert_bits_to_gbs( - storagePoolInstance['EMCSubscribedCapacity']) - free_capacity_gb = self.convert_bits_to_gbs( - storagePoolInstance['RemainingManagedSpace']) - try: - array_max_over_subscription = self.get_ratio_from_max_sub_per( - storagePoolInstance['EMCMaxSubscriptionPercent']) - except KeyError: - array_max_over_subscription = 65534 - return (total_capacity_gb, free_capacity_gb, - provisioned_capacity_gb, array_max_over_subscription) - - def get_pool_by_name(self, conn, storagePoolName, storageSystemName): - """Returns the instance name associated with a storage pool name. 
- - :param conn: connection to the ecom server - :param storagePoolName: string value of the storage pool name - :param storageSystemName: string value of array - :returns: foundPoolInstanceName - instance name of storage pool - """ - foundPoolInstanceName = None - LOG.debug( - "storagePoolName: %(poolName)s, storageSystemName: %(array)s.", - {'poolName': storagePoolName, 'array': storageSystemName}) - storageSystemInstanceName = self.find_storageSystem(conn, - storageSystemName) - poolInstanceNames = conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='EMC_VirtualProvisioningPool') - for poolInstanceName in poolInstanceNames: - poolName = self.get_pool_name(conn, poolInstanceName) - if (poolName == storagePoolName): - # Check that the pool hasn't been recently deleted. - instance = self.get_existing_instance(conn, poolInstanceName) - if instance is None: - foundPoolInstanceName = None - else: - foundPoolInstanceName = poolInstanceName - break - - return foundPoolInstanceName - - def convert_bits_to_gbs(self, strBitSize): - """Convert bytes(string) to GB(string). - - :param strBitSize: string -- The size in bytes - :returns: int -- The size in GB - """ - gbSize = int(strBitSize) // 1024 // 1024 // 1024 - return gbSize - - def compare_size(self, size1Str, size2Str): - """Compare the bit sizes to an approximate. - - :param size1Str: the first bit size (String) - :param size2Str: the second bit size (String) - :returns: int -- size1GBs - size2GBs - """ - size1GBs = self.convert_bits_to_gbs(size1Str) - size2GBs = self.convert_bits_to_gbs(size2Str) - - return size1GBs - size2GBs - - def get_volumetype_extraspecs(self, volume, volumeTypeId=None): - """Compare the bit sizes to an approximate. 
- - :param volume: the volume dictionary - :param volumeTypeId: Optional override for volume['volume_type_id'] - :returns: dict -- extraSpecs - the extra specs - """ - extraSpecs = {} - - try: - if volumeTypeId: - type_id = volumeTypeId - else: - type_id = volume['volume_type_id'] - if type_id is not None: - extraSpecs = volume_types.get_volume_type_extra_specs(type_id) - - except Exception: - pass - - return extraSpecs - - def get_volumetype_qosspecs(self, volume, volumeTypeId=None): - """Get the qos specs. - - :param volume: the volume dictionary - :param volumeTypeId: Optional override for volume['volume_type_id'] - :returns: dict -- qosSpecs - the qos specs - """ - qosSpecs = {} - - try: - if volumeTypeId: - type_id = volumeTypeId - else: - type_id = volume['volume_type_id'] - if type_id is not None: - qosSpecs = volume_types.get_volume_type_qos_specs(type_id) - - except Exception: - LOG.debug("Unable to get QoS specifications.") - - return qosSpecs - - def get_volume_type_name(self, volume): - """Get the volume type name. - - :param volume: the volume dictionary - :returns: string -- volumeTypeName - the volume type name - """ - volumeTypeName = None - - ctxt = context.get_admin_context() - typeId = volume['volume_type_id'] - if typeId is not None: - volumeType = volume_types.get_volume_type(ctxt, typeId) - volumeTypeName = volumeType['name'] - - return volumeTypeName - - def parse_volume_type_from_filename(self, emcConfigFile): - """Parse the volume type from the file (if it exists). - - :param emcConfigFile: the EMC configuration file - :returns: volumeTypeName - the volume type name - """ - volumeTypeName = None - - m = re.search('/etc/cinder/cinder_emc_config_(.+?).xml', emcConfigFile) - if m: - volumeTypeName = m.group(1) - - return volumeTypeName - - def get_volumes_from_pool(self, conn, poolInstanceName): - """Check the space consumed of a volume. 
- - :param conn: the connection information to the ecom server - :param poolInstanceName: the pool instance name - :returns: the volumes in the pool - """ - return conn.AssociatorNames( - poolInstanceName, AssocClass='CIM_AllocatedFromStoragePool', - ResultClass='CIM_StorageVolume') - - def check_is_volume_bound_to_pool(self, conn, volumeInstance): - """Check the space consumed of a volume. - - :param conn: the connection information to the ecom server - :param volumeInstance: the volume Instance - :returns: string -- 'True', 'False' or 'Undetermined' - """ - foundSpaceConsumed = None - unitnames = conn.References( - volumeInstance, ResultClass='CIM_AllocatedFromStoragePool', - Role='Dependent') - - for unitname in unitnames: - propertiesList = unitname.properties.items() - for properties in propertiesList: - if properties[0] == 'EMCBoundToThinStoragePool': - cimProperties = properties[1] - foundSpaceConsumed = cimProperties.value - break - if foundSpaceConsumed is not None: - break - if 'True' in six.text_type(cimProperties.value): - return 'True' - elif 'False' in six.text_type(cimProperties.value): - return 'False' - else: - return 'Undetermined' - - def get_short_protocol_type(self, protocol): + @staticmethod + def get_short_protocol_type(protocol): """Given the protocol type, return I for iscsi and F for fc. :param protocol: iscsi or fc @@ -1147,1154 +117,261 @@ class VMAXUtils(object): else: return protocol - def get_hardware_id_instances_from_array( - self, conn, hardwareIdManagementService): - """Get all the hardware ids from an array. 
- - :param conn: connection to the ecom server - :param: hardwareIdManagementService - hardware id management service - :returns: hardwareIdInstances - the list of hardware - id instances - """ - hardwareIdInstances = ( - conn.Associators(hardwareIdManagementService, - ResultClass='EMC_StorageHardwareID')) - - return hardwareIdInstances - - def truncate_string(self, strToTruncate, maxNum): + @staticmethod + def truncate_string(str_to_truncate, max_num): """Truncate a string by taking first and last characters. - :param strToTruncate: the string to be truncated - :param maxNum: the maximum number of characters + :param str_to_truncate: the string to be truncated + :param max_num: the maximum number of characters :returns: string -- truncated string or original string """ - if len(strToTruncate) > maxNum: - newNum = len(strToTruncate) - maxNum // 2 - firstChars = strToTruncate[:maxNum // 2] - lastChars = strToTruncate[newNum:] - strToTruncate = firstChars + lastChars + if len(str_to_truncate) > max_num: + new_num = len(str_to_truncate) - max_num // 2 + first_chars = str_to_truncate[:max_num // 2] + last_chars = str_to_truncate[new_num:] + str_to_truncate = first_chars + last_chars + return str_to_truncate - return strToTruncate - - def get_array(self, host): - """Extract the array from the host capabilites. - - :param host: the host object - :returns: storageSystem - storage system represents the array - """ - - try: - if '@' in host: - infoDetail = host.split('@') - storageSystem = 'SYMMETRIX+' + infoDetail[0] - except Exception: - LOG.error("Error parsing array from host capabilities.") - - return storageSystem - - def get_time_delta(self, startTime, endTime): + @staticmethod + def get_time_delta(start_time, end_time): """Get the delta between start and end time. 
- :param startTime: the start time - :param endTime: the end time + :param start_time: the start time + :param end_time: the end time :returns: string -- delta in string H:MM:SS """ - delta = endTime - startTime + delta = end_time - start_time return six.text_type(datetime.timedelta(seconds=int(delta))) - def find_sync_sv_by_volume( - self, conn, storageSystem, volumeInstance, extraSpecs, - waitforsync=True): - """Find the storage synchronized name by device ID. + @staticmethod + def get_default_storage_group_name(srp_name, slo, workload): + """Determine default storage group from extra_specs. - :param conn: connection to the ecom server - :param storageSystem: the storage system name - :param volumeInstance: volume instance - :param extraSpecs: the extraSpecs dict - :param waitforsync: wait for the synchronization to complete if True - :returns: foundSyncInstanceName - """ - foundSyncInstanceName = None - syncInstanceNames = conn.ReferenceNames( - volumeInstance.path, - ResultClass='SE_StorageSynchronized_SV_SV') - for syncInstanceName in syncInstanceNames: - syncSvTarget = syncInstanceName['SyncedElement'] - syncSvSource = syncInstanceName['SystemElement'] - if storageSystem != syncSvTarget['SystemName']: - continue - if syncSvTarget['DeviceID'] == volumeInstance['DeviceID'] or ( - syncSvSource['DeviceID'] == volumeInstance['DeviceID']): - # Check that it hasn't recently been deleted. - try: - conn.GetInstance(syncInstanceName) - foundSyncInstanceName = syncInstanceName - LOG.debug("Found sync Name: " - "%(syncName)s.", - {'syncName': foundSyncInstanceName}) - except Exception: - foundSyncInstanceName = None - break - - if foundSyncInstanceName: - # Wait for SE_StorageSynchronized_SV_SV to be fully synced. 
- if waitforsync: - LOG.warning( - "Expect a performance hit as volume is not fully " - "synced on %(deviceId)s.", - {'deviceId': volumeInstance['DeviceID']}) - startTime = time.time() - self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) - LOG.warning( - "Synchronization process took " - "took: %(delta)s H:MM:SS.", - {'delta': self.get_time_delta(startTime, - time.time())}) - - return foundSyncInstanceName - - def find_group_sync_rg_by_target( - self, conn, storageSystem, targetRgInstanceName, extraSpecs, - waitforsync=True): - """Find the SE_GroupSynchronized_RG_RG instance name by target group. - - :param conn: connection to the ecom server - :param storageSystem: the storage system name - :param targetRgInstanceName: target group instance name - :param extraSpecs: the extraSpecs dict - :param waitforsync: wait for synchronization to complete - :returns: foundSyncInstanceName - """ - foundSyncInstanceName = None - groupSyncRgInstanceNames = conn.EnumerateInstanceNames( - 'SE_GroupSynchronized_RG_RG') - for rgInstanceName in groupSyncRgInstanceNames: - rgTarget = rgInstanceName['SyncedElement'] - if targetRgInstanceName['InstanceID'] == rgTarget['InstanceID']: - # Check that it has not recently been deleted. - try: - conn.GetInstance(rgInstanceName) - foundSyncInstanceName = rgInstanceName - LOG.debug("Found group sync name: " - "%(syncName)s.", - {'syncName': foundSyncInstanceName}) - except Exception: - foundSyncInstanceName = None - break - - if foundSyncInstanceName is None: - LOG.warning( - "Group sync name not found for target group %(target)s " - "on %(storageSystem)s.", - {'target': targetRgInstanceName['InstanceID'], - 'storageSystem': storageSystem}) - else: - # Wait for SE_StorageSynchronized_SV_SV to be fully synced. - if waitforsync: - self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) - - return foundSyncInstanceName - - def get_firmware_version(self, conn, arrayName): - """Get the firmware version of array. 
- - :param conn: the connection to the ecom server - :param arrayName: the array name - :returns: string -- firmwareVersion - """ - firmwareVersion = None - softwareIdentities = conn.EnumerateInstanceNames( - 'symm_storageSystemsoftwareidentity') - - for softwareIdentity in softwareIdentities: - if arrayName in softwareIdentity['InstanceID']: - softwareIdentityInstance = conn.GetInstance(softwareIdentity) - propertiesList = softwareIdentityInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'VersionString': - cimProperties = properties[1] - firmwareVersion = cimProperties.value - break - - return firmwareVersion - - def get_srp_pool_stats(self, conn, arrayName, poolName): - """Get the totalManagedSpace, remainingManagedSpace. - - :param conn: the connection to the ecom server - :param arrayName: the array name - :param poolName: the pool name - :returns: totalCapacityGb - :returns: remainingCapacityGb - """ - totalCapacityGb = -1 - remainingCapacityGb = -1 - storageSystemInstanceName = self.find_storageSystem(conn, arrayName) - - srpPoolInstanceNames = conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='Symm_SRPStoragePool') - - for srpPoolInstanceName in srpPoolInstanceNames: - poolInstanceID = srpPoolInstanceName['InstanceID'] - poolnameStr, _systemName = ( - self.parse_pool_instance_id_v3(poolInstanceID)) - - if six.text_type(poolName) == six.text_type(poolnameStr): - try: - # Check that pool hasn't suddenly been deleted. 
- srpPoolInstance = conn.GetInstance(srpPoolInstanceName) - propertiesList = srpPoolInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'TotalManagedSpace': - cimProperties = properties[1] - totalManagedSpace = cimProperties.value - totalCapacityGb = self.convert_bits_to_gbs( - totalManagedSpace) - elif properties[0] == 'RemainingManagedSpace': - cimProperties = properties[1] - remainingManagedSpace = cimProperties.value - remainingCapacityGb = self.convert_bits_to_gbs( - remainingManagedSpace) - except Exception: - pass - - return totalCapacityGb, remainingCapacityGb - - def isArrayV3(self, conn, arrayName): - """Check if the array is V2 or V3. - - :param conn: the connection to the ecom server - :param arrayName: the array name - :returns: boolean - """ - firmwareVersion = self.get_firmware_version(conn, arrayName) - - m = re.search('^(\d+)', firmwareVersion) - majorVersion = m.group(0) - - if int(majorVersion) >= 5900: - return True - else: - return False - - def get_pool_and_system_name_v2( - self, conn, storageSystemInstanceName, poolNameInStr): - """Get pool instance and system name string for V2. - - :param conn: the connection to the ecom server - :param storageSystemInstanceName: the storage system instance name - :param poolNameInStr: the pool name - :returns: foundPoolInstanceName - :returns: string -- systemNameStr - """ - vpoolInstanceNames = conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='EMC_VirtualProvisioningPool') - - return self._get_pool_instance_and_system_name( - conn, vpoolInstanceNames, storageSystemInstanceName, - poolNameInStr) - - def get_pool_and_system_name_v3( - self, conn, storageSystemInstanceName, poolNameInStr): - """Get pool instance and system name string for V2. 
- - :param conn: the connection to the ecom server - :param storageSystemInstanceName: the storage system instance name - :param poolNameInStr: the pool name - :returns: foundPoolInstanceName - :returns: string -- systemNameStr - """ - srpPoolInstanceNames = conn.AssociatorNames( - storageSystemInstanceName, - ResultClass='Symm_SRPStoragePool') - - return self._get_pool_instance_and_system_name( - conn, srpPoolInstanceNames, storageSystemInstanceName, - poolNameInStr) - - def _get_pool_instance_and_system_name( - self, conn, poolInstanceNames, storageSystemInstanceName, - poolname): - """Get the pool instance and the system name - - :param conn: the ecom connection - :param poolInstanceNames: list of pool instances - :param poolname: pool name (string) - :returns: foundPoolInstanceName, systemname - """ - foundPoolInstanceName = None - poolnameStr = None - systemNameStr = storageSystemInstanceName['Name'] - for poolInstanceName in poolInstanceNames: - # Example: SYMMETRIX-+-000196700535-+-SR-+-SRP_1 - # Example: SYMMETRIX+000195900551+TP+Sol_Innov - poolnameStr = self.get_pool_name(conn, poolInstanceName) - if poolnameStr is not None: - if six.text_type(poolname) == six.text_type(poolnameStr): - try: - conn.GetInstance(poolInstanceName) - foundPoolInstanceName = poolInstanceName - except Exception: - foundPoolInstanceName = None - break - - return foundPoolInstanceName, systemNameStr - - def get_pool_name(self, conn, poolInstanceName): - """Get the pool name from the instance - - :param conn: the ecom connection - :param poolInstanceName: the pool instance - :returns: poolnameStr - """ - poolnameStr = None - try: - poolInstance = conn.GetInstance(poolInstanceName) - poolnameStr = poolInstance['ElementName'] - except Exception: - pass - return poolnameStr - - def find_storageSystem(self, conn, arrayStr): - """Find an array instance name by the array name. 
- - :param conn: the ecom connection - :param arrayStr: the array Serial number (string) - :returns: foundPoolInstanceName, the CIM Instance Name of the Pool - :raises: VolumeBackendAPIException - """ - foundStorageSystemInstanceName = None - storageSystemInstanceNames = conn.EnumerateInstanceNames( - 'EMC_StorageSystem') - for storageSystemInstanceName in storageSystemInstanceNames: - arrayName = storageSystemInstanceName['Name'] - index = arrayName.find(arrayStr) - if index > -1: - foundStorageSystemInstanceName = storageSystemInstanceName - - if foundStorageSystemInstanceName is None: - exceptionMessage = (_("StorageSystem %(array)s is not found.") - % {'array': arrayStr}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - LOG.debug("Array Found: %(array)s.", - {'array': arrayStr}) - - return foundStorageSystemInstanceName - - def verify_slo_workload(self, slo, workload): - """Check if SLO and workload values are valid. - - :param slo: Service Level Object e.g bronze - :param workload: workload e.g DSS - :returns: boolean - """ - isValidSLO = False - isValidWorkload = False - - if not slo: - isValidSLO = True - if not workload: - isValidWorkload = True - - validSLOs = ['Bronze', 'Silver', 'Gold', - 'Platinum', 'Diamond', 'Optimized', - 'NONE'] - validWorkloads = ['DSS_REP', 'DSS', 'OLTP', - 'OLTP_REP', 'NONE'] - - for validSLO in validSLOs: - if slo == validSLO: - isValidSLO = True - break - - for validWorkload in validWorkloads: - if workload == validWorkload: - isValidWorkload = True - break - - if not isValidSLO: - LOG.error( - "SLO: %(slo)s is not valid. Valid values are Bronze, Silver, " - "Gold, Platinum, Diamond, Optimized, NONE.", {'slo': slo}) - - if not isValidWorkload: - LOG.error( - "Workload: %(workload)s is not valid. 
Valid values are " - "DSS_REP, DSS, OLTP, OLTP_REP, NONE.", {'workload': workload}) - - return isValidSLO, isValidWorkload - - def get_v3_storage_group_name(self, poolName, slo, workload, - isCompressionDisabled, rep_enabled=False): - """Determine default v3 storage group from extraSpecs. - - :param poolName: the poolName - :param slo: the SLO string e.g Bronze + :param srp_name: the name of the srp on the array + :param slo: the service level string e.g Bronze :param workload: the workload string e.g DSS - :param isCompressionDisabled: is compression disabled - :param rep_enabled: True if replication enabled - :returns: storageGroupName + :returns: storage_group_name """ if slo and workload: - - prefix = ("OS-%(poolName)s-%(slo)s-%(workload)s" - % {'poolName': poolName, - 'slo': slo, + prefix = ("OS-%(srpName)s-%(slo)s-%(workload)s" + % {'srpName': srp_name, 'slo': slo, 'workload': workload}) - - if isCompressionDisabled: - prefix += "-CD" else: prefix = "OS-no_SLO" - if rep_enabled: - prefix += "-RE" + storage_group_name = ("%(prefix)s-SG" % {'prefix': prefix}) + return storage_group_name - storageGroupName = ("%(prefix)s-SG" - % {'prefix': prefix}) - return storageGroupName + @staticmethod + def get_volume_element_name(volume_id): + """Get volume element name follows naming convention, i.e. 'OS-UUID'. - def _get_fast_settings_from_storage_group(self, storageGroupInstance): - """Get the emc FAST setting from the storage group. 
- - :param storageGroupInstance: the storage group instance - :returns: emcFastSetting + :param volume_id: Openstack volume ID containing uuid + :returns: volume element name in format of OS-UUID """ - emcFastSetting = None - propertiesList = storageGroupInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'EMCFastSetting': - cimProperties = properties[1] - emcFastSetting = cimProperties.value - break - return emcFastSetting + element_name = volume_id + uuid_regex = (re.compile( + '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', + re.I)) + match = uuid_regex.search(volume_id) + if match: + volume_uuid = match.group() + element_name = ("%(prefix)s%(volumeUUID)s" + % {'prefix': VOLUME_ELEMENT_NAME_PREFIX, + 'volumeUUID': volume_uuid}) + LOG.debug( + "get_volume_element_name elementName: %(elementName)s.", + {'elementName': element_name}) + return element_name - def get_volume_meta_head(self, conn, volumeInstanceName): - """Get the head of a meta volume. + def generate_unique_trunc_host(self, host_name): + """Create a unique short host name under 16 characters. 
- :param conn: the ecom connection - :param volumeInstanceName: the composite volume instance name - :returns: the instance name of the meta volume head + :param host_name: long host name + :returns: truncated host name """ - metaHeadInstanceName = None - metaHeads = conn.AssociatorNames( - volumeInstanceName, - ResultClass='EMC_Meta') + if host_name and len(host_name) > 16: + host_name = host_name.lower() + m = hashlib.md5() + m.update(host_name.encode('utf-8')) + uuid = m.hexdigest() + new_name = ("%(host)s%(uuid)s" + % {'host': host_name[-6:], + 'uuid': uuid}) + host_name = self.truncate_string(new_name, 16) + return host_name - if len(metaHeads) > 0: - metaHeadInstanceName = metaHeads[0] - if metaHeadInstanceName is None: - LOG.info( - "Volume %(volume)s does not have meta device members.", - {'volume': volumeInstanceName}) + def get_pg_short_name(self, portgroup_name): + """Create a unique port group name under 12 characters. - return metaHeadInstanceName - - def get_meta_members_of_composite_volume( - self, conn, metaHeadInstanceName): - """Get the member volumes of a composite volume. 
- - :param conn: the ecom connection - :param metaHeadInstanceName: head of the composite volume - :returns: an array containing instance names of member volumes + :param portgroup_name: long portgroup_name + :returns: truncated portgroup_name """ - metaMembers = conn.AssociatorNames( - metaHeadInstanceName, - AssocClass='CIM_BasedOn', - ResultClass='EMC_PartialAllocOfConcreteExtent') - LOG.debug("metaMembers: %(members)s.", {'members': metaMembers}) - return metaMembers + if portgroup_name and len(portgroup_name) > 12: + portgroup_name = portgroup_name.lower() + m = hashlib.md5() + m.update(portgroup_name.encode('utf-8')) + uuid = m.hexdigest() + new_name = ("%(pg)s%(uuid)s" + % {'pg': portgroup_name[-6:], + 'uuid': uuid}) + portgroup_name = self.truncate_string(new_name, 12) + return portgroup_name - def get_meta_members_capacity_in_byte(self, conn, volumeInstanceNames): - """Get the capacity in byte of all meta device member volumes. + @staticmethod + def override_ratio(max_over_sub_ratio, max_sub_ratio_from_per): + """Override ratio if necessary. - :param conn: the ecom connection - :param volumeInstanceNames: array contains meta device member volumes - :returns: array contains capacities of each member device in bits + The over subscription ratio will be overridden if the max subscription + percent is less than the user supplied max oversubscription ratio. 
+ :param max_over_sub_ratio: user supplied over subscription ratio + :param max_sub_ratio_from_per: property on the srp + :returns: max_over_sub_ratio """ - capacitiesInByte = [] - headVolume = conn.GetInstance(volumeInstanceNames[0]) - totalSizeInByte = ( - headVolume['ConsumableBlocks'] * headVolume['BlockSize']) - volumeInstanceNames.pop(0) - for volumeInstanceName in volumeInstanceNames: - volumeInstance = conn.GetInstance(volumeInstanceName) - numOfBlocks = volumeInstance['ConsumableBlocks'] - blockSize = volumeInstance['BlockSize'] - volumeSizeInByte = numOfBlocks * blockSize - capacitiesInByte.append(volumeSizeInByte) - totalSizeInByte = totalSizeInByte - volumeSizeInByte + if max_over_sub_ratio: + try: + max_over_sub_ratio = max(float(max_over_sub_ratio), + float(max_sub_ratio_from_per)) + except ValueError: + if max_sub_ratio_from_per is not None: + max_over_sub_ratio = float(max_sub_ratio_from_per) + elif max_sub_ratio_from_per: + max_over_sub_ratio = float(max_sub_ratio_from_per) - capacitiesInByte.insert(0, totalSizeInByte) - return capacitiesInByte + return max_over_sub_ratio - def get_existing_instance(self, conn, instanceName): - """Check that the instance name still exists and return the instance. - - :param conn: the connection to the ecom server - :param instanceName: the instanceName to be checked - :returns: instance or None - """ - instance = None - try: - instance = conn.GetInstance(instanceName, LocalOnly=False) - except pywbem.cim_operations.CIMError as arg: - instance = self.process_exception_args(arg, instanceName) - return instance - - def process_exception_args(self, arg, instanceName): - """Process exception arguments. - - :param arg: the arg list - :param instanceName: the instance name - :returns: None - :raises: VolumeBackendAPIException - """ - instance = None - code, desc = arg[0], arg[1] - if code == CIM_ERR_NOT_FOUND: - # Object doesn't exist any more. 
- instance = None - else: - # Something else that we cannot recover from has happened. - LOG.error("Exception: %s", desc) - exceptionMessage = (_( - "Cannot verify the existence of object:" - "%(instanceName)s.") - % {'instanceName': instanceName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - return instance - - def find_replication_service_capabilities(self, conn, storageSystemName): - """Find the replication service capabilities instance name. - - :param conn: the connection to the ecom server - :param storageSystemName: the storage system name - :returns: foundRepServCapability - """ - foundRepServCapability = None - repservices = conn.EnumerateInstanceNames( - 'CIM_ReplicationServiceCapabilities') - for repservCap in repservices: - if storageSystemName in repservCap['InstanceID']: - foundRepServCapability = repservCap - LOG.debug("Found Replication Service Capabilities: " - "%(repservCap)s", - {'repservCap': repservCap}) - break - if foundRepServCapability is None: - exceptionMessage = (_("Replication Service Capability not found " - "on %(storageSystemName)s.") - % {'storageSystemName': storageSystemName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException(data=exceptionMessage) - - return foundRepServCapability - - def is_clone_licensed(self, conn, capabilityInstanceName, isV3): - """Check if the clone feature is licensed and enabled. - - :param conn: the connection to the ecom server - :param capabilityInstanceName: the replication service capabilities - instance name - :returns: True if licensed and enabled; False otherwise. 
- """ - capabilityInstance = conn.GetInstance(capabilityInstanceName) - propertiesList = capabilityInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'SupportedReplicationTypes': - cimProperties = properties[1] - repTypes = cimProperties.value - LOG.debug("Found supported replication types: " - "%(repTypes)s", - {'repTypes': repTypes}) - if isV3: - if (SYNC_SNAPSHOT_LOCAL in repTypes or - ASYNC_SNAPSHOT_LOCAL in repTypes): - # Snapshot is a supported replication type. - LOG.debug("Snapshot for VMAX3 is licensed and " - "enabled.") - return True - else: - if CLONE_REPLICATION_TYPE in repTypes: - # Clone is a supported replication type. - LOG.debug("Clone for VMAX2 is licensed and " - "enabled.") - return True - return False - - def create_storage_hardwareId_instance_name( - self, conn, hardwareIdManagementService, initiator): - """Create storage hardware ID instance name based on the WWPN/IQN. - - :param conn: connection to the ecom server - :param hardwareIdManagementService: the hardware ID management service - :param initiator: initiator(IQN or WWPN) to create the hardware ID - instance - :returns: hardwareIdList - """ - hardwareIdList = None - hardwareIdType = self._get_hardware_type(initiator) - rc, ret = conn.InvokeMethod( - 'CreateStorageHardwareID', - hardwareIdManagementService, - StorageID=initiator, - IDType=self.get_num(hardwareIdType, '16')) - - if 'HardwareID' in ret: - LOG.debug("Created hardware ID instance for initiator:" - "%(initiator)s rc=%(rc)d, ret=%(ret)s", - {'initiator': initiator, 'rc': rc, 'ret': ret}) - hardwareIdList = ret['HardwareID'] - else: - LOG.warning("CreateStorageHardwareID failed. initiator: " - "%(initiator)s, rc=%(rc)d, ret=%(ret)s.", - {'initiator': initiator, 'rc': rc, 'ret': ret}) - return hardwareIdList - - def _get_hardware_type( - self, initiator): - """Determine the hardware type based on the initiator. 
- - :param initiator: initiator(IQN or WWPN) - :returns: hardwareTypeId - """ - hardwareTypeId = 0 - try: - int(initiator, 16) - hardwareTypeId = 2 - except Exception: - if 'iqn' in initiator.lower(): - hardwareTypeId = 5 - if hardwareTypeId == 0: - LOG.warning("Cannot determine the hardware type.") - return hardwareTypeId - - def _process_tag(self, element, tagName): + @staticmethod + def _process_tag(element, tag_name): """Process the tag to get the value. :param element: the parent element - :param tagName: the tag name + :param tag_name: the tag name :returns: nodeValue(can be None) """ - nodeValue = None + node_value = None try: - processedElement = element.getElementsByTagName(tagName)[0] - nodeValue = processedElement.childNodes[0].nodeValue - if nodeValue: - nodeValue = nodeValue.strip() + processed_element = element.getElementsByTagName(tag_name)[0] + node_value = processed_element.childNodes[0].nodeValue + if node_value: + node_value = node_value.strip() except IndexError: pass - return nodeValue + return node_value - def _get_connection_info(self, ecomElement): - """Given the filename get the ecomUser and ecomPasswd. + def _get_connection_info(self, rest_element): + """Given the filename get the rest server connection details. 
- :param ecomElement: the ecom element + :param rest_element: the rest element :returns: dict -- connargs - the connection info dictionary :raises: VolumeBackendAPIException """ - connargs = {} - connargs['EcomServerIp'] = ( - self._process_tag(ecomElement, 'EcomServerIp')) - connargs['EcomServerPort'] = ( - self._process_tag(ecomElement, 'EcomServerPort')) - connargs['EcomUserName'] = ( - self._process_tag(ecomElement, 'EcomUserName')) - connargs['EcomPassword'] = ( - self._process_tag(ecomElement, 'EcomPassword')) + connargs = { + 'RestServerIp': ( + self._process_tag(rest_element, 'RestServerIp')), + 'RestServerPort': ( + self._process_tag(rest_element, 'RestServerPort')), + 'RestUserName': ( + self._process_tag(rest_element, 'RestUserName')), + 'RestPassword': ( + self._process_tag(rest_element, 'RestPassword'))} for k, __ in connargs.items(): if connargs[k] is None: - exceptionMessage = (_( - "EcomServerIp, EcomServerPort, EcomUserName, " - "EcomPassword must have valid values.")) - LOG.error(exceptionMessage) + exception_message = (_( + "RestServerIp, RestServerPort, RestUserName, " + "RestPassword must have valid values.")) + LOG.error(exception_message) raise exception.VolumeBackendAPIException( - data=exceptionMessage) + data=exception_message) # These can be None - connargs['EcomUseSSL'] = self._process_tag(ecomElement, 'EcomUseSSL') - connargs['EcomCACert'] = self._process_tag(ecomElement, 'EcomCACert') - connargs['EcomNoVerification'] = ( - self._process_tag(ecomElement, 'EcomNoVerification')) - - if connargs['EcomUseSSL'] and connargs['EcomUseSSL'] == 'True': - connargs['EcomUseSSL'] = True - if connargs['EcomNoVerification'] and ( - connargs['EcomNoVerification'] == 'True'): - connargs['EcomNoVerification'] = True - else: - connargs['EcomUseSSL'] = False - connargs['EcomNoVerification'] = False + connargs['SSLCert'] = self._process_tag(rest_element, 'SSLCert') + connargs['SSLVerify'] = ( + self._process_tag(rest_element, 'SSLVerify')) return connargs 
- def _fill_record(self, connargs, serialNumber, poolName, - portGroup, element): - """Fill a single record. - - :param connargs: the connection info - :param serialNumber: the serial number of array - :param poolName: the poolname - :param portGroup: the portGroup - :param element: the parent element - :returns: dict -- kwargs - """ - kwargs = {} - kwargs['EcomServerIp'] = connargs['EcomServerIp'] - kwargs['EcomServerPort'] = connargs['EcomServerPort'] - kwargs['EcomUserName'] = connargs['EcomUserName'] - kwargs['EcomPassword'] = connargs['EcomPassword'] - kwargs['EcomUseSSL'] = connargs['EcomUseSSL'] - kwargs['EcomCACert'] = connargs['EcomCACert'] - kwargs['EcomNoVerification'] = connargs['EcomNoVerification'] - - slo = self._process_tag(element, 'ServiceLevel') - kwargs['SLO'] = slo - workload = self._process_tag(element, 'Workload') - if workload is None and slo: - workload = 'NONE' - - kwargs['Workload'] = workload - fastPolicy = self._process_tag(element, 'FastPolicy') - kwargs['FastPolicy'] = fastPolicy - kwargs['SerialNumber'] = serialNumber - kwargs['PoolName'] = poolName - kwargs['PortGroup'] = portGroup - - return kwargs - - def parse_file_to_get_array_map(self, fileName): + def parse_file_to_get_array_map(self, file_name): """Parses a file and gets array map. - Given a file, parse it to get array and any pool(s) or - fast policy(s), SLOs, Workloads that might exist. + Given a file, parse it to get array and pool(srp). 
- :param fileName: the path and name of the file - :returns: list - Sample VMAX2 XML file - 10.108.246.202 - 5988 - admin - #1Password + 10.108.246.202 + 8443 + smc + smc + /path/client.cert + /path/to/certfile.pem OS-PORTGROUP1-PG 000198700439 - FC_SLVR1 + SRP_1 - - Sample VMAX3 XML file - - 10.108.246.202 - 5988 - admin - #1Password - - OS-PORTGROUP1-PG - - 000198700439 - FC_SLVR1 - Diamond <--This is optional - OLTP <--This is optional - - :param fileName: the configuration file + :param file_name: the configuration file :returns: list """ - myList = [] kwargs = {} - connargs = {} - with open(fileName, 'r') as my_file: - data = my_file.read() + my_file = open(file_name, 'r') + data = my_file.read() my_file.close() dom = minidom.parseString(data) try: connargs = self._get_connection_info(dom) - interval = self._process_tag(dom, 'Interval') - retries = self._process_tag(dom, 'Retries') - portGroup = self._get_random_portgroup(dom) - serialNumber = self._process_tag(dom, 'Array') - if serialNumber is None: - LOG.error( - "Array Serial Number must be in the file " - "%(fileName)s.", - {'fileName': fileName}) - poolName = self._process_tag(dom, 'Pool') - if poolName is None: - LOG.error( - "PoolName must be in the file " - "%(fileName)s.", - {'fileName': fileName}) - kwargs = self._fill_record( - connargs, serialNumber, poolName, portGroup, dom) - if interval: - kwargs['Interval'] = interval - if retries: - kwargs['Retries'] = retries + portgroup = self._get_random_portgroup(dom) + serialnumber = self._process_tag(dom, 'Array') + if serialnumber is None: + LOG.error("Array Serial Number must be in the file %(file)s.", + {'file': file_name}) + srp_name = self._process_tag(dom, 'SRP') + if srp_name is None: + LOG.error("SRP Name must be in the file %(file)s.", + {'file': file_name}) + kwargs = ( + {'RestServerIp': connargs['RestServerIp'], + 'RestServerPort': connargs['RestServerPort'], + 'RestUserName': connargs['RestUserName'], + 'RestPassword': 
connargs['RestPassword'], + 'SSLCert': connargs['SSLCert'], + 'SSLVerify': connargs['SSLVerify'], + 'SerialNumber': serialnumber, + 'srpName': srp_name, + 'PortGroup': portgroup}) - myList.append(kwargs) except IndexError: pass - return myList + return kwargs - def extract_record(self, arrayInfo, pool): - """Given pool string determine the correct record. - - The poolName and the serialNumber will determine the - correct record to return in VMAX2. - The poolName, SLO and the serialNumber will determine the - correct record to return in VMAX3. - - :param arrayInfo: list of records - :param pool: e.g 'SATA_BRONZE1+000198700439' - 'SRP_1+Bronze+000198700555' - :returns: single record - """ - foundArrayInfoRec = {} - if pool: - for arrayInfoRec in arrayInfo: - if pool.count('+') == 2: - compString = ("%(slo)s+%(poolName)s+%(array)s" - % {'slo': arrayInfoRec['SLO'], - 'poolName': arrayInfoRec['PoolName'], - 'array': arrayInfoRec['SerialNumber']}) - else: - compString = ("%(poolName)s+%(array)s" - % {'poolName': arrayInfoRec['PoolName'], - 'array': arrayInfoRec['SerialNumber']}) - if compString == pool: - LOG.info("The pool_name from extraSpecs is %(pool)s.", - {'pool': pool}) - foundArrayInfoRec = arrayInfoRec - break - else: - foundArrayInfoRec = self._get_serial_number(arrayInfo) - - return foundArrayInfoRec - - def _get_random_portgroup(self, element): - """Get a portgroup from list of portgroup. - - Parse all available port groups under a particular - array and choose one. + @staticmethod + def _get_random_portgroup(element): + """Randomly choose a portgroup from list of portgroups. 
:param element: the parent element :returns: the randomly chosen port group - :raises: VolumeBackendAPIException """ - portGroupElements = element.getElementsByTagName('PortGroup') - if portGroupElements and len(portGroupElements) > 0: - portGroupNames = [] - for portGroupElement in portGroupElements: - if portGroupElement.childNodes: - portGroupName = portGroupElement.childNodes[0].nodeValue - if portGroupName: - portGroupNames.append(portGroupName.strip()) - portGroupNames = VMAXUtils._filter_list(portGroupNames) - if len(portGroupNames) > 0: - return VMAXUtils.get_random_pg_from_list(portGroupNames) + portgroupelements = element.getElementsByTagName('PortGroup') + if portgroupelements and len(portgroupelements) > 0: + portgroupnames = [portgroupelement.childNodes[0].nodeValue.strip() + for portgroupelement in portgroupelements + if portgroupelement.childNodes] + portgroupnames = list(set(filter(None, portgroupnames))) + pg_len = len(portgroupnames) + if pg_len > 0: + return portgroupnames[random.randint(0, pg_len - 1)] + return None - exception_message = (_("No Port Group elements found in config file.")) - LOG.error(exception_message) - raise exception.VolumeBackendAPIException(data=exception_message) + def get_temp_snap_name(self, clone_name, source_device_id): + """Construct a temporary snapshot name for clone operation. 
- @staticmethod
- def get_random_pg_from_list(portgroupnames):
- """From list of portgroup, choose one randomly
-
- :param portgroupnames: list of available portgroups
- :returns: portgroupname - the random portgroup
+ :param clone_name: the name of the clone
+ :param source_device_id: the source device id
+ :returns: snap_name
 """
- portgroupname = (
- portgroupnames[random.randint(0, len(portgroupnames) - 1)])
-
- return portgroupname
-
- @staticmethod
- def _filter_list(portgroupnames):
- """Clean up the port group list
-
- :param portgroupnames: list of available portgroups
- :returns: portgroupnames - cleaned up list
- """
- portgroupnames = filter(None, portgroupnames)
- # Convert list to set to remove duplicate portgroups
- portgroupnames = list(set(portgroupnames))
- return portgroupnames
-
- def _get_serial_number(self, arrayInfo):
- """If we don't have a pool then we just get the serial number.
-
- If there is more then one serial number we must return an
- error and a recommendation to edit the EMC conf file.
-
- :param arrayInfo: list of records
- :returns: any record where serial number exists
- :raises: VolumeBackendAPIException
- """
- serialNumberList = []
- foundRecord = {}
-
- for arrayInfoRec in arrayInfo:
- serialNumberList.append(arrayInfoRec['SerialNumber'])
- foundRecord = arrayInfoRec
-
- if len(set(serialNumberList)) > 1:
- # We have more than one serial number in the dict.
- exception_message = (_("Multiple SerialNumbers found, when only "
- "one was expected for this operation. "
- "Please change your EMC config file."))
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- return foundRecord
-
- def _get_pool_info(self, arrayElement, fileName, connargs, interval,
- retries, myList):
- """Get pool information from element.
- - :param arrayElement: arrayElement - :param fileName: configuration file - :param connargs: connection arguments - :param interval: interval, can be None - :param retries: retries, can be None - :param myList: list (input) - :returns: list (output) - :raises: VolumeBackendAPIException - """ - kwargs = {} - portGroup = self._get_random_portgroup(arrayElement) - serialNumber = self._process_tag( - arrayElement, 'SerialNumber') - if serialNumber is None: - exceptionMessage = (_( - "SerialNumber must be in the file " - "%(fileName)s."), - {'fileName': fileName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - poolElements = arrayElement.getElementsByTagName('Pool') - if poolElements and len(poolElements) > 0: - for poolElement in poolElements: - poolName = self._process_tag(poolElement, 'PoolName') - if poolName is None: - exceptionMessage = (_( - "PoolName must be in the file " - "%(fileName)s."), - {'fileName': fileName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - kwargs = self._fill_record(connargs, serialNumber, - poolName, portGroup, - poolElement) - if interval: - kwargs['Interval'] = interval - if retries: - kwargs['Retries'] = retries - myList.append(kwargs) - return myList - - def find_volume_by_device_id_on_array(self, storageSystem, deviceID): - """Find the volume by device ID on a specific array. 
- - :param storageSystem: the storage system name - :param deviceID: string value of the volume device ID - :returns: foundVolumeInstanceName - """ - systemName = 'SYMMETRIX-+-%s' % storageSystem - bindings = {'CreationClassName': 'Symm_StorageVolume', - 'SystemName': systemName, - 'DeviceID': deviceID, - 'SystemCreationClassName': 'Symm_StorageSystem'} - - instanceName = pywbem.CIMInstanceName( - classname='Symm_StorageVolume', - namespace=EMC_ROOT, - keybindings=bindings) - - LOG.debug("Retrieved volume from VMAX: %(instanceName)s", - {'instanceName': instanceName}) - - return instanceName - - def check_volume_no_fast(self, extraSpecs): - """Check if the volume's extraSpecs indicate FAST is enabled. - - :param extraSpecs: dict -- extra spec dict - :return: True if not fast - :raises: VolumeBackendAPIException - """ - try: - if extraSpecs['storagetype:fastpolicy'] is not None: - LOG.warning( - "FAST is enabled. Policy: %(fastPolicyName)s.", - {'fastPolicyName': extraSpecs['storagetype:fastpolicy']}) - exceptionMessage = (_( - "Manage volume is not supported if FAST is enabled. " - "FAST policy: %(fastPolicyName)s." - ) % {'fastPolicyName': extraSpecs[ - 'storagetype:fastpolicy']}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - return True - except KeyError: - return True - - def check_volume_not_in_masking_view(self, conn, volumeInstanceName, - deviceId): - """Check if volume is in Masking View. 
- - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :param deviceId: string value of the volume device ID - :raises: VolumeBackendAPIException - :return: True if not in Masking View - """ - sgInstanceNames = ( - self.get_storage_groups_from_volume( - conn, volumeInstanceName)) - - mvInstanceName = None - for sgInstanceName in sgInstanceNames: - maskingViews = conn.AssociatorNames( - sgInstanceName, - ResultClass='Symm_LunMaskingView') - if len(maskingViews) > 0: - mvInstanceName = maskingViews[0] - if mvInstanceName: - exceptionMessage = (_( - "Unable to import volume %(deviceId)s to cinder. " - "Volume is in masking view %(mv)s.") - % {'deviceId': deviceId, 'mv': mvInstanceName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - if not mvInstanceName: - return True - - def check_volume_not_replication_source(self, conn, storageSystemName, - deviceId): - """Check volume not replication source. - - Check if the volume is the source of a replicated - volume. - - :param conn: connection to the ecom server - :param storageSystemName: the storage system name - :param deviceId: string value of the volume device ID - :raises: VolumeBackendAPIException - :returns: True if not replication source - """ - repSessionInstanceName = ( - self.get_associated_replication_from_source_volume( - conn, storageSystemName, deviceId)) - - if repSessionInstanceName: - exceptionMessage = (_( - "Unable to import volume %(deviceId)s to cinder. " - "It is the source volume of replication session %(sync)s.") - % {'deviceId': deviceId, 'sync': repSessionInstanceName}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - return True - - def check_is_volume_in_cinder_managed_pool( - self, conn, volumeInstanceName, cinderPoolInstanceName, - deviceId): - """Check if volume is in a Cinder managed pool. 
- - :param conn: connection to the ecom server - :param volumeInstanceName: the volume instance name - :param cinderPoolInstanceName: the name of the storage pool - :param deviceId: string value of the volume device ID - :raises: VolumeBackendAPIException - :returns: True if volume in cinder managed pool - """ - volumePoolInstanceName = ( - self.get_assoc_v2_pool_from_volume(conn, - volumeInstanceName)) - if not volumePoolInstanceName: - volumePoolInstanceName = ( - self.get_assoc_v3_pool_from_volume(conn, - volumeInstanceName)) - - volumePoolName = volumePoolInstanceName['InstanceID'] - cinderPoolName = cinderPoolInstanceName['InstanceID'] - - LOG.debug("Storage pool of existing volume: %(volPool)s, " - "Storage pool currently managed by cinder: %(cinderPool)s.", - {'volPool': volumePoolName, - 'cinderPool': cinderPoolName}) - - if volumePoolName != cinderPoolName: - exceptionMessage = (_( - "Unable to import volume %(deviceId)s to cinder. The external " - "volume is not in the pool managed by current cinder host.") - % {'deviceId': deviceId}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - else: - return True - - def get_volume_element_name(self, volumeId): - """Get volume element name follows naming convention, i.e. 'OS-UUID'. - - :param volumeId: volume id containing uuid - :returns: volume element name in format of OS-UUID - """ - elementName = volumeId - uuid_regex = (re.compile( - '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', - re.I)) - match = uuid_regex.search(volumeId) - if match: - volumeUUID = match.group() - elementName = ("%(prefix)s%(volumeUUID)s" - % {'prefix': VOLUME_ELEMENT_NAME_PREFIX, - 'volumeUUID': volumeUUID}) - LOG.debug( - "get_volume_element_name elementName: %(elementName)s.", - {'elementName': elementName}) - return elementName - - def rename_volume(self, conn, volume, newName): - """Change the volume ElementName to specified new name. 
- - :param conn: connection to the ecom server - :param volume: the volume instance name or volume instance - :param newName: new ElementName of the volume - :returns: volumeInstance after rename - """ - if type(volume) is pywbem.cim_obj.CIMInstance: - volumeInstance = volume - volumeInstance['ElementName'] = newName - else: - volumeInstance = conn.GetInstance(volume) - volumeInstance['ElementName'] = newName - - LOG.debug("Rename volume to new ElementName %(newName)s.", - {'newName': newName}) - - conn.ModifyInstance(volumeInstance, PropertyList=['ElementName']) - - return volumeInstance + trunc_clone = self.truncate_string(clone_name, 10) + snap_name = ("temp-%(device)s-%(clone)s" + % {'device': source_device_id, 'clone': trunc_clone}) + return snap_name @staticmethod def get_array_and_device_id(volume, external_ref): @@ -2308,7 +385,7 @@ class VMAXUtils(object): LOG.debug("External_ref: %(er)s", {'er': external_ref}) if not device_id: device_id = external_ref.get(u'source-id', None) - host = volume['host'] + host = volume.host host_list = host.split('+') array = host_list[(len(host_list) - 1)] @@ -2322,727 +399,3 @@ class VMAXUtils(object): raise exception.VolumeBackendAPIException( data=exception_message) return array, device_id - - def get_associated_replication_from_source_volume( - self, conn, storageSystem, sourceDeviceId): - """Get associated replication from source volume. - - Given the source volume device ID, find associated replication - storage synchronized instance names. 
- - :param conn: connection to the ecom server - :param storageSystem: the storage system name - :param source: target volume object - :returns: foundSyncName (String) - """ - foundSyncInstanceName = None - syncInstanceNames = conn.EnumerateInstanceNames( - 'SE_StorageSynchronized_SV_SV') - for syncInstanceName in syncInstanceNames: - sourceVolume = syncInstanceName['SystemElement'] - if storageSystem != sourceVolume['SystemName']: - continue - if sourceVolume['DeviceID'] == sourceDeviceId: - # Check that it hasn't recently been deleted. - try: - conn.GetInstance(syncInstanceName) - foundSyncInstanceName = syncInstanceName - LOG.debug("Found sync Name: " - "%(syncName)s.", - {'syncName': foundSyncInstanceName}) - except Exception: - foundSyncInstanceName = None - break - - if foundSyncInstanceName is None: - LOG.info( - "No replication synchronization session found associated " - "with source volume %(source)s on %(storageSystem)s.", - {'source': sourceDeviceId, 'storageSystem': storageSystem}) - - return foundSyncInstanceName - - def get_volume_model_updates( - self, volumes, cgId, status='available'): - """Update the volume model's status and return it. - - :param volumes: volumes object api - :param cgId: cg id - :param status: string value reflects the status of the member volume - :returns: volume_model_updates - updated volumes - """ - volume_model_updates = [] - LOG.info("Updaing status for CG: %(id)s.", {'id': cgId}) - if volumes: - for volume in volumes: - volume_model_updates.append({'id': volume['id'], - 'status': status}) - else: - LOG.info("No volume found for CG: %(cg)s.", {'cg': cgId}) - return volume_model_updates - - def get_smi_version(self, conn): - """Get the SMI_S version. 
- - :param conn: the connection to the ecom server - :returns: string -- version - """ - intVersion = 0 - swIndentityInstances = conn.EnumerateInstances( - 'SE_ManagementServerSoftwareIdentity') - if swIndentityInstances: - swIndentityInstance = swIndentityInstances[0] - majorVersion = swIndentityInstance['MajorVersion'] - minorVersion = swIndentityInstance['MinorVersion'] - revisionNumber = swIndentityInstance['RevisionNumber'] - - intVersion = int(six.text_type(majorVersion) + - six.text_type(minorVersion) + - six.text_type(revisionNumber)) - - LOG.debug("Major version: %(majV)lu, Minor version: %(minV)lu, " - "Revision number: %(revNum)lu, Version: %(intV)lu.", - {'majV': majorVersion, - 'minV': minorVersion, - 'revNum': revisionNumber, - 'intV': intVersion}) - return intVersion - - def get_composite_elements( - self, conn, volumeInstance): - """Get the meta members of a composite volume. - - :param conn: ECOM connection - :param volumeInstance: the volume instance - :returns memberVolumes: a list of meta members - """ - memberVolumes = None - storageSystemName = volumeInstance['SystemName'] - elementCompositionService = self.find_element_composition_service( - conn, storageSystemName) - rc, ret = conn.InvokeMethod( - 'GetCompositeElements', - elementCompositionService, - TheElement=volumeInstance.path) - - if 'OutElements' in ret: - LOG.debug("Get composite elements of volume " - "%(volume)s rc=%(rc)d, ret=%(ret)s", - {'volume': volumeInstance.path, 'rc': rc, 'ret': ret}) - memberVolumes = ret['OutElements'] - return memberVolumes - - def generate_unique_trunc_host(self, hostName): - """Create a unique short host name under 40 chars - - :param sgName: long storage group name - :returns: truncated storage group name - """ - if hostName and len(hostName) > 38: - hostName = hostName.lower() - m = hashlib.md5() - m.update(hostName.encode('utf-8')) - uuid = m.hexdigest() - return( - ("%(host)s%(uuid)s" - % {'host': hostName[-6:], - 'uuid': uuid})) - else: - return 
hostName - - def generate_unique_trunc_pool(self, poolName): - """Create a unique pool name under 16 chars - - :param poolName: long pool name - :returns: truncated pool name - """ - if poolName and len(poolName) > MAX_POOL_LENGTH: - return ( - ("%(first)s_%(last)s" - % {'first': poolName[:8], - 'last': poolName[-7:]})) - else: - return poolName - - def generate_unique_trunc_fastpolicy(self, fastPolicyName): - """Create a unique fast policy name under 14 chars - - :param fastPolicyName: long fast policy name - :returns: truncated fast policy name - """ - if fastPolicyName and len(fastPolicyName) > MAX_FASTPOLICY_LENGTH: - return ( - ("%(first)s_%(last)s" - % {'first': fastPolicyName[:7], - 'last': fastPolicyName[-6:]})) - else: - return fastPolicyName - - def get_iscsi_protocol_endpoints(self, conn, portgroupinstancename): - """Get the iscsi protocol endpoints of a port group. - - :param conn: the ecom connection - :param portgroupinstancename: the portgroup instance name - :returns: iscsiendpoints - """ - iscsiendpoints = conn.AssociatorNames( - portgroupinstancename, - AssocClass='CIM_MemberOfCollection') - return iscsiendpoints - - def get_tcp_protocol_endpoints(self, conn, iscsiendpointinstancename): - """Get the tcp protocol endpoints associated with an iscsi endpoint - - :param conn: the ecom connection - :param iscsiendpointinstancename: the iscsi endpoint instance name - :returns: tcpendpoints - """ - tcpendpoints = conn.AssociatorNames( - iscsiendpointinstancename, - AssocClass='CIM_BindsTo') - return tcpendpoints - - def get_ip_protocol_endpoints(self, conn, tcpendpointinstancename): - """Get the ip protocol endpoints associated with an tcp endpoint - - :param conn: the ecom connection - :param tcpendpointinstancename: the tcp endpoint instance name - :returns: ipendpoints - """ - ipendpoints = conn.AssociatorNames( - tcpendpointinstancename, - AssocClass='CIM_BindsTo') - return ipendpoints - - def get_iscsi_ip_address(self, conn, ipendpointinstancename): 
- """Get the IPv4Address from the ip endpoint instance name - - :param conn: the ecom connection - :param ipendpointinstancename: the ip endpoint instance name - :returns: foundIpAddress - """ - foundIpAddress = None - ipendpointinstance = conn.GetInstance(ipendpointinstancename) - propertiesList = ipendpointinstance.properties.items() - for properties in propertiesList: - if properties[0] == 'IPv4Address': - cimProperties = properties[1] - foundIpAddress = cimProperties.value - return foundIpAddress - - def get_replication_setting_data(self, conn, repServiceInstanceName, - replication_type, extraSpecs): - """Get the replication setting data - - :param conn: connection the ecom server - :param repServiceInstanceName: the storage group instance name - :param replication_type: the replication type - :param copy_methodology: the copy methodology - :returns: instance rsdInstance - """ - repServiceCapabilityInstanceNames = conn.AssociatorNames( - repServiceInstanceName, - ResultClass='CIM_ReplicationServiceCapabilities', - AssocClass='CIM_ElementCapabilities') - repServiceCapabilityInstanceName = ( - repServiceCapabilityInstanceNames[0]) - - rc, rsd = conn.InvokeMethod( - 'GetDefaultReplicationSettingData', - repServiceCapabilityInstanceName, - ReplicationType=self.get_num(replication_type, '16')) - - if rc != 0: - rc, errordesc = self.wait_for_job_complete(conn, rsd, - extraSpecs) - if rc != 0: - exceptionMessage = (_( - "Error getting ReplicationSettingData. " - "Return code: %(rc)lu. 
" - "Error: %(error)s.") - % {'rc': rc, - 'error': errordesc}) - LOG.error(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - return rsd - - def set_copy_methodology_in_rsd(self, conn, repServiceInstanceName, - replication_type, copy_methodology, - extraSpecs): - """Get the replication setting data - - :param conn: connection the ecom server - :param repServiceInstanceName: the storage group instance name - :param replication_type: the replication type - :param copy_methodology: the copy methodology - :returns: instance rsdInstance - """ - rsd = self.get_replication_setting_data( - conn, repServiceInstanceName, replication_type, extraSpecs) - rsdInstance = rsd['DefaultInstance'] - rsdInstance['DesiredCopyMethodology'] = ( - self.get_num(copy_methodology, '16')) - return rsdInstance - - def set_target_element_supplier_in_rsd( - self, conn, repServiceInstanceName, replication_type, - target_type, extraSpecs): - """Get the replication setting data - - :param conn: connection the ecom server - :param repServiceInstanceName: the storage group instance name - :param replication_type: the replication type - :param target_type: Use existing, Create new, Use and create - :returns: instance rsdInstance - """ - rsd = self.get_replication_setting_data( - conn, repServiceInstanceName, replication_type, extraSpecs) - rsdInstance = rsd['DefaultInstance'] - rsdInstance['TargetElementSupplier'] = ( - self.get_num(target_type, '16')) - - return rsdInstance - - def get_v3_default_sg_instance_name( - self, conn, poolName, slo, workload, storageSystemName, - isCompressionDisabled, is_re=False): - """Get the V3 default instance name - - :param conn: the connection to the ecom server - :param poolName: the pool name - :param slo: the SLO - :param workload: the workload - :param storageSystemName: the storage system name - :param isCompressionDisabled: is compression disabled - :returns: the storage group instance name - """ - storageGroupName = 
self.get_v3_storage_group_name( - poolName, slo, workload, isCompressionDisabled, is_re) - controllerConfigService = ( - self.find_controller_configuration_service( - conn, storageSystemName)) - sgInstanceName = self.find_storage_masking_group( - conn, controllerConfigService, storageGroupName) - return storageGroupName, controllerConfigService, sgInstanceName - - def get_ratio_from_max_sub_per(self, max_subscription_percent): - """Get ratio from max subscription percent if it exists. - - Check if the max subscription is set on the pool, if it is convert - it to a ratio. - - :param max_subscription_percent: max subscription percent - :returns: max_over_subscription_ratio - """ - if max_subscription_percent == '0': - return None - try: - max_subscription_percent_int = int(max_subscription_percent) - except ValueError: - LOG.error("Cannot convert max subscription percent to int.") - return None - return float(max_subscription_percent_int) / 100 - - def override_ratio(self, max_over_sub_ratio, max_sub_ratio_from_per): - """Override ratio if necessary - - The over subscription ratio will be overridden if the max subscription - percent is less than the user supplied max oversubscription ratio. - - :param max_over_sub_ratio: user supplied over subscription ratio - :param max_sub_ratio_from_per: property on the pool - :returns: max_over_sub_ratio - """ - if max_over_sub_ratio: - try: - max_over_sub_ratio = max(float(max_over_sub_ratio), - float(max_sub_ratio_from_per)) - except ValueError: - max_over_sub_ratio = float(max_sub_ratio_from_per) - elif max_sub_ratio_from_per: - max_over_sub_ratio = float(max_sub_ratio_from_per) - - return max_over_sub_ratio - - def update_storagegroup_qos(self, conn, storagegroup, extraspecs): - """Update the storagegroupinstance with qos details. 
- - If MaxIOPS or maxMBPS is in extraspecs, then DistributionType can be - modified in addition to MaxIOPS or/and maxMBPS - If MaxIOPS or maxMBPS is NOT in extraspecs, we check to see if - either is set in StorageGroup. If so, then DistributionType can be - modified - - :param conn: connection to the ecom server - :param storagegroup: the storagegroup instance name - :param extraspecs: extra specifications - """ - modifiedInstance = None - if type(storagegroup) is pywbem.cim_obj.CIMInstance: - storagegroupInstance = storagegroup - else: - storagegroupInstance = conn.GetInstance(storagegroup) - propertylist = [] - if 'maxIOPS' in extraspecs.get('qos'): - maxiops = self.get_num(extraspecs.get('qos').get('maxIOPS'), '32') - if maxiops != storagegroupInstance['EMCMaximumIO']: - storagegroupInstance['EMCMaximumIO'] = maxiops - propertylist.append('EMCMaximumIO') - if 'maxMBPS' in extraspecs.get('qos'): - maxmbps = self.get_num(extraspecs.get('qos').get('maxMBPS'), '32') - if maxmbps != storagegroupInstance['EMCMaximumBandwidth']: - storagegroupInstance['EMCMaximumBandwidth'] = maxmbps - propertylist.append('EMCMaximumBandwidth') - if 'DistributionType' in extraspecs.get('qos') and ( - propertylist or ( - storagegroupInstance['EMCMaximumBandwidth'] != 0) or ( - storagegroupInstance['EMCMaximumIO'] != 0)): - dynamicdict = {'never': 1, 'onfailure': 2, 'always': 3} - dynamicvalue = dynamicdict.get( - extraspecs.get('qos').get('DistributionType').lower()) - if dynamicvalue: - distributiontype = self.get_num(dynamicvalue, '16') - if distributiontype != ( - storagegroupInstance['EMCMaxIODynamicDistributionType'] - ): - storagegroupInstance['EMCMaxIODynamicDistributionType'] = ( - distributiontype) - propertylist.append('EMCMaxIODynamicDistributionType') - if propertylist: - modifiedInstance = conn.ModifyInstance(storagegroupInstance, - PropertyList=propertylist) - return modifiedInstance - - def get_iqn(self, conn, ipendpointinstancename): - """Get the IPv4Address from the ip 
endpoint instance name. - - :param conn: the ecom connection - :param ipendpointinstancename: the ip endpoint instance name - :returns: foundIqn - """ - foundIqn = None - ipendpointinstance = conn.GetInstance(ipendpointinstancename) - propertiesList = ipendpointinstance.properties.items() - for properties in propertiesList: - if properties[0] == 'Name': - cimProperties = properties[1] - foundIqn = cimProperties.value - return foundIqn - - def check_ig_instance_name( - self, conn, initiatorGroupInstanceName): - """Check if a given Initiator Group Instance Name has been deleted. - - :param conn: the ecom connection - :param initiatorGroupInstanceName: the given IG instance name - :return: foundinitiatorGroupInstanceName or None if deleted - """ - foundinitiatorGroupInstanceName = self.get_existing_instance( - conn, initiatorGroupInstanceName) - if foundinitiatorGroupInstanceName is not None: - LOG.debug("Found initiator group name: " - "%(igName)s.", - {'igName': foundinitiatorGroupInstanceName}) - else: - LOG.debug("Could not find initiator group name: " - "%(igName)s.", - {'igName': foundinitiatorGroupInstanceName}) - return foundinitiatorGroupInstanceName - - def is_all_flash(self, conn, array): - """Check if array is all flash. - - :param conn: connection the ecom server - :param array: - :returns: True/False - """ - smi_version = self.get_smi_version(conn) - if smi_version >= SMI_VERSION_83: - return self._is_all_flash(conn, array) - else: - return False - - def _is_all_flash(self, conn, array): - """Check if array is all flash. 
- - :param conn: connection the ecom server - :param array: - :returns: True/False - """ - is_all_flash = False - arrayChassisInstanceNames = conn.EnumerateInstanceNames( - 'Symm_ArrayChassis') - for arrayChassisInstanceName in arrayChassisInstanceNames: - tag = arrayChassisInstanceName['Tag'] - if array in tag: - arrayChassisInstance = ( - conn.GetInstance(arrayChassisInstanceName)) - propertiesList = arrayChassisInstance.properties.items() - for properties in propertiesList: - if properties[0] == 'Model': - cimProperties = properties[1] - model = cimProperties.value - if re.search('^VMAX\s?[0-9]+FX?$', model): - is_all_flash = True - return is_all_flash - - def is_compression_disabled(self, extraSpecs): - """Check is compression is to be disabled. - - :param extraSpecs: extra specifications - :returns: dict -- a dictionary with masking view information - """ - doDisableCompression = False - if self.DISABLECOMPRESSION in extraSpecs: - if self.str2bool(extraSpecs[self.DISABLECOMPRESSION]): - doDisableCompression = True - return doDisableCompression - - def change_compression_type(self, isSourceCompressionDisabled, newType): - """Check if volume type have different compression types. - - :param isSourceCompressionDisabled: from source - :param newType: from target - :returns: boolean - """ - extraSpecs = newType['extra_specs'] - isTargetCompressionDisabled = self.is_compression_disabled(extraSpecs) - if isTargetCompressionDisabled == isSourceCompressionDisabled: - return False - else: - return True - - def str2bool(self, value): - """Check if value is yes or true. - - :param value - string value - :returns: boolean - """ - return value.lower() in ("yes", "true") - - def is_replication_enabled(self, extraSpecs): - """Check if replication is to be enabled. 
- - :param extraSpecs: extra specifications - :returns: bool - true if enabled, else false - """ - replication_enabled = False - if IS_RE in extraSpecs: - replication_enabled = True - return replication_enabled - - def get_replication_config(self, rep_device_list): - """Gather necessary replication configuration info. - - :param rep_device_list: the replication device list from cinder.conf - :returns: rep_config, replication configuration dict - """ - rep_config = {} - if not rep_device_list: - return None - else: - target = rep_device_list[0] - try: - rep_config['array'] = target['target_device_id'] - rep_config['pool'] = target['remote_pool'] - rep_config['rdf_group_label'] = target['rdf_group_label'] - rep_config['portgroup'] = target['remote_port_group'] - - except KeyError as ke: - errorMessage = (_("Failed to retrieve all necessary SRDF " - "information. Error received: %(ke)s.") % - {'ke': six.text_type(ke)}) - LOG.exception(errorMessage) - raise exception.VolumeBackendAPIException(data=errorMessage) - - try: - allow_extend = target['allow_extend'] - if self.str2bool(allow_extend): - rep_config['allow_extend'] = True - else: - rep_config['allow_extend'] = False - except KeyError: - rep_config['allow_extend'] = False - - return rep_config - - def failover_provider_location(self, provider_location, - replication_keybindings): - """Transfer ownership of a volume from one array to another. 
- - :param provider_location: the provider location - :param replication_keybindings: the rep keybindings - :return: updated provider_location - """ - if isinstance(provider_location, six.text_type): - provider_location = ast.literal_eval(provider_location) - if isinstance(replication_keybindings, six.text_type): - replication_keybindings = ast.literal_eval( - replication_keybindings) - - keybindings = provider_location['keybindings'] - provider_location['keybindings'] = replication_keybindings - replication_driver_data = keybindings - return provider_location, replication_driver_data - - def find_rdf_storage_sync_sv_sv( - self, conn, sourceInstance, storageSystem, - targetInstance, targetStorageSystem, - extraSpecs, waitforsync=True): - """Find the storage synchronized name. - - :param conn: the connection to the ecom server - :param sourceInstance: the source instance - :param storageSystem: the source storage system name - :param targetInstance: the target instance - :param targetStorageSystem: the target storage system name - :param extraSpecs: the extra specifications - :param waitforsync: flag for waiting until sync is complete - :return: foundSyncInstanceName - """ - - foundSyncInstanceName = None - syncInstanceNames = conn.ReferenceNames( - sourceInstance.path, ResultClass='SE_StorageSynchronized_SV_SV') - for syncInstanceName in syncInstanceNames: - syncSvTarget = syncInstanceName['SyncedElement'] - syncSvSource = syncInstanceName['SystemElement'] - if storageSystem != syncSvSource['SystemName'] or ( - targetStorageSystem != syncSvTarget['SystemName']): - continue - if syncSvTarget['DeviceID'] == targetInstance['DeviceID'] and ( - syncSvSource['DeviceID'] == sourceInstance['DeviceID']): - # Check that it hasn't recently been deleted. 
- try: - conn.GetInstance(syncInstanceName) - foundSyncInstanceName = syncInstanceName - LOG.debug("Found sync Name: %(sync_name)s.", - {'sync_name': foundSyncInstanceName}) - except Exception: - foundSyncInstanceName = None - break - - if foundSyncInstanceName: - # Wait for SE_StorageSynchronized_SV_SV to be fully synced. - if waitforsync: - LOG.warning( - "Expect a performance hit as volume is not not fully " - "synced on %(deviceId)s.", - {'deviceId': sourceInstance['DeviceID']}) - startTime = time.time() - self.wait_for_sync(conn, foundSyncInstanceName, extraSpecs) - LOG.warning( - "Synchronization process took: %(delta)s H:MM:SS.", - {'delta': self.get_time_delta(startTime, - time.time())}) - - return foundSyncInstanceName - - @staticmethod - def is_volume_failed_over(volume): - """Check if a volume has been failed over. - - :param volume: the volume object - :return: bool - """ - if volume is None: - return False - else: - if volume.get('replication_status'): - if volume['replication_status'] == REPLICATION_FAILOVER: - return True - else: - return False - - def update_extra_specs(self, extraSpecs): - """Update extra specs. - - :param extraSpecs: the additional info - :return: extraSpecs - """ - try: - poolDetails = extraSpecs['pool_name'].split('+') - extraSpecs[self.SLO] = poolDetails[0] - extraSpecs[self.WORKLOAD] = poolDetails[1] - extraSpecs[self.POOL] = poolDetails[2] - extraSpecs[self.ARRAY] = poolDetails[3] - except KeyError: - LOG.error("Error parsing SLO, workload from " - "the provided extra_specs.") - return extraSpecs - - def get_default_intervals_retries(self): - """Get the default intervals and retries. 
- - :return: default_dict - """ - default_dict = {} - default_dict[INTERVAL] = INTERVAL_10_SEC - default_dict[RETRIES] = JOB_RETRIES - return default_dict - - def insert_live_migration_record(self, volume): - """Insert a record of live migration destination into a temporary file - - :param volume: the volume dictionary - """ - lm_file_name = self.get_live_migration_file_name(volume) - live_migration_details = self.get_live_migration_record(volume) - if live_migration_details: - return - else: - live_migration_details = {volume['id']: [volume['id']]} - try: - with open(lm_file_name, "w") as f: - jsonutils.dump(live_migration_details, f) - except Exception: - exceptionMessage = (_( - "Error in processing live migration file.")) - LOG.exception(exceptionMessage) - raise exception.VolumeBackendAPIException( - data=exceptionMessage) - - def delete_live_migration_record(self, volume): - """Delete record of live migration - - Delete record of live migration destination from file and if - after deletion of record, delete file if empty. 
- - :param volume: the volume dictionary - """ - lm_file_name = self.get_live_migration_file_name(volume) - live_migration_details = self.get_live_migration_record(volume) - if live_migration_details: - if volume['id'] in live_migration_details: - os.remove(lm_file_name) - - def get_live_migration_record(self, volume): - """get record of live migration destination from a temporary file - - :param volume: the volume dictionary - :returns: returns a single record - """ - returned_record = None - lm_file_name = self.get_live_migration_file_name(volume) - if os.path.isfile(lm_file_name): - with open(lm_file_name, "rb") as f: - live_migration_details = jsonutils.load(f) - if volume['id'] in live_migration_details: - returned_record = live_migration_details[volume['id']] - else: - LOG.debug("%(Volume)s doesn't exist in live migration " - "record.", - {'Volume': volume['id']}) - return returned_record - - def get_live_migration_file_name(self, volume): - """get name of temporary live migration file - - :param volume: the volume dictionary - :returns: returns file name - """ - lm_file_name = ("%(prefix)s-%(volid)s" - % {'prefix': LIVE_MIGRATION_FILE, - 'volid': volume['id'][:8]}) - return lm_file_name diff --git a/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml b/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml new file mode 100644 index 00000000000..f438a931d41 --- /dev/null +++ b/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + VMAX driver version 3.0, replacing SMI-S with Unisphere REST. + This driver supports VMAX3 hybrid and All Flash arrays.