VMAX driver - Base functionality, replacing SMI-S with REST

In VMAX driver version 3.0, SMI-S will be replaced with Unisphere
REST. Pike will support VMAX3 hybrid and All Flash arrays. This
submission is for base functionality only.

Change-Id: Ic0bdf57bd5f2b1af8e7954c70225921c2501060e
Implements: blueprint vmax-rest
This commit is contained in:
Helen Walsh 2017-04-10 20:18:27 +01:00
parent 34eefbe834
commit f6d9fbadb2
15 changed files with 8680 additions and 25470 deletions

View File

@ -290,7 +290,7 @@ def list_opts():
common_opts, common_opts,
cinder_volume_drivers_dell_emc_scaleio_driver.scaleio_opts, cinder_volume_drivers_dell_emc_scaleio_driver.scaleio_opts,
cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS, cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS,
cinder_volume_drivers_dell_emc_vmax_common.emc_opts, cinder_volume_drivers_dell_emc_vmax_common.vmax_opts,
cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS, cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS,
cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS, cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS,
cinder_volume_drivers_disco_disco.disco_opts, cinder_volume_drivers_disco_disco.disco_opts,

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,828 +0,0 @@
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import provision
from cinder.volume.drivers.dell_emc.vmax import utils
# Module-level logger (oslo.log) shared by the FAST helper class below.
LOG = logging.getLogger(__name__)
# Prefix/postfix used to build the name of the default storage group for a
# FAST policy, e.g. 'OS_default_GOLD_SG' for policy 'GOLD'.
DEFAULT_SG_PREFIX = 'OS_default_'
DEFAULT_SG_POSTFIX = '_SG'


class VMAXFast(object):
    """FAST Class for SMI-S based EMC volume drivers.

    This FAST class is for EMC volume drivers based on SMI-S.
    It supports VMAX arrays.
    """
    def __init__(self, prtcl):
        # prtcl: the protocol in use ('iSCSI' or 'FC'); it is passed through
        # to the utils and provision helpers.
        self.protocol = prtcl
        self.utils = utils.VMAXUtils(prtcl)
        self.provision = provision.VMAXProvision(prtcl)

    def _check_if_fast_supported(self, conn, storageSystemInstanceName):
        """Check to see if fast is supported on the array.

        :param conn: the ecom connection
        :param storageSystemInstanceName: the storage system Instance name
        :returns: boolean -- isTieringPolicySupported
        """
        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
            conn, storageSystemInstanceName)
        isTieringPolicySupported = self.is_tiering_policy_enabled(
            conn, tierPolicyServiceInstanceName)
        # None means the capability could not be read at all; False means the
        # array explicitly reports no tiering support.
        if isTieringPolicySupported is None:
            LOG.error("Cannot determine whether "
                      "Tiering Policy is supported on this array.")
        if isTieringPolicySupported is False:
            LOG.error("Tiering Policy is not supported on this array.")
        return isTieringPolicySupported

    def is_tiering_policy_enabled(self, conn, tierPolicyServiceInstanceName):
        """Checks to see if tiering policy is supported.

        We will only check if there is a fast policy specified in
        the config file.

        :param conn: the connection information to the ecom server
        :param tierPolicyServiceInstanceName: the tier policy service
            instance name
        :returns: boolean -- foundIsSupportsTieringPolicies
        """
        foundIsSupportsTieringPolicies = None
        tierPolicyCapabilityInstanceNames = conn.AssociatorNames(
            tierPolicyServiceInstanceName,
            ResultClass='CIM_TierPolicyServiceCapabilities',
            AssocClass='CIM_ElementCapabilities')

        tierPolicyCapabilityInstanceName = tierPolicyCapabilityInstanceNames[0]
        tierPolicyCapabilityInstance = conn.GetInstance(
            tierPolicyCapabilityInstanceName, LocalOnly=False)
        propertiesList = (tierPolicyCapabilityInstance
                          .properties.items())
        # Scan the CIM properties for the boolean capability flag.
        for properties in propertiesList:
            if properties[0] == 'SupportsTieringPolicies':
                cimProperties = properties[1]
                foundIsSupportsTieringPolicies = cimProperties.value
                break

        if foundIsSupportsTieringPolicies is None:
            LOG.error("Cannot determine if Tiering Policies "
                      "are supported.")

        return foundIsSupportsTieringPolicies

    def get_and_verify_default_storage_group(
            self, conn, controllerConfigService, volumeInstanceName,
            volumeName, fastPolicyName):
        """Retrieves and verifies the default storage group for a volume.

        Given the volumeInstanceName get any associated storage group and
        check that it is the default storage group. The default storage group
        should have been already created. If not found error is logged.

        :param conn: the connection to the ecom server
        :param controllerConfigService: the controller config service
        :param volumeInstanceName: the volume instance name
        :param volumeName: the volume name (String)
        :param fastPolicyName: the fast policy name (String)
        :returns: foundDefaultStorageGroupInstanceName, defaultSgName
        :raises VolumeBackendAPIException: if FAST is not supported or the
            default storage group does not exist
        """
        foundDefaultStorageGroupInstanceName = None
        storageSystemInstanceName = self.utils.find_storage_system(
            conn, controllerConfigService)

        if not self._check_if_fast_supported(conn, storageSystemInstanceName):
            # A bare 'raise' here would fail with "No active exception to
            # re-raise"; raise a proper backend exception instead.
            exceptionMessage = _("FAST is not supported on this array.")
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        defaultSgName = self.format_default_sg_string(fastPolicyName)
        assocStorageGroupInstanceName = (
            self.utils.get_storage_group_from_volume(conn, volumeInstanceName,
                                                     defaultSgName))
        defaultStorageGroupInstanceName = (
            self.utils.find_storage_masking_group(conn,
                                                  controllerConfigService,
                                                  defaultSgName))
        if defaultStorageGroupInstanceName is None:
            exceptionMessage = (_(
                "Unable to find default storage group "
                "for FAST policy : %(fastPolicyName)s.")
                % {'fastPolicyName': fastPolicyName})
            LOG.error(exceptionMessage)
            raise exception.VolumeBackendAPIException(data=exceptionMessage)

        if assocStorageGroupInstanceName == defaultStorageGroupInstanceName:
            foundDefaultStorageGroupInstanceName = (
                assocStorageGroupInstanceName)
        else:
            LOG.warning(
                "Volume: %(volumeName)s Does not belong "
                "to storage group %(defaultSgName)s.",
                {'volumeName': volumeName,
                 'defaultSgName': defaultSgName})
        return foundDefaultStorageGroupInstanceName, defaultSgName

    def format_default_sg_string(self, fastPolicyName):
        """Format the default storage group name

        :param fastPolicyName: the fast policy name
        :returns: defaultSgName
        """
        return ("%(prefix)s%(fastPolicyName)s%(postfix)s"
                % {'prefix': DEFAULT_SG_PREFIX,
                   'fastPolicyName': fastPolicyName,
                   'postfix': DEFAULT_SG_POSTFIX})

    def add_volume_to_default_storage_group_for_fast_policy(
            self, conn, controllerConfigService, volumeInstance,
            volumeName, fastPolicyName, extraSpecs):
        """Add a volume to the default storage group for FAST policy.

        The storage group must pre-exist. Once added to the storage group,
        check the association to make sure it has been successfully added.

        :param conn: the ecom connection
        :param controllerConfigService: the controller configuration service
        :param volumeInstance: the volume instance
        :param volumeName: the volume name (String)
        :param fastPolicyName: the fast policy name (String)
        :param extraSpecs: additional info
        :returns: assocStorageGroupInstanceName - the storage group
            associated with the volume, or None on failure
        """
        failedRet = None
        defaultSgName = self.format_default_sg_string(fastPolicyName)
        storageGroupInstanceName = self.utils.find_storage_masking_group(
            conn, controllerConfigService, defaultSgName)
        if storageGroupInstanceName is None:
            LOG.error(
                "Unable to get default storage group %(defaultSgName)s.",
                {'defaultSgName': defaultSgName})
            return failedRet

        self.provision.add_members_to_masking_group(
            conn, controllerConfigService, storageGroupInstanceName,
            volumeInstance.path, volumeName, extraSpecs)
        # Check to see if the volume is in the storage group.
        assocStorageGroupInstanceName = (
            self.utils.get_storage_group_from_volume(conn,
                                                     volumeInstance.path,
                                                     defaultSgName))
        return assocStorageGroupInstanceName

    def _create_default_storage_group(self, conn, controllerConfigService,
                                      fastPolicyName, storageGroupName,
                                      volumeInstance, extraSpecs):
        """Create a first volume for the storage group.

        This is necessary because you cannot remove a volume if it is the
        last in the group. Create the default storage group for the FAST
        policy. Associate the storage group with the tier policy rule.

        :param conn: the connection information to the ecom server
        :param controllerConfigService: the controller configuration service
        :param fastPolicyName: the fast policy name (String)
        :param storageGroupName: the storage group name (String)
        :param volumeInstance: the volume instance
        :param extraSpecs: additional info
        :returns: defaultstorageGroupInstanceName - instance name of the
            default storage group, or None on failure
        """
        failedRet = None
        firstVolumeInstance = self._create_volume_for_default_volume_group(
            conn, controllerConfigService, volumeInstance.path, extraSpecs)
        if firstVolumeInstance is None:
            LOG.error(
                "Failed to create a first volume for storage "
                "group : %(storageGroupName)s.",
                {'storageGroupName': storageGroupName})
            return failedRet

        defaultStorageGroupInstanceName = (
            self.provision.create_and_get_storage_group(
                conn, controllerConfigService, storageGroupName,
                firstVolumeInstance.path, extraSpecs))
        if defaultStorageGroupInstanceName is None:
            LOG.error(
                "Failed to create default storage group for "
                "FAST policy : %(fastPolicyName)s.",
                {'fastPolicyName': fastPolicyName})
            return failedRet

        storageSystemInstanceName = (
            self.utils.find_storage_system(conn, controllerConfigService))
        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
            conn, storageSystemInstanceName)

        # Get the fast policy instance name.
        tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
            conn, tierPolicyServiceInstanceName, fastPolicyName)
        if tierPolicyRuleInstanceName is None:
            LOG.error(
                "Unable to get policy rule for fast policy: "
                "%(fastPolicyName)s.",
                {'fastPolicyName': fastPolicyName})
            return failedRet

        # Now associate it with a FAST policy.
        self.add_storage_group_to_tier_policy_rule(
            conn, tierPolicyServiceInstanceName,
            defaultStorageGroupInstanceName, tierPolicyRuleInstanceName,
            storageGroupName, fastPolicyName, extraSpecs)

        return defaultStorageGroupInstanceName

    def _create_volume_for_default_volume_group(
            self, conn, controllerConfigService, volumeInstanceName,
            extraSpecs):
        """Creates a volume for the default storage group for a fast policy.

        Creates a small first volume for the default storage group for a
        fast policy. This is necessary because you cannot remove
        the last volume from a storage group and this scenario is likely.

        :param conn: the connection information to the ecom server
        :param controllerConfigService: the controller configuration service
        :param volumeInstanceName: the volume instance name
        :param extraSpecs: additional info
        :returns: firstVolumeInstanceName - instance name of the first volume
            in the storage group, or None on failure
        """
        failedRet = None
        storageSystemName = self.utils.find_storage_system_name_from_service(
            controllerConfigService)
        storageConfigurationInstanceName = (
            self.utils.find_storage_configuration_service(
                conn, storageSystemName))

        poolInstanceName = self.utils.get_assoc_pool_from_volume(
            conn, volumeInstanceName)
        if poolInstanceName is None:
            LOG.error("Unable to get associated pool of volume.")
            return failedRet

        # A minimal 1 GB placeholder volume keeps the group non-empty.
        volumeName = 'vol1'
        volumeSize = '1'
        volumeDict, _rc = (
            self.provision.create_volume_from_pool(
                conn, storageConfigurationInstanceName, volumeName,
                poolInstanceName, volumeSize, extraSpecs))
        firstVolumeInstanceName = self.utils.find_volume_instance(
            conn, volumeDict, volumeName)
        return firstVolumeInstanceName

    def add_storage_group_to_tier_policy_rule(
            self, conn, tierPolicyServiceInstanceName,
            storageGroupInstanceName, tierPolicyRuleInstanceName,
            storageGroupName, fastPolicyName, extraSpecs):
        """Add the storage group to the tier policy rule.

        :param conn: the connection information to the ecom server
        :param tierPolicyServiceInstanceName: tier policy service
        :param storageGroupInstanceName: storage group instance name
        :param tierPolicyRuleInstanceName: tier policy instance name
        :param storageGroupName: the storage group name (String)
        :param fastPolicyName: the fast policy name (String)
        :param extraSpecs: additional info
        :returns: int -- return code
        :raises VolumeBackendAPIException:
        """
        # 5 is ("Add InElements to Policy").
        modificationType = '5'

        rc, job = conn.InvokeMethod(
            'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName,
            PolicyRule=tierPolicyRuleInstanceName,
            Operation=self.utils.get_num(modificationType, '16'),
            InElements=[storageGroupInstanceName])
        if rc != 0:
            # Non-zero may mean an async job was started; wait for it.
            rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                             extraSpecs)
            if rc != 0:
                exceptionMessage = (_(
                    "Error associating storage group : %(storageGroupName)s. "
                    "To fast Policy: %(fastPolicyName)s with error "
                    "description: %(errordesc)s.")
                    % {'storageGroupName': storageGroupName,
                       'fastPolicyName': fastPolicyName,
                       'errordesc': errordesc})
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        return rc

    def _get_service_level_tier_policy(
            self, conn, tierPolicyServiceInstanceName, fastPolicyName):
        """Returns the existing tier policies for a storage system instance.

        Given the storage system instance name, get the existing tier
        policies on that array.

        :param conn: the connection information to the ecom server
        :param tierPolicyServiceInstanceName: the policy service
        :param fastPolicyName: the fast policy name e.g BRONZE1
        :returns: foundTierPolicyRuleInstanceName - the short name,
            everything after the :
        """
        foundTierPolicyRuleInstanceName = None

        tierPolicyRuleInstanceNames = self._get_existing_tier_policies(
            conn, tierPolicyServiceInstanceName)

        for tierPolicyRuleInstanceName in tierPolicyRuleInstanceNames:
            policyRuleName = tierPolicyRuleInstanceName['PolicyRuleName']
            if fastPolicyName == policyRuleName:
                foundTierPolicyRuleInstanceName = tierPolicyRuleInstanceName
                break

        return foundTierPolicyRuleInstanceName

    def _get_existing_tier_policies(self, conn, tierPolicyServiceInstanceName):
        """Given the tier policy service, get the existing tier policies.

        :param conn: the connection information to the ecom server
        :param tierPolicyServiceInstanceName: the tier policy service
            instance Name
        :returns: list -- the tier policy rule instance names
        """
        tierPolicyRuleInstanceNames = conn.AssociatorNames(
            tierPolicyServiceInstanceName, ResultClass='Symm_TierPolicyRule')

        return tierPolicyRuleInstanceNames

    def get_associated_tier_policy_from_storage_group(
            self, conn, storageGroupInstanceName):
        """Given the storage group instance name, get the associated policy.

        :param conn: the connection information to the ecom server
        :param storageGroupInstanceName: the storage group instance name
        :returns: the tier policy rule instance name, or None if there is
            not exactly one associated policy
        """
        tierPolicyInstanceName = None

        tierPolicyInstanceNames = conn.AssociatorNames(
            storageGroupInstanceName,
            AssocClass='CIM_TierPolicySetAppliesToElement',
            ResultClass='CIM_TierPolicyRule')

        # Exactly one associated policy is expected.
        if len(tierPolicyInstanceNames) == 1:
            tierPolicyInstanceName = tierPolicyInstanceNames[0]

        return tierPolicyInstanceName

    def get_associated_tier_from_tier_policy(
            self, conn, tierPolicyRuleInstanceName):
        """Given the tierPolicyInstanceName get the associated tiers.

        :param conn: the connection information to the ecom server
        :param tierPolicyRuleInstanceName: the tier policy rule instance name
        :returns: list -- a list of storage tier instance names, or None if
            there are none
        """
        storageTierInstanceNames = conn.AssociatorNames(
            tierPolicyRuleInstanceName,
            AssocClass='CIM_AssociatedTierPolicy')

        if len(storageTierInstanceNames) == 0:
            storageTierInstanceNames = None
            LOG.warning(
                "Unable to get storage tiers from tier policy rule.")

        return storageTierInstanceNames

    def get_policy_default_storage_group(
            self, conn, controllerConfigService, policyName):
        """Returns the default storage group for a tier policy.

        Given the tier policy instance name get the associated default
        storage group.

        :param conn: the connection information to the ecom server
        :param controllerConfigService: ControllerConfigurationService
            instance name
        :param policyName: string value
        :returns: storageGroupInstanceName - instance name of the default
            storage group
        """
        foundStorageMaskingGroupInstanceName = None
        storageMaskingGroupInstances = conn.Associators(
            controllerConfigService, ResultClass='CIM_DeviceMaskingGroup')

        for storageMaskingGroupInstance in storageMaskingGroupInstances:
            if ('_default_' in storageMaskingGroupInstance['ElementName'] and
                    policyName in storageMaskingGroupInstance['ElementName']):
                # Check that it has not been recently deleted.
                instance = self.utils.get_existing_instance(
                    conn, storageMaskingGroupInstance.path)
                if instance is None:
                    # Storage Group doesn't exist any more.
                    foundStorageMaskingGroupInstanceName = None
                else:
                    foundStorageMaskingGroupInstanceName = (
                        storageMaskingGroupInstance.path)

        return foundStorageMaskingGroupInstanceName

    def _get_associated_storage_groups_from_tier_policy(
            self, conn, tierPolicyInstanceName):
        """Given the tier policy instance name get the storage groups.

        :param conn: the connection information to the ecom server
        :param tierPolicyInstanceName: tier policy instance name
        :returns: list -- the list of storage instance names
        """
        managedElementInstanceNames = conn.AssociatorNames(
            tierPolicyInstanceName,
            AssocClass='CIM_TierPolicySetAppliesToElement',
            ResultClass='CIM_DeviceMaskingGroup')

        return managedElementInstanceNames

    def get_associated_pools_from_tier(
            self, conn, storageTierInstanceName):
        """Given the storage tier instance name get the storage pools.

        :param conn: the connection information to the ecom server
        :param storageTierInstanceName: the storage tier instance name
        :returns: list -- a list of storage pool instance names
        """
        storagePoolInstanceNames = conn.AssociatorNames(
            storageTierInstanceName,
            AssocClass='CIM_MemberOfCollection',
            ResultClass='CIM_StoragePool')

        return storagePoolInstanceNames

    def add_storage_group_and_verify_tier_policy_assoc(
            self, conn, controllerConfigService, storageGroupInstanceName,
            storageGroupName, fastPolicyName, extraSpecs):
        """Adds a storage group to a tier policy and verifies success.

        Add a storage group to a tier policy rule and verify that it was
        successful by getting the association.

        :param conn: the connection to the ecom server
        :param controllerConfigService: the controller config service
        :param storageGroupInstanceName: the storage group instance name
        :param storageGroupName: the storage group name (String)
        :param fastPolicyName: the fast policy name (String)
        :param extraSpecs: additional info
        :returns: assocTierPolicyInstanceName, or None on failure
        """
        failedRet = None
        assocTierPolicyInstanceName = None
        storageSystemInstanceName = self.utils.find_storage_system(
            conn, controllerConfigService)
        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
            conn, storageSystemInstanceName)

        # Get the fast policy instance name.
        tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
            conn, tierPolicyServiceInstanceName, fastPolicyName)
        if tierPolicyRuleInstanceName is None:
            LOG.error(
                "Cannot find the fast policy %(fastPolicyName)s.",
                {'fastPolicyName': fastPolicyName})
            return failedRet
        else:
            LOG.debug(
                "Adding storage group %(storageGroupInstanceName)s to "
                "tier policy rule %(tierPolicyRuleInstanceName)s.",
                {'storageGroupInstanceName': storageGroupInstanceName,
                 'tierPolicyRuleInstanceName': tierPolicyRuleInstanceName})

            # Associate the new storage group with the existing fast policy.
            try:
                self.add_storage_group_to_tier_policy_rule(
                    conn, tierPolicyServiceInstanceName,
                    storageGroupInstanceName, tierPolicyRuleInstanceName,
                    storageGroupName, fastPolicyName, extraSpecs)
            except Exception:
                LOG.exception(
                    "Failed to add storage group %(storageGroupInstanceName)s "
                    "to tier policy rule %(tierPolicyRuleInstanceName)s.",
                    {'storageGroupInstanceName': storageGroupInstanceName,
                     'tierPolicyRuleInstanceName':
                         tierPolicyRuleInstanceName})
                return failedRet

            # Check that the storage group has been associated with the
            # tier policy rule.
            assocTierPolicyInstanceName = (
                self.get_associated_tier_policy_from_storage_group(
                    conn, storageGroupInstanceName))

            LOG.debug(
                "AssocTierPolicyInstanceName is "
                "%(assocTierPolicyInstanceName)s.",
                {'assocTierPolicyInstanceName': assocTierPolicyInstanceName})
        return assocTierPolicyInstanceName

    def get_associated_policy_from_storage_group(
            self, conn, storageGroupInstanceName):
        """Get the tier policy instance name for a storage group instance name.

        :param conn: the connection information to the ecom server
        :param storageGroupInstanceName: storage group instance name
        :returns: foundTierPolicyInstanceName - instance name of the
            tier policy object
        """
        foundTierPolicyInstanceName = None

        tierPolicyInstanceNames = conn.AssociatorNames(
            storageGroupInstanceName,
            ResultClass='Symm_TierPolicyRule',
            AssocClass='Symm_TierPolicySetAppliesToElement')

        if len(tierPolicyInstanceNames) > 0:
            foundTierPolicyInstanceName = tierPolicyInstanceNames[0]

        return foundTierPolicyInstanceName

    def delete_storage_group_from_tier_policy_rule(
            self, conn, tierPolicyServiceInstanceName,
            storageGroupInstanceName, tierPolicyRuleInstanceName,
            extraSpecs):
        """Disassociate the storage group from its tier policy rule.

        :param conn: connection the ecom server
        :param tierPolicyServiceInstanceName: instance name of the tier policy
            service
        :param storageGroupInstanceName: instance name of the storage group
        :param tierPolicyRuleInstanceName: instance name of the tier policy
            associated with the storage group
        :param extraSpecs: additional information
        """
        # 6 is the removal operation (cf. 5 = "Add InElements to Policy"
        # in add_storage_group_to_tier_policy_rule).
        modificationType = '6'
        LOG.debug("Invoking ModifyStorageTierPolicyRule %s.",
                  tierPolicyRuleInstanceName)
        try:
            rc, job = conn.InvokeMethod(
                'ModifyStorageTierPolicyRule', tierPolicyServiceInstanceName,
                PolicyRule=tierPolicyRuleInstanceName,
                Operation=self.utils.get_num(modificationType, '16'),
                InElements=[storageGroupInstanceName])
            if rc != 0:
                rc, errordesc = self.utils.wait_for_job_complete(conn, job,
                                                                 extraSpecs)
                if rc != 0:
                    LOG.error("Error disassociating storage group from "
                              "policy: %s.", errordesc)
                else:
                    LOG.debug("Disassociated storage group from policy.")
            else:
                LOG.debug("ModifyStorageTierPolicyRule completed.")
        except Exception as e:
            # Best-effort: a missing association is not fatal here.
            LOG.info("Storage group not associated with the "
                     "policy. Exception is %s.", e)

    def get_pool_associated_to_policy(
            self, conn, fastPolicyName, arraySN,
            storageConfigService, poolInstanceName):
        """Given a FAST policy check that the pool is linked to the policy.

        If it's associated return the pool instance, if not return None.
        First check if FAST is enabled on the array.

        :param conn: the ecom connection
        :param fastPolicyName: the fast policy name (String)
        :param arraySN: the array serial number (String)
        :param storageConfigService: the storage Config Service
        :param poolInstanceName: the pool instance we want to check for
            association with the fast storage tier
        :returns: foundPoolInstanceName
        :raises VolumeBackendAPIException: if FAST is not supported
        """
        storageSystemInstanceName = self.utils.find_storage_system(
            conn, storageConfigService)

        if not self._check_if_fast_supported(conn, storageSystemInstanceName):
            errorMessage = (_(
                "FAST is not supported on this array."))
            LOG.error(errorMessage)
            # The original code created this exception but never raised it,
            # silently continuing on an unsupported array.
            raise exception.VolumeBackendAPIException(data=errorMessage)

        tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
            conn, storageSystemInstanceName)

        tierPolicyRuleInstanceName = self._get_service_level_tier_policy(
            conn, tierPolicyServiceInstanceName, fastPolicyName)
        # Get the associated storage tiers from the tier policy rule.
        storageTierInstanceNames = self.get_associated_tier_from_tier_policy(
            conn, tierPolicyRuleInstanceName)

        # For each gold storage tier get the associated pools.
        foundPoolInstanceName = None
        for storageTierInstanceName in storageTierInstanceNames:
            assocStoragePoolInstanceNames = (
                self.get_associated_pools_from_tier(conn,
                                                    storageTierInstanceName))
            for assocStoragePoolInstanceName in assocStoragePoolInstanceNames:
                if poolInstanceName == assocStoragePoolInstanceName:
                    foundPoolInstanceName = poolInstanceName
                    break
            if foundPoolInstanceName is not None:
                break

        return foundPoolInstanceName

    def is_tiering_policy_enabled_on_storage_system(
            self, conn, storageSystemInstanceName):
        """Checks if tiering policy is enabled on a storage system.

        True if FAST policy enabled on the given storage system;
        False otherwise.

        :param conn: the ecom connection
        :param storageSystemInstanceName: a storage system instance name
        :returns: boolean -- isTieringPolicySupported
        """
        try:
            tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
                conn, storageSystemInstanceName)
            isTieringPolicySupported = self.is_tiering_policy_enabled(
                conn, tierPolicyServiceInstanceName)
        except Exception as e:
            LOG.error("Exception: %s.", e)
            return False

        return isTieringPolicySupported

    def get_tier_policy_by_name(
            self, conn, arrayName, policyName):
        """Given the name of the policy, get the TierPolicyRule instance name.

        :param conn: the ecom connection
        :param arrayName: the array
        :param policyName: string -- the name of policy rule
        :returns: tier policy instance name. None if not found
        """
        tierPolicyInstanceNames = conn.EnumerateInstanceNames(
            'Symm_TierPolicyRule')
        for policy in tierPolicyInstanceNames:
            if (policyName == policy['PolicyRuleName'] and
                    arrayName in policy['SystemName']):
                return policy
        return None

    def get_capacities_associated_to_policy(self, conn, arrayName, policyName):
        """Gets the total and un-used capacities for all pools in a policy.

        Given the name of the policy, get the total capacity and un-used
        capacity in GB of all the storage pools associated with the policy.

        :param conn: the ecom connection
        :param arrayName: the array
        :param policyName: the name of policy rule, a string value
        :returns: int -- total capacity in GB of all pools associated with
            the policy
        :returns: int -- real physical capacity in GB of all pools
            available to be used
        :returns: int -- (Provisioned capacity-EMCSubscribedCapacity) in GB
            is the capacity that has been provisioned
        :returns: int -- the maximum oversubscription ratio
        """
        policyInstanceName = self.get_tier_policy_by_name(
            conn, arrayName, policyName)

        total_capacity_gb = 0
        provisioned_capacity_gb = 0
        free_capacity_gb = 0
        array_max_over_subscription = None

        tierInstanceNames = self.get_associated_tier_from_tier_policy(
            conn, policyInstanceName)
        for tierInstanceName in tierInstanceNames:
            # Check that tier hasn't suddenly been deleted.
            instance = self.utils.get_existing_instance(conn, tierInstanceName)
            if instance is None:
                # Tier doesn't exist any more.
                break

            poolInstanceNames = self.get_associated_pools_from_tier(
                conn, tierInstanceName)
            for poolInstanceName in poolInstanceNames:
                # Check that pool hasn't suddenly been deleted.
                storagePoolInstance = self.utils.get_existing_instance(
                    conn, poolInstanceName)
                if storagePoolInstance is None:
                    # Pool doesn't exist any more.
                    # NOTE(review): 'break' stops processing the remaining
                    # pools of this tier; preserved from original behavior.
                    break
                total_capacity_gb += self.utils.convert_bits_to_gbs(
                    storagePoolInstance['TotalManagedSpace'])
                provisioned_capacity_gb += self.utils.convert_bits_to_gbs(
                    storagePoolInstance['EMCSubscribedCapacity'])
                free_capacity_gb += self.utils.convert_bits_to_gbs(
                    storagePoolInstance['RemainingManagedSpace'])
                try:
                    array_max_over_subscription = (
                        self.utils.get_ratio_from_max_sub_per(
                            storagePoolInstance['EMCMaxSubscriptionPercent']))
                except KeyError:
                    array_max_over_subscription = 65534
                LOG.debug(
                    "PolicyName:%(policyName)s, pool: %(poolInstanceName)s, "
                    "provisioned_capacity_gb = %(provisioned_capacity_gb)lu.",
                    {'policyName': policyName,
                     'poolInstanceName': poolInstanceName,
                     'provisioned_capacity_gb': provisioned_capacity_gb})
        return (total_capacity_gb, free_capacity_gb,
                provisioned_capacity_gb, array_max_over_subscription)

    def get_or_create_default_storage_group(
            self, conn, controllerConfigService, fastPolicyName,
            volumeInstance, extraSpecs):
        """Create or get a default storage group for FAST policy.

        :param conn: the ecom connection
        :param controllerConfigService: the controller configuration service
        :param fastPolicyName: the fast policy name (String)
        :param volumeInstance: the volume instance
        :param extraSpecs: additional info
        :returns: defaultStorageGroupInstanceName - the default storage group
            instance name
        """
        defaultSgName = self.format_default_sg_string(fastPolicyName)
        defaultStorageGroupInstanceName = (
            self.utils.find_storage_masking_group(conn,
                                                  controllerConfigService,
                                                  defaultSgName))
        if defaultStorageGroupInstanceName is None:
            # Create it and associate it with the FAST policy in question.
            defaultStorageGroupInstanceName = (
                self._create_default_storage_group(conn,
                                                   controllerConfigService,
                                                   fastPolicyName,
                                                   defaultSgName,
                                                   volumeInstance,
                                                   extraSpecs))
        return defaultStorageGroupInstanceName

    def _get_associated_tier_policy_from_pool(self, conn, poolInstanceName):
        """Given the pool instance name get the associated FAST tier policy.

        :param conn: the connection information to the ecom server
        :param poolInstanceName: the pool instance name
        :returns: the FAST Policy name (if it exists)
        """
        fastPolicyName = None

        storageTierInstanceNames = conn.AssociatorNames(
            poolInstanceName,
            AssocClass='CIM_MemberOfCollection',
            ResultClass='CIM_StorageTier')
        if len(storageTierInstanceNames) > 0:
            tierPolicyInstanceNames = conn.AssociatorNames(
                storageTierInstanceNames[0],
                AssocClass='CIM_AssociatedTierPolicy')
            if len(tierPolicyInstanceNames) > 0:
                tierPolicyInstanceName = tierPolicyInstanceNames[0]
                fastPolicyName = tierPolicyInstanceName['PolicyRuleName']

        return fastPolicyName

    def is_volume_in_default_SG(self, conn, volumeInstanceName):
        """Check if the volume is already part of the default storage group.

        :param conn: the ecom connection
        :param volumeInstanceName: the volume instance
        :returns: boolean -- True if the volume is already in default
            storage group. False otherwise
        """
        sgInstanceNames = conn.AssociatorNames(
            volumeInstanceName,
            ResultClass='CIM_DeviceMaskingGroup')
        if len(sgInstanceNames) == 0:
            LOG.debug("volume %(vol)s is not in default sg.",
                      {'vol': volumeInstanceName})
            return False
        else:
            for sgInstance in sgInstanceNames:
                if DEFAULT_SG_PREFIX in sgInstance['InstanceID']:
                    LOG.debug("volume %(vol)s already in default sg.",
                              {'vol': volumeInstanceName})
                    return True
        return False

View File

@ -1,4 +1,4 @@
# Copyright (c) 2012 - 2015 EMC Corporation. # Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved. # All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -16,7 +16,6 @@
import ast import ast
from oslo_log import log as logging from oslo_log import log as logging
import six
from cinder import interface from cinder import interface
from cinder.volume import driver from cinder.volume import driver
@ -28,7 +27,7 @@ LOG = logging.getLogger(__name__)
@interface.volumedriver @interface.volumedriver
class VMAXFCDriver(driver.FibreChannelDriver): class VMAXFCDriver(driver.FibreChannelDriver):
"""EMC FC Drivers for VMAX using SMI-S. """FC Drivers for VMAX using REST.
Version history: Version history:
@ -76,9 +75,10 @@ class VMAXFCDriver(driver.FibreChannelDriver):
- Support for compression on All Flash - Support for compression on All Flash
- Volume replication 2.1 (bp add-vmax-replication) - Volume replication 2.1 (bp add-vmax-replication)
- rename and restructure driver (bp vmax-rename-dell-emc) - rename and restructure driver (bp vmax-rename-dell-emc)
3.0.0 - REST based driver
""" """
VERSION = "2.5.0" VERSION = "3.0.0"
# ThirdPartySystems wiki # ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI" CI_WIKI_NAME = "EMC_VMAX_CI"
@ -86,64 +86,98 @@ class VMAXFCDriver(driver.FibreChannelDriver):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(VMAXFCDriver, self).__init__(*args, **kwargs) super(VMAXFCDriver, self).__init__(*args, **kwargs)
self.active_backend_id = kwargs.get('active_backend_id', None)
self.common = common.VMAXCommon( self.common = common.VMAXCommon(
'FC', 'FC',
self.VERSION, self.VERSION,
configuration=self.configuration, configuration=self.configuration)
active_backend_id=self.active_backend_id)
self.zonemanager_lookup_service = fczm_utils.create_lookup_service() self.zonemanager_lookup_service = fczm_utils.create_lookup_service()
def check_for_setup_error(self): def check_for_setup_error(self):
pass pass
def create_volume(self, volume): def create_volume(self, volume):
"""Creates a VMAX volume.""" """Creates a VMAX volume.
:param volume: the cinder volume object
:return: provider location dict
"""
return self.common.create_volume(volume) return self.common.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot): def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.""" """Creates a volume from a snapshot.
:param volume: the cinder volume object
:param snapshot: the cinder snapshot object
:return: provider location dict
"""
return self.common.create_volume_from_snapshot( return self.common.create_volume_from_snapshot(
volume, snapshot) volume, snapshot)
def create_cloned_volume(self, volume, src_vref): def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume.""" """Creates a cloned volume.
:param volume: the cinder volume object
:param src_vref: the source volume reference
:return: provider location dict
"""
return self.common.create_cloned_volume(volume, src_vref) return self.common.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume): def delete_volume(self, volume):
"""Deletes an VMAX volume.""" """Deletes a VMAX volume.
:param volume: the cinder volume object
"""
self.common.delete_volume(volume) self.common.delete_volume(volume)
def create_snapshot(self, snapshot): def create_snapshot(self, snapshot):
"""Creates a snapshot.""" """Creates a snapshot.
src_volume = snapshot['volume']
volpath = self.common.create_snapshot(snapshot, src_volume)
model_update = {} :param snapshot: the cinder snapshot object
snapshot['provider_location'] = six.text_type(volpath) :return: provider location dict
model_update['provider_location'] = snapshot['provider_location'] """
return model_update src_volume = snapshot.volume
return self.common.create_snapshot(snapshot, src_volume)
def delete_snapshot(self, snapshot): def delete_snapshot(self, snapshot):
"""Deletes a snapshot.""" """Deletes a snapshot.
src_volume = snapshot['volume']
:param snapshot: the cinder snapshot object
"""
src_volume = snapshot.volume
self.common.delete_snapshot(snapshot, src_volume) self.common.delete_snapshot(snapshot, src_volume)
def ensure_export(self, context, volume): def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume.""" """Driver entry point to get the export info for an existing volume.
:param context: the context
:param volume: the cinder volume object
"""
pass pass
def create_export(self, context, volume, connector): def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume.""" """Driver entry point to get the export info for a new volume.
:param context: the context
:param volume: the cinder volume object
:param connector: the connector object
"""
pass pass
def remove_export(self, context, volume): def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.""" """Driver entry point to remove an export for a volume.
:param context: the context
:param volume: the cinder volume object
"""
pass pass
def check_for_export(self, context, volume_id): @staticmethod
"""Make sure volume is exported.""" def check_for_export(context, volume_id):
"""Make sure volume is exported.
:param context: the context
:param volume_id: the volume id
"""
pass pass
@fczm_utils.add_fc_zone @fczm_utils.add_fc_zone
@ -176,6 +210,9 @@ class VMAXFCDriver(driver.FibreChannelDriver):
'target_wwn': ['1234567890123', '0987654321321'], 'target_wwn': ['1234567890123', '0987654321321'],
} }
} }
:param volume: the cinder volume object
:param connector: the connector object
:return: dict -- the target_wwns and initiator_target_map
""" """
device_info = self.common.initialize_connection( device_info = self.common.initialize_connection(
volume, connector) volume, connector)
@ -186,16 +223,14 @@ class VMAXFCDriver(driver.FibreChannelDriver):
Add relevant data to data dict, target_lun, target_wwn and Add relevant data to data dict, target_lun, target_wwn and
initiator_target_map. initiator_target_map.
:param device_info: device_info :param device_info: device_info
:param volume: the volume object :param volume: the volume object
:param connector: the connector object :param connector: the connector object
:returns: dict -- the target_wwns and initiator_target_map :returns: dict -- the target_wwns and initiator_target_map
""" """
device_number = device_info['hostlunid'] device_number = device_info['hostlunid']
storage_system = device_info['storagesystem']
target_wwns, init_targ_map = self._build_initiator_target_map( target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector) volume, connector)
data = {'driver_volume_type': 'fibre_channel', data = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': device_number, 'data': {'target_lun': device_number,
@ -222,10 +257,8 @@ class VMAXFCDriver(driver.FibreChannelDriver):
:returns: dict -- the target_wwns and initiator_target_map if the :returns: dict -- the target_wwns and initiator_target_map if the
zone is to be removed, otherwise empty zone is to be removed, otherwise empty
""" """
data = {'driver_volume_type': 'fibre_channel', data = {'driver_volume_type': 'fibre_channel', 'data': {}}
'data': {}} zoning_mappings = self._get_zoning_mappings(volume, connector)
zoning_mappings = (
self._get_zoning_mappings(volume, connector))
if zoning_mappings: if zoning_mappings:
self.common.terminate_connection(volume, connector) self.common.terminate_connection(volume, connector)
@ -243,37 +276,41 @@ class VMAXFCDriver(driver.FibreChannelDriver):
zoning_mappings = {'port_group': None, zoning_mappings = {'port_group': None,
'initiator_group': None, 'initiator_group': None,
'target_wwns': None, 'target_wwns': None,
'init_targ_map': None} 'init_targ_map': None,
loc = volume['provider_location'] 'array': None}
loc = volume.provider_location
name = ast.literal_eval(loc) name = ast.literal_eval(loc)
storage_system = name['keybindings']['SystemName'] host = connector['host']
array = name['array']
device_id = name['device_id']
LOG.debug("Start FC detach process for volume: %(volume)s.", LOG.debug("Start FC detach process for volume: %(volume)s.",
{'volume': volume['name']}) {'volume': volume.name})
mvInstanceName = self.common.get_masking_view_by_volume( masking_views = self.common.get_masking_views_from_volume(
volume, connector) array, device_id, host)
if mvInstanceName: if masking_views:
portGroupInstanceName = ( portgroup = (
self.common.get_port_group_from_masking_view( self.common.get_port_group_from_masking_view(
mvInstanceName)) array, masking_views[0]))
initiatorGroupInstanceName = ( initiator_group = (
self.common.get_initiator_group_from_masking_view( self.common.get_initiator_group_from_masking_view(
mvInstanceName)) array, masking_views[0]))
LOG.debug("Found port group: %(portGroup)s " LOG.debug("Found port group: %(portGroup)s "
"in masking view %(maskingView)s.", "in masking view %(maskingView)s.",
{'portGroup': portGroupInstanceName, {'portGroup': portgroup,
'maskingView': mvInstanceName}) 'maskingView': masking_views[0]})
# Map must be populated before the terminate_connection # Map must be populated before the terminate_connection
target_wwns, init_targ_map = self._build_initiator_target_map( target_wwns, init_targ_map = self._build_initiator_target_map(
storage_system, volume, connector) volume, connector)
zoning_mappings = {'port_group': portGroupInstanceName, zoning_mappings = {'port_group': portgroup,
'initiator_group': initiatorGroupInstanceName, 'initiator_group': initiator_group,
'target_wwns': target_wwns, 'target_wwns': target_wwns,
'init_targ_map': init_targ_map} 'init_targ_map': init_targ_map,
'array': array}
else: else:
LOG.warning("Volume %(volume)s is not in any masking view.", LOG.warning("Volume %(volume)s is not in any masking view.",
{'volume': volume['name']}) {'volume': volume.name})
return zoning_mappings return zoning_mappings
def _cleanup_zones(self, zoning_mappings): def _cleanup_zones(self, zoning_mappings):
@ -284,24 +321,14 @@ class VMAXFCDriver(driver.FibreChannelDriver):
""" """
LOG.debug("Looking for masking views still associated with " LOG.debug("Looking for masking views still associated with "
"Port Group %s.", zoning_mappings['port_group']) "Port Group %s.", zoning_mappings['port_group'])
if zoning_mappings['initiator_group']: masking_views = self.common.get_common_masking_views(
checkIgInstanceName = ( zoning_mappings['array'], zoning_mappings['port_group'],
self.common.check_ig_instance_name(
zoning_mappings['initiator_group']))
else:
checkIgInstanceName = None
# if it has not been deleted, check for remaining masking views
if checkIgInstanceName:
mvInstances = self._get_common_masking_views(
zoning_mappings['port_group'],
zoning_mappings['initiator_group']) zoning_mappings['initiator_group'])
if len(mvInstances) > 0: if masking_views:
LOG.debug("Found %(numViews)lu MaskingViews.", LOG.debug("Found %(numViews)d MaskingViews.",
{'numViews': len(mvInstances)}) {'numViews': len(masking_views)})
data = {'driver_volume_type': 'fibre_channel', data = {'driver_volume_type': 'fibre_channel', 'data': {}}
'data': {}}
else: # no masking views found else: # no masking views found
LOG.debug("No MaskingViews were found. Deleting zone.") LOG.debug("No MaskingViews were found. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel', data = {'driver_volume_type': 'fibre_channel',
@ -312,41 +339,21 @@ class VMAXFCDriver(driver.FibreChannelDriver):
LOG.debug("Return FC data for zone removal: %(data)s.", LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data}) {'data': data})
else: # The initiator group has been deleted
LOG.debug("Initiator Group has been deleted. Deleting zone.")
data = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': zoning_mappings['target_wwns'],
'initiator_target_map':
zoning_mappings['init_targ_map']}}
LOG.debug("Return FC data for zone removal: %(data)s.",
{'data': data})
return data return data
def _get_common_masking_views( def _build_initiator_target_map(self, volume, connector):
self, portGroupInstanceName, initiatorGroupInstanceName): """Build the target_wwns and the initiator target map.
"""Check to see the existence of mv in list"""
mvInstances = []
mvInstancesByPG = self.common.get_masking_views_by_port_group(
portGroupInstanceName)
mvInstancesByIG = self.common.get_masking_views_by_initiator_group( :param volume: the cinder volume object
initiatorGroupInstanceName) :param connector: the connector object
:return: target_wwns -- list, init_targ_map -- dict
for mvInstanceByPG in mvInstancesByPG: """
if mvInstanceByPG in mvInstancesByIG: target_wwns, init_targ_map = [], {}
mvInstances.append(mvInstanceByPG)
return mvInstances
def _build_initiator_target_map(self, storage_system, volume, connector):
"""Build the target_wwns and the initiator target map."""
target_wwns = []
init_targ_map = {}
initiator_wwns = connector['wwpns'] initiator_wwns = connector['wwpns']
fc_targets = self.common.get_target_wwns_from_masking_view(
volume, connector)
if self.zonemanager_lookup_service: if self.zonemanager_lookup_service:
fc_targets = self.common.get_target_wwns_from_masking_view(
storage_system, volume, connector)
mapping = ( mapping = (
self.zonemanager_lookup_service. self.zonemanager_lookup_service.
get_device_mapping_from_network(initiator_wwns, fc_targets)) get_device_mapping_from_network(initiator_wwns, fc_targets))
@ -356,15 +363,18 @@ class VMAXFCDriver(driver.FibreChannelDriver):
for initiator in map_d['initiator_port_wwn_list']: for initiator in map_d['initiator_port_wwn_list']:
init_targ_map[initiator] = map_d['target_port_wwn_list'] init_targ_map[initiator] = map_d['target_port_wwn_list']
else: # No lookup service, pre-zoned case. else: # No lookup service, pre-zoned case.
target_wwns = self.common.get_target_wwns_list( target_wwns = fc_targets
storage_system, volume, connector)
for initiator in initiator_wwns: for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns init_targ_map[initiator] = target_wwns
return list(set(target_wwns)), init_targ_map return list(set(target_wwns)), init_targ_map
def extend_volume(self, volume, new_size): def extend_volume(self, volume, new_size):
"""Extend an existing volume.""" """Extend an existing volume.
:param volume: the cinder volume object
:param new_size: the required new size
"""
self.common.extend_volume(volume, new_size) self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False): def get_volume_stats(self, refresh=False):
@ -386,53 +396,14 @@ class VMAXFCDriver(driver.FibreChannelDriver):
data['driver_version'] = self.VERSION data['driver_version'] = self.VERSION
self._stats = data self._stats = data
def migrate_volume(self, ctxt, volume, host):
"""Migrate a volume from one Volume Backend to another.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target(destination)
information
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
return self.common.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter.
:param host: the host dict holding the relevant
target(destination) information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
self.common.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.common.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
return self.common.create_cgsnapshot(context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots)
def manage_existing(self, volume, external_ref): def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder). """Manages an existing VMAX Volume (import to Cinder).
Renames the Volume to match the expected name for the volume. Renames the Volume to match the expected name for the volume.
Also need to consider things like QoS, Emulation, account/tenant. Also need to consider things like QoS, Emulation, account/tenant.
:param volume: the volume object
:param external_ref: the reference for the VMAX volume
:return: model_update
""" """
return self.common.manage_existing(volume, external_ref) return self.common.manage_existing(volume, external_ref)
@ -452,74 +423,3 @@ class VMAXFCDriver(driver.FibreChannelDriver):
Leave the volume intact on the backend array. Leave the volume intact on the backend array.
""" """
return self.common.unmanage(volume) return self.common.unmanage(volume)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
"""Updates LUNs in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the dictionary of a consistency group as source.
:param source_vols: a list of volume dictionaries in the source_cg.
"""
return self.common.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots, source_cg,
source_vols)
def create_export_snapshot(self, context, snapshot, connector):
"""Driver entry point to get the export info for a new snapshot."""
pass
def remove_export_snapshot(self, context, snapshot):
"""Driver entry point to remove an export for a snapshot."""
pass
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Allows connection to snapshot.
:param snapshot: the snapshot object
:param connector: the connector object
:param kwargs: additional parameters
:returns: data dict
"""
src_volume = snapshot['volume']
snapshot['host'] = src_volume['host']
return self.initialize_connection(snapshot, connector)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Disallows connection to snapshot.
:param snapshot: the snapshot object
:param connector: the connector object
:param kwargs: additional parameters
"""
src_volume = snapshot['volume']
snapshot['host'] = src_volume['host']
return self.terminate_connection(snapshot, connector, **kwargs)
def backup_use_temp_snapshot(self):
return True
def failover_host(self, context, volumes, secondary_id=None):
"""Failover volumes to a secondary host/ backend.
:param context: the context
:param volumes: the list of volumes to be failed over
:param secondary_id: the backend to be failed over to, is 'default'
if fail back
:return: secondary_id, volume_update_list
"""
return self.common.failover_host(context, volumes, secondary_id)

View File

@ -1,347 +0,0 @@
# Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import socket
import ssl
import string
import struct
from eventlet import patcher
try:
import OpenSSL
except ImportError:
OpenSSL = None
from oslo_log import log as logging
import six
from six.moves import http_client
from six.moves import urllib
from cinder.i18n import _
# Handle case where we are running in a monkey patched environment
if OpenSSL and patcher.is_monkey_patched('socket'):
from eventlet.green.OpenSSL import SSL
try:
import pywbem
pywbemAvailable = True
except ImportError:
pywbemAvailable = False
LOG = logging.getLogger(__name__)
def to_bytes(s):
if isinstance(s, six.string_types):
return six.b(s)
else:
return s
def get_default_ca_certs():
"""Gets the default CA certificates if found, otherwise None.
Try to find out system path with ca certificates. This path is cached and
returned. If no path is found out, None is returned.
"""
if not hasattr(get_default_ca_certs, '_path'):
for path in (
'/etc/pki/ca-trust/extracted/openssl/ca-bundle.trust.crt',
'/etc/ssl/certs',
'/etc/ssl/certificates'):
if os.path.exists(path):
get_default_ca_certs._path = path
break
else:
get_default_ca_certs._path = None
return get_default_ca_certs._path
class OpenSSLConnectionDelegator(object):
"""An OpenSSL.SSL.Connection delegator.
Supplies an additional 'makefile' method which http_client requires
and is not present in OpenSSL.SSL.Connection.
Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
a delegator must be used.
"""
def __init__(self, *args, **kwargs):
self.connection = SSL.GreenConnection(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.connection, name)
def makefile(self, *args, **kwargs):
return socket._fileobject(self.connection, *args, **kwargs)
class HTTPSConnection(http_client.HTTPSConnection):
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, ca_certs=None, no_verification=False):
if not pywbemAvailable:
LOG.info(
'Module PyWBEM not installed. '
'Install PyWBEM using the python-pywbem package.')
if six.PY3:
excp_lst = (TypeError, ssl.SSLError)
else:
excp_lst = ()
try:
http_client.HTTPSConnection.__init__(self, host, port,
key_file=key_file,
cert_file=cert_file)
self.key_file = None if key_file is None else key_file
self.cert_file = None if cert_file is None else cert_file
self.insecure = no_verification
self.ca_certs = (
None if ca_certs is None else six.text_type(ca_certs))
self.set_context()
# ssl exceptions are reported in various form in Python 3
# so to be compatible, we report the same kind as under
# Python2
except excp_lst as e:
raise pywbem.cim_http.Error(six.text_type(e))
@staticmethod
def host_matches_cert(host, x509):
"""Verify that the certificate matches host.
Verify that the x509 certificate we have received
from 'host' correctly identifies the server we are
connecting to, ie that the certificate's Common Name
or a Subject Alternative Name matches 'host'.
"""
def check_match(name):
# Directly match the name.
if name == host:
return True
# Support single wildcard matching.
if name.startswith('*.') and host.find('.') > 0:
if name[2:] == host.split('.', 1)[1]:
return True
common_name = x509.get_subject().commonName
# First see if we can match the CN.
if check_match(common_name):
return True
# Also try Subject Alternative Names for a match.
san_list = None
for i in range(x509.get_extension_count()):
ext = x509.get_extension(i)
if ext.get_short_name() == b'subjectAltName':
san_list = six.text_type(ext)
for san in ''.join(san_list.split()).split(','):
if san.startswith('DNS:'):
if check_match(san.split(':', 1)[1]):
return True
# Server certificate does not match host.
msg = (_("Host %(host)s does not match x509 certificate contents: "
"CommonName %(commonName)s.")
% {'host': host,
'commonName': common_name})
if san_list is not None:
msg = (_("%(message)s, subjectAltName: %(sanList)s.")
% {'message': msg,
'sanList': san_list})
raise pywbem.cim_http.AuthError(msg)
def verify_callback(self, connection, x509, errnum,
depth, preverify_ok):
if x509.has_expired():
msg = msg = (_("SSL Certificate expired on %s.")
% x509.get_notAfter())
raise pywbem.cim_http.AuthError(msg)
if depth == 0 and preverify_ok:
# We verify that the host matches against the last
# certificate in the chain.
return self.host_matches_cert(self.host, x509)
else:
# Pass through OpenSSL's default result.
return preverify_ok
def set_context(self):
"""Set up the OpenSSL context."""
self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
if self.insecure is not True:
self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
self.verify_callback)
else:
self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
lambda *args: True)
if self.cert_file:
try:
self.context.use_certificate_file(self.cert_file)
except Exception as e:
msg = (_("Unable to load cert from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
if self.key_file is None:
# We support having key and cert in same file.
try:
self.context.use_privatekey_file(self.cert_file)
except Exception as e:
msg = (_("No key file specified and unable to load key "
"from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
if self.key_file:
try:
self.context.use_privatekey_file(self.key_file)
except Exception as e:
msg = (_("Unable to load key from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
if self.ca_certs:
try:
self.context.load_verify_locations(to_bytes(self.ca_certs))
except Exception as e:
msg = (_("Unable to load CA from %(cert)s %(e)s.")
% {'cert': self.cert_file,
'e': e})
raise pywbem.cim_http.AuthError(msg)
else:
self.context.set_default_verify_paths()
def connect(self):
result = socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM)
if result:
socket_family = result[0][0]
if socket_family == socket.AF_INET6:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
# If due to some reason the address lookup fails - we still
# connect to IPv4 socket. This retains the older behavior.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.timeout is not None:
# '0' microseconds
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
struct.pack('LL', 0, 0))
self.sock = OpenSSLConnectionDelegator(self.context, sock)
self.sock.connect((self.host, self.port))
def wbem_request(url, data, creds, headers=None, debug=0, x509=None,
verify_callback=None, ca_certs=None,
no_verification=False):
"""Send request over HTTP.
Send XML data over HTTP to the specified url. Return the
response in XML. Uses Python's build-in http_client. x509 may be a
dictionary containing the location of the SSL certificate and key
files.
"""
if headers is None:
headers = []
host, port, use_ssl = pywbem.cim_http.parse_url(url)
key_file = None
cert_file = None
if use_ssl and x509 is not None:
cert_file = x509.get('cert_file')
key_file = x509.get('key_file')
numTries = 0
localAuthHeader = None
tryLimit = 5
if isinstance(data, six.text_type):
data = data.encode('utf-8')
data = '<?xml version="1.0" encoding="utf-8" ?>\n' + data
if not no_verification and ca_certs is None:
ca_certs = get_default_ca_certs()
elif no_verification:
ca_certs = None
h = HTTPSConnection(
host,
port=port,
key_file=key_file,
cert_file=cert_file,
ca_certs=ca_certs,
no_verification=no_verification)
locallogin = None
while numTries < tryLimit:
numTries = numTries + 1
h.putrequest('POST', '/cimom')
h.putheader('Content-type', 'application/xml; charset="utf-8"')
h.putheader('Content-length', len(data))
if localAuthHeader is not None:
h.putheader(*localAuthHeader)
elif creds is not None:
h.putheader('Authorization', 'Basic %s' %
base64.encodestring('%s:%s' % (creds[0], creds[1]))
.replace('\n', ''))
elif locallogin is not None:
h.putheader('PegasusAuthorization', 'Local "%s"' % locallogin)
for hdr in headers:
if isinstance(hdr, six.text_type):
hdr = hdr.encode('utf-8')
s = map(lambda x: string.strip(x), string.split(hdr, ":", 1))
h.putheader(urllib.parse.quote(s[0]), urllib.parse.quote(s[1]))
try:
h.endheaders()
try:
h.send(data)
except socket.error as arg:
if arg[0] != 104 and arg[0] != 32:
raise
response = h.getresponse()
body = response.read()
h.close()
if response.status != http_client.OK:
raise pywbem.cim_http.Error('HTTP error')
except http_client.BadStatusLine as arg:
msg = (_("Bad Status line returned: %(arg)s.")
% {'arg': arg})
raise pywbem.cim_http.Error(msg)
except socket.sslerror as arg:
msg = (_("SSL error: %(arg)s.")
% {'arg': arg})
raise pywbem.cim_http.Error(msg)
except socket.error as arg:
msg = (_("Socket error: %(arg)s.")
% {'arg': arg})
raise pywbem.cim_http.Error(msg)
break
return body

View File

@ -1,4 +1,4 @@
# Copyright (c) 2012 - 2015 EMC Corporation. # Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved. # All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may # Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
""" """
ISCSI Drivers for EMC VMAX arrays based on SMI-S. ISCSI Drivers for Dell EMC VMAX arrays based on REST.
""" """
from oslo_log import log as logging from oslo_log import log as logging
@ -25,15 +25,12 @@ from cinder import interface
from cinder.volume import driver from cinder.volume import driver
from cinder.volume.drivers.dell_emc.vmax import common from cinder.volume.drivers.dell_emc.vmax import common
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
CINDER_CONF = '/etc/cinder/cinder.conf'
@interface.volumedriver @interface.volumedriver
class VMAXISCSIDriver(driver.ISCSIDriver): class VMAXISCSIDriver(driver.ISCSIDriver):
"""EMC ISCSI Drivers for VMAX using SMI-S. """ISCSI Drivers for VMAX using Rest.
Version history: Version history:
@ -83,10 +80,10 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
- Support for compression on All Flash - Support for compression on All Flash
- Volume replication 2.1 (bp add-vmax-replication) - Volume replication 2.1 (bp add-vmax-replication)
- rename and restructure driver (bp vmax-rename-dell-emc) - rename and restructure driver (bp vmax-rename-dell-emc)
3.0.0 - REST based driver
""" """
VERSION = "2.5.0" VERSION = "3.0.0"
# ThirdPartySystems wiki # ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI" CI_WIKI_NAME = "EMC_VMAX_CI"
@ -94,64 +91,99 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(VMAXISCSIDriver, self).__init__(*args, **kwargs) super(VMAXISCSIDriver, self).__init__(*args, **kwargs)
self.active_backend_id = kwargs.get('active_backend_id', None)
self.common = ( self.common = (
common.VMAXCommon( common.VMAXCommon(
'iSCSI', 'iSCSI',
self.VERSION, self.VERSION,
configuration=self.configuration, configuration=self.configuration))
active_backend_id=self.active_backend_id))
def check_for_setup_error(self): def check_for_setup_error(self):
pass pass
def create_volume(self, volume): def create_volume(self, volume):
"""Creates a VMAX volume.""" """Creates a VMAX volume.
:param volume: the cinder volume object
:return: provider location dict
"""
return self.common.create_volume(volume) return self.common.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot): def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.""" """Creates a volume from a snapshot.
:param volume: the cinder volume object
:param snapshot: the cinder snapshot object
:return: provider location dict
"""
return self.common.create_volume_from_snapshot( return self.common.create_volume_from_snapshot(
volume, snapshot) volume, snapshot)
def create_cloned_volume(self, volume, src_vref): def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume.""" """Creates a cloned volume.
:param volume: the cinder volume object
:param src_vref: the source volume reference
:return: provider location dict
"""
return self.common.create_cloned_volume(volume, src_vref) return self.common.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume): def delete_volume(self, volume):
"""Deletes an VMAX volume.""" """Deletes a VMAX volume.
:param volume: the cinder volume object
"""
self.common.delete_volume(volume) self.common.delete_volume(volume)
def create_snapshot(self, snapshot): def create_snapshot(self, snapshot):
"""Creates a snapshot.""" """Creates a snapshot.
src_volume = snapshot['volume']
volpath = self.common.create_snapshot(snapshot, src_volume)
model_update = {} :param snapshot: the cinder snapshot object
snapshot['provider_location'] = six.text_type(volpath) :return: provider location dict
model_update['provider_location'] = snapshot['provider_location'] """
return model_update src_volume = snapshot.volume
return self.common.create_snapshot(snapshot, src_volume)
def delete_snapshot(self, snapshot): def delete_snapshot(self, snapshot):
"""Deletes a snapshot.""" """Deletes a snapshot.
src_volume = snapshot['volume']
:param snapshot: the cinder snapshot object
"""
src_volume = snapshot.volume
self.common.delete_snapshot(snapshot, src_volume) self.common.delete_snapshot(snapshot, src_volume)
def ensure_export(self, context, volume): def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume.""" """Driver entry point to get the export info for an existing volume.
:param context: the context
:param volume: the cinder volume object
"""
pass pass
def create_export(self, context, volume, connector): def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume.""" """Driver entry point to get the export info for a new volume.
:param context: the context
:param volume: the cinder volume object
:param connector: the connector object
"""
pass pass
def remove_export(self, context, volume): def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.""" """Driver entry point to remove an export for a volume.
:param context: the context
:param volume: the cinder volume object
"""
pass pass
def check_for_export(self, context, volume_id): @staticmethod
"""Make sure volume is exported.""" def check_for_export(context, volume_id):
"""Make sure volume is exported.
:param context: the context
:param volume_id: the volume id
"""
pass pass
def initialize_connection(self, volume, connector): def initialize_connection(self, volume, connector):
@ -183,40 +215,42 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
'target_luns': [1, 1], 'target_luns': [1, 1],
} }
} }
:param volume: the cinder volume object
:param connector: the connector object
:return: dict -- the iscsi dict
""" """
device_info = self.common.initialize_connection( device_info = self.common.initialize_connection(
volume, connector) volume, connector)
return self.get_iscsi_dict( return self.get_iscsi_dict(device_info, volume)
device_info, volume, connector)
def get_iscsi_dict(self, device_info, volume, connector): def get_iscsi_dict(self, device_info, volume):
"""Populate iscsi dict to pass to nova. """Populate iscsi dict to pass to nova.
:param device_info: device info dict :param device_info: device info dict
:param volume: volume object :param volume: volume object
:param connector: connector object
:return: iscsi dict :return: iscsi dict
""" """
try: try:
ip_and_iqn = device_info['ip_and_iqn'] ip_and_iqn = device_info['ip_and_iqn']
is_multipath = device_info['is_multipath'] is_multipath = device_info['is_multipath']
except KeyError as ex: host_lun_id = device_info['hostlunid']
exception_message = (_("Cannot get iSCSI ipaddresses or " except KeyError as e:
"multipath flag. Exception is %(ex)s. ") exception_message = (_("Cannot get iSCSI ipaddresses, multipath "
% {'ex': ex}) "flag, or hostlunid. Exception is %(e)s.")
% {'e': six.text_type(e)})
raise exception.VolumeBackendAPIException(data=exception_message) raise exception.VolumeBackendAPIException(data=exception_message)
iscsi_properties = self.smis_get_iscsi_properties( iscsi_properties = self.vmax_get_iscsi_properties(
volume, connector, ip_and_iqn, is_multipath) volume, ip_and_iqn, is_multipath, host_lun_id)
LOG.info("iSCSI properties are: %s", iscsi_properties) LOG.info("iSCSI properties are: %(props)s",
return { {'props': iscsi_properties})
'driver_volume_type': 'iscsi', return {'driver_volume_type': 'iscsi',
'data': iscsi_properties 'data': iscsi_properties}
}
def smis_get_iscsi_properties(self, volume, connector, ip_and_iqn, @staticmethod
is_multipath): def vmax_get_iscsi_properties(volume, ip_and_iqn,
is_multipath, host_lun_id):
"""Gets iscsi configuration. """Gets iscsi configuration.
We ideally get saved information in the volume entity, but fall back We ideally get saved information in the volume entity, but fall back
@ -231,48 +265,32 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
the authentication details. Right now, either auth_method is not the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP` present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials. meaning use CHAP with the specified credentials.
:param volume: the cinder volume object
:param ip_and_iqn: list of ip and iqn dicts
:param is_multipath: flag for multipath
:param host_lun_id: the host lun id of the device
:return: properties
""" """
device_info, __, __ = self.common.find_device_number(
volume, connector['host'])
isError = False
if device_info:
try:
lun_id = device_info['hostlunid']
except KeyError:
isError = True
else:
isError = True
if isError:
LOG.error("Unable to get the lun id")
exception_message = (_("Cannot find device number for volume "
"%(volumeName)s.")
% {'volumeName': volume['name']})
raise exception.VolumeBackendAPIException(data=exception_message)
properties = {} properties = {}
if len(ip_and_iqn) > 1 and is_multipath: if len(ip_and_iqn) > 1 and is_multipath:
properties['target_portals'] = ([t['ip'] + ":3260" for t in properties['target_portals'] = ([t['ip'] + ":3260" for t in
ip_and_iqn]) ip_and_iqn])
properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in
ip_and_iqn]) ip_and_iqn])
properties['target_luns'] = [lun_id] * len(ip_and_iqn) properties['target_luns'] = [host_lun_id] * len(ip_and_iqn)
properties['target_discovered'] = True properties['target_discovered'] = True
properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0] properties['target_iqn'] = ip_and_iqn[0]['iqn'].split(",")[0]
properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260" properties['target_portal'] = ip_and_iqn[0]['ip'] + ":3260"
properties['target_lun'] = lun_id properties['target_lun'] = host_lun_id
properties['volume_id'] = volume['id'] properties['volume_id'] = volume.id
LOG.info( LOG.info("ISCSI properties: %(properties)s.",
"ISCSI properties: %(properties)s.", {'properties': properties}) {'properties': properties})
LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume}) LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume})
if 'provider_auth' in volume: if hasattr(volume, 'provider_auth'):
auth = volume['provider_auth'] auth = volume.provider_auth
LOG.info(
"AUTH properties: %(authProps)s.", {'authProps': auth})
if auth is not None: if auth is not None:
(auth_method, auth_username, auth_secret) = auth.split() (auth_method, auth_username, auth_secret) = auth.split()
@ -281,22 +299,36 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
properties['auth_username'] = auth_username properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret properties['auth_password'] = auth_secret
LOG.info("AUTH properties: %s.", properties)
return properties return properties
def terminate_connection(self, volume, connector, **kwargs): def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector.""" """Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:param volume: the volume object
:param connector: the connector object
:returns: dict -- the target_wwns and initiator_target_map if the
zone is to be removed, otherwise empty
"""
self.common.terminate_connection(volume, connector) self.common.terminate_connection(volume, connector)
def extend_volume(self, volume, new_size): def extend_volume(self, volume, new_size):
"""Extend an existing volume.""" """Extend an existing volume.
:param volume: the cinder volume object
:param new_size: the required new size
"""
self.common.extend_volume(volume, new_size) self.common.extend_volume(volume, new_size)
def get_volume_stats(self, refresh=False): def get_volume_stats(self, refresh=False):
"""Get volume stats. """Get volume stats.
If 'refresh' is True, run update the stats first. :param refresh: boolean -- If True, run update the stats first.
:returns: dict -- the stats dict
""" """
if refresh: if refresh:
self.update_volume_stats() self.update_volume_stats()
@ -311,46 +343,6 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
data['driver_version'] = self.VERSION data['driver_version'] = self.VERSION
self._stats = data self._stats = data
def migrate_volume(self, ctxt, volume, host):
"""Migrate a volume from one Volume Backend to another.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param host: the host dict holding the relevant target information
:returns: boolean -- Always returns True
:returns: dict -- Empty dict {}
"""
return self.common.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Migrate volume to another host using retype.
:param ctxt: context
:param volume: the volume object including the volume_type_id
:param new_type: the new volume type.
:param diff: Unused parameter in common.retype
:param host: the host dict holding the relevant target information
:returns: boolean -- True if retype succeeded, False if error
"""
return self.common.retype(ctxt, volume, new_type, diff, host)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
self.common.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.common.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
return self.common.create_cgsnapshot(context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
return self.common.delete_cgsnapshot(context, cgsnapshot, snapshots)
def manage_existing(self, volume, external_ref): def manage_existing(self, volume, external_ref):
"""Manages an existing VMAX Volume (import to Cinder). """Manages an existing VMAX Volume (import to Cinder).
@ -375,77 +367,3 @@ class VMAXISCSIDriver(driver.ISCSIDriver):
Leave the volume intact on the backend array. Leave the volume intact on the backend array.
""" """
return self.common.unmanage(volume) return self.common.unmanage(volume)
def update_consistencygroup(self, context, group,
add_volumes, remove_volumes):
"""Updates LUNs in consistency group."""
return self.common.update_consistencygroup(group, add_volumes,
remove_volumes)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
:param context: the context
:param group: the consistency group object to be created
:param volumes: volumes in the consistency group
:param cgsnapshot: the source consistency group snapshot
:param snapshots: snapshots of the source volumes
:param source_cg: the dictionary of a consistency group as source.
:param source_vols: a list of volume dictionaries in the source_cg.
"""
return self.common.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot, snapshots, source_cg,
source_vols)
def create_export_snapshot(self, context, snapshot, connector):
"""Driver entry point to get the export info for a new snapshot."""
pass
def remove_export_snapshot(self, context, snapshot):
"""Driver entry point to remove an export for a snapshot."""
pass
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Allows connection to snapshot.
:param snapshot: the snapshot object
:param connector: the connector object
:param kwargs: additional parameters
:returns: iscsi dict
"""
src_volume = snapshot['volume']
snapshot['host'] = src_volume['host']
device_info = self.common.initialize_connection(
snapshot, connector)
return self.get_iscsi_dict(
device_info, snapshot, connector)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Disallows connection to snapshot.
:param snapshot: the snapshot object
:param connector: the connector object
:param kwargs: additional parameters
"""
src_volume = snapshot['volume']
snapshot['host'] = src_volume['host']
return self.common.terminate_connection(snapshot,
connector)
def backup_use_temp_snapshot(self):
return True
def failover_host(self, context, volumes, secondary_id=None):
"""Failover volumes to a secondary host/ backend.
:param context: the context
:param volumes: the list of volumes to be failed over
:param secondary_id: the backend to be failed over to, is 'default'
if fail back
:return: secondary_id, volume_update_list
"""
return self.common.failover_host(context, volumes, secondary_id)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,5 @@
---
features:
- |
VMAX driver version 3.0, replacing SMI-S with Unisphere REST.
This driver supports VMAX3 hybrid and All Flash arrays.