OpenStack Block Storage (Cinder)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 

7242 lines
328 KiB

# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from copy import deepcopy
import math
import random
import sys
import time
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from oslo_utils import strutils
import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.utils import retry
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.powermax import masking
from cinder.volume.drivers.dell_emc.powermax import metadata as volume_metadata
from cinder.volume.drivers.dell_emc.powermax import migrate
from cinder.volume.drivers.dell_emc.powermax import performance
from cinder.volume.drivers.dell_emc.powermax import provision
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Extra-spec keys used to look up the backend name on a volume type.
BACKENDNAME = 'volume_backend_name'
PREFIXBACKENDNAME = 'capabilities:volume_backend_name'

# Replication status aliases from the standard Cinder replication fields.
REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED
REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED
REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER
FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR
REPLICATION_ERROR = fields.ReplicationStatus.ERROR

# Exceptions that trigger a retry when raised from @retry-decorated methods.
retry_exc_tuple = (exception.VolumeBackendAPIException,)
# Driver configuration options, registered below under the shared backend
# configuration group so they can be set per-backend in cinder.conf.
powermax_opts = [
    cfg.IntOpt('interval',
               default=3,
               help='Use this value to specify '
                    'length of the interval in seconds.'),
    cfg.IntOpt('retries',
               default=200,
               help='Use this value to specify '
                    'number of retries.'),
    cfg.BoolOpt('initiator_check',
                default=False,
                help='Use this value to enable '
                     'the initiator_check.'),
    cfg.StrOpt(utils.VMAX_WORKLOAD,
               help='Workload, setting this as an extra spec in '
                    'pool_name is preferable.'),
    # Fix: IntOpt requires an integer default; the previous float default
    # (20.0) fails oslo.config's option-type validation.
    cfg.IntOpt(utils.U4P_FAILOVER_TIMEOUT,
               default=20,
               help='How long to wait for the server to send data before '
                    'giving up.'),
    cfg.IntOpt(utils.U4P_FAILOVER_RETRIES,
               default=3,
               help='The maximum number of retries each connection should '
                    'attempt. Note, this applies only to failed DNS lookups, '
                    'socket connections and connection timeouts, never to '
                    'requests where data has made it to the server.'),
    cfg.IntOpt(utils.U4P_FAILOVER_BACKOFF_FACTOR,
               default=1,
               help='A backoff factor to apply between attempts after the '
                    'second try (most errors are resolved immediately by a '
                    'second try without a delay). Retries will sleep for: '
                    '{backoff factor} * (2 ^ ({number of total retries} - 1)) '
                    'seconds.'),
    cfg.BoolOpt(utils.U4P_FAILOVER_AUTOFAILBACK,
                default=True,
                help='If the driver should automatically failback to the '
                     'primary instance of Unisphere when a successful '
                     'connection is re-established.'),
    cfg.MultiOpt(utils.U4P_FAILOVER_TARGETS,
                 item_type=types.Dict(),
                 help='Dictionary of Unisphere failover target info.'),
    cfg.IntOpt(utils.POWERMAX_SNAPVX_UNLINK_LIMIT,
               default=3,
               help='Use this value to specify '
                    'the maximum number of unlinks '
                    'for the temporary snapshots '
                    'before a clone operation.'),
    cfg.StrOpt(utils.POWERMAX_ARRAY,
               help='Serial number of the array to connect to.'),
    cfg.StrOpt(utils.POWERMAX_SRP,
               help='Storage resource pool on array to use for '
                    'provisioning.'),
    cfg.StrOpt(utils.POWERMAX_SERVICE_LEVEL,
               help='Service level to use for provisioning storage. '
                    'Setting this as an extra spec in pool_name '
                    'is preferable.'),
    cfg.ListOpt(utils.POWERMAX_PORT_GROUPS,
                bounds=True,
                help='List of port groups containing frontend ports '
                     'configured prior for server connection.'),
    cfg.ListOpt(utils.POWERMAX_ARRAY_TAG_LIST,
                bounds=True,
                help='List of user assigned name for storage array.'),
    cfg.StrOpt(utils.POWERMAX_SHORT_HOST_NAME_TEMPLATE,
               default='shortHostName',
               help='User defined override for short host name.'),
    cfg.StrOpt(utils.POWERMAX_PORT_GROUP_NAME_TEMPLATE,
               default='portGroupName',
               help='User defined override for port group name.'),
    cfg.BoolOpt(utils.LOAD_BALANCE,
                default=False,
                help='Enable/disable load balancing for a PowerMax backend.'),
    cfg.BoolOpt(utils.LOAD_BALANCE_RT,
                default=False,
                help='Enable/disable real-time performance metrics for Port '
                     'level load balancing for a PowerMax backend.'),
    # NOTE(review): the help text documents "avg"/"max" but the default is
    # 'Avg' — confirm the capitalisation the REST API actually expects.
    cfg.StrOpt(utils.PERF_DATA_FORMAT,
               default='Avg',
               help='Performance data format, not applicable for real-time '
                    'metrics. Available options are "avg" and "max".'),
    cfg.IntOpt(utils.LOAD_LOOKBACK,
               default=60,
               help='How far in minutes to look back for diagnostic '
                    'performance metrics in load calculation, minimum of 0 '
                    'maximum of 1440 (24 hours).'),
    cfg.IntOpt(utils.LOAD_LOOKBACK_RT,
               default=1,
               help='How far in minutes to look back for real-time '
                    'performance metrics in load calculation, minimum of 1 '
                    'maximum of 10.'),
    cfg.StrOpt(utils.PORT_GROUP_LOAD_METRIC,
               default='PercentBusy',
               help='Metric used for port group load calculation.'),
    cfg.StrOpt(utils.PORT_LOAD_METRIC,
               default='PercentBusy',
               help='Metric used for port load calculation.')]

CONF.register_opts(powermax_opts, group=configuration.SHARED_CONF_GROUP)
class PowerMaxCommon(object):
    """Common class for Rest based PowerMax volume drivers.

    This common class is for Dell EMC PowerMax volume drivers
    based on UniSphere Rest API.
    It supports VMAX 3 and VMAX All Flash and PowerMax arrays.
    """
    # Per-backend pool defaults; 'arrays_info' and the other entries are
    # populated by _gather_info()/_get_attributes_from_config() at init time
    # and reported through update_volume_stats.
    pool_info = {'backend_name': None,
                 'config_file': None,
                 'arrays_info': {},
                 'max_over_subscription_ratio': None,
                 'reserved_percentage': 0,
                 'replication_enabled': False}
def __init__(self, prtcl, version, configuration=None,
             active_backend_id=None):
    """Initialize the common PowerMax driver components.

    :param prtcl: the protocol in use (e.g. iSCSI or FC)
    :param version: the driver version string
    :param configuration: the backend configuration object
    :param active_backend_id: the target backend id if the service started
        in a failed-over state, else None
    """
    # Collaborator objects for REST access, masking views, provisioning,
    # metadata capture and migration.
    self.rest = rest.PowerMaxRest()
    self.utils = utils.PowerMaxUtils()
    self.masking = masking.PowerMaxMasking(prtcl, self.rest)
    self.provision = provision.PowerMaxProvision(self.rest)
    self.volume_metadata = volume_metadata.PowerMaxVolumeMetadata(
        self.rest, version, LOG.isEnabledFor(logging.DEBUG))
    self.migrate = migrate.PowerMaxMigrate(prtcl, self.rest)

    # Configuration/Attributes
    self.protocol = prtcl
    self.configuration = configuration
    self.configuration.append_config_values(powermax_opts)
    self.active_backend_id = active_backend_id
    self.version = version
    self.version_dict = {}
    self.ucode_level = None
    self.next_gen = False
    self.replication_enabled = False
    self.rep_devices = []
    # A non-None active_backend_id means the service started failed over.
    self.failover = True if active_backend_id else False
    self.promotion = False
    self.powermax_array_tag_list = None
    self.powermax_short_host_name_template = None
    self.powermax_port_group_name_template = None
    if active_backend_id == utils.PMAX_FAILOVER_START_ARRAY_PROMOTION:
        self.promotion = True

    # Gather environment info. NOTE: order matters — replication and
    # failover config must be read before _gather_info() validates the
    # array against the replication targets.
    self._get_replication_info()
    self._get_u4p_failover_info()
    self._gather_info()
    self._get_performance_config()
    self.rest.validate_unisphere_version()
def _gather_info(self):
    """Gather the relevant information for update_volume_stats.

    Reads the array details from cinder.conf, records the array model and
    uCode generation, validates the serial number against the configured
    replication targets, and builds the list of SLO/Workload pool
    combinations reported to the scheduler.

    :raises: InvalidConfigurationValue if the configured array is also a
        replication target
    """
    self._get_attributes_from_config()
    array_info = self.get_attributes_from_cinder_config()
    if array_info is None:
        LOG.error("Unable to get attributes from cinder.conf. Please "
                  "refer to the current online documentation for correct "
                  "configuration and note that the xml file is no "
                  "longer supported.")
    self.rest.set_rest_credentials(array_info)
    if array_info:
        serial_number = array_info['SerialNumber']
        self.array_model, self.next_gen = (
            self.rest.get_array_model_info(serial_number))
        self.ucode_level = self.rest.get_array_ucode_version(serial_number)
        if self.replication_enabled:
            if serial_number in self.replication_targets:
                # Fix: interpolate after the _() lookup so the msgid in
                # the translation catalog matches the literal string;
                # interpolating inside _() defeats translation.
                msg = (_("The same array serial number (%s) is defined "
                         "for powermax_array and replication_device in "
                         "cinder.conf. Please ensure your "
                         "target_device_id points to a different "
                         "array.") % serial_number)
                LOG.error(msg)
                raise exception.InvalidConfigurationValue(msg)
        finalarrayinfolist = self._get_slo_workload_combinations(
            array_info)
        self.pool_info['arrays_info'] = finalarrayinfolist
def _get_attributes_from_config(self):
    """Read the relevant driver settings from the configuration file.

    Populates the retry/interval behaviour, SnapVX unlink limit, naming
    templates, array tag list and the pool_info entries consumed by
    update_volume_stats.
    """
    conf = self.configuration
    self.interval = conf.safe_get('interval')
    self.retries = conf.safe_get('retries')
    self.snapvx_unlink_limit = conf.safe_get(
        utils.POWERMAX_SNAPVX_UNLINK_LIMIT)
    self.powermax_array_tag_list = conf.safe_get(
        utils.POWERMAX_ARRAY_TAG_LIST)
    self.powermax_short_host_name_template = conf.safe_get(
        utils.POWERMAX_SHORT_HOST_NAME_TEMPLATE)
    self.powermax_port_group_name_template = conf.safe_get(
        utils.POWERMAX_PORT_GROUP_NAME_TEMPLATE)

    self.pool_info['backend_name'] = conf.safe_get('volume_backend_name')
    self.pool_info['max_over_subscription_ratio'] = (
        volume_utils.get_max_over_subscription_ratio(
            conf.safe_get('max_over_subscription_ratio'), True))
    self.pool_info['reserved_percentage'] = conf.safe_get(
        'reserved_percentage')

    LOG.debug(
        "Updating volume stats on Cinder backend %(backendName)s.",
        {'backendName': self.pool_info['backend_name']})
def _get_performance_config(self):
    """Set up the performance helper from cinder.conf, if enabled.

    Load balancing is opt-in; when disabled the helper is still created
    with a bare configuration so callers can query it safely.
    """
    self.performance = performance.PowerMaxPerformance(
        self.rest, {'load_balance': False})

    load_balance_enabled = self.configuration.safe_get(utils.LOAD_BALANCE)
    if load_balance_enabled:
        LOG.info(
            "Updating performance config for Cinder backend %(be)s.",
            {'be': self.pool_info['backend_name']})
        array_info = self.get_attributes_from_cinder_config()
        self.performance.set_performance_configuration(
            array_info['SerialNumber'], self.configuration)
def _get_u4p_failover_info(self):
"""Gather Unisphere failover target information, if provided."""
key_dict = {'san_ip': 'RestServerIp',
'san_api_port': 'RestServerPort',
'san_login': 'RestUserName',
'san_password': 'RestPassword',
'driver_ssl_cert_verify': 'SSLVerify',
'driver_ssl_cert_path': 'SSLPath'}
if self.configuration.safe_get('u4p_failover_target'):
u4p_targets = self.configuration.safe_get('u4p_failover_target')
formatted_target_list = list()
for target in u4p_targets:
formatted_target = {key_dict[key]: value for key, value in
target.items()}
try:
formatted_target['SSLVerify'] = formatted_target['SSLPath']
del formatted_target['SSLPath']
except KeyError:
if formatted_target['SSLVerify'] == 'False':
formatted_target['SSLVerify'] = False
else:
formatted_target['SSLVerify'] = True
formatted_target_list.append(formatted_target)
u4p_failover_config = dict()
u4p_failover_config['u4p_failover_targets'] = formatted_target_list
u4p_failover_config['u4p_failover_backoff_factor'] = (
self.configuration.safe_get('u4p_failover_backoff_factor'))
u4p_failover_config['u4p_failover_retries'] = (
self.configuration.safe_get('u4p_failover_retries'))
u4p_failover_config['u4p_failover_timeout'] = (
self.configuration.safe_get('u4p_failover_timeout'))
u4p_failover_config['u4p_failover_autofailback'] = (
self.configuration.safe_get('u4p_failover_autofailback'))
u4p_failover_config['u4p_primary'] = (
self.get_attributes_from_cinder_config())
self.rest.set_u4p_failover_config(u4p_failover_config)
else:
LOG.warning("There has been no failover instances of Unisphere "
"configured for this instance of Cinder. If your "
"primary instance of Unisphere goes down then your "
"PowerMax/VMAX will be inaccessible until the "
"Unisphere REST API is responsive again.")
def retest_primary_u4p(self):
    """Retest connection to the primary instance of Unisphere.

    Issues a version request against the primary Unisphere with a
    throwaway REST client; on success the driver switches back to the
    primary endpoint and resets its failover target list.
    """
    primary_info = self.get_attributes_from_cinder_config()
    probe = rest.PowerMaxRest()
    probe.set_rest_credentials(primary_info)
    LOG.debug(
        "Running connection check to primary instance of Unisphere "
        "at %(primary)s", {
            'primary': primary_info['RestServerIp']})
    status_code, __ = probe.request(target_uri='/system/version',
                                    method='GET', u4p_check=True,
                                    request_object=None)
    if status_code and int(status_code) == 200:
        # Primary reachable again; restore it as the active endpoint.
        self._get_u4p_failover_info()
        self.rest.set_rest_credentials(primary_info)
        self.rest.u4p_in_failover = False
        LOG.info("Connection to primary instance of Unisphere at "
                 "%(primary)s restored, available failover instances of "
                 "Unisphere reset to default.", {
                     'primary': primary_info['RestServerIp']})
    else:
        LOG.debug(
            "Connection check to primary instance of Unisphere at "
            "%(primary)s failed, maintaining session with backup "
            "instance of Unisphere at %(bu_in_use)s", {
                'primary': primary_info['RestServerIp'],
                'bu_in_use': self.rest.base_uri})
    probe.session.close()
def _get_initiator_check_flag(self):
"""Reads the configuration for initator_check flag.
:returns: flag
"""
return self.configuration.safe_get('initiator_check')
def _get_replication_info(self):
"""Gather replication information, if provided."""
self.rep_configs = None
self.replication_targets = []
if hasattr(self.configuration, 'replication_device'):
self.rep_devices = self.configuration.safe_get(
'replication_device')
if self.rep_devices:
if len(self.rep_devices) > 1:
self.utils.validate_multiple_rep_device(self.rep_devices)
self.rep_configs = self.utils.get_replication_config(
self.rep_devices)
# use self.replication_enabled for update_volume_stats
self.replication_enabled = True
self.replication_targets = self.utils.get_replication_targets(
self.rep_configs)
LOG.debug("The replication configuration is %(rep_configs)s.",
{'rep_configs': self.rep_configs})
if self.next_gen:
for rc in self.rep_configs:
rc[utils.RDF_CONS_EXEMPT] = True
else:
for rc in self.rep_configs:
rc[utils.RDF_CONS_EXEMPT] = False
def _get_slo_workload_combinations(self, array_info):
    """Method to query the array for SLO and Workloads.

    Takes the arrayinfolist object and generates a set which has
    all available SLO & Workload combinations

    :param array_info: the array information
    :returns: finalarrayinfolist -- list of per-pool array info dicts
    :raises: VolumeBackendAPIException:
    """
    try:
        upgraded_afa = False
        # Pick the SL/WL candidate lists based on the array model
        # detected in _gather_info().
        if self.array_model in utils.VMAX_HYBRID_MODELS:
            sls = deepcopy(utils.HYBRID_SLS)
            wls = deepcopy(utils.HYBRID_WLS)
        elif self.array_model in utils.VMAX_AFA_MODELS:
            wls = deepcopy(utils.AFA_WLS)
            if not self.next_gen:
                sls = deepcopy(utils.AFA_H_SLS)
            else:
                # AFA hardware upgraded to PowerMax OS.
                sls = deepcopy(utils.AFA_P_SLS)
                upgraded_afa = True
        elif self.array_model in utils.PMAX_MODELS:
            sls, wls = deepcopy(utils.PMAX_SLS), deepcopy(utils.PMAX_WLS)
        else:
            raise exception.VolumeBackendAPIException(
                message="Unable to determine array model.")

        if self.next_gen:
            LOG.warning(
                "Workloads have been deprecated for arrays running "
                "PowerMax OS uCode level 5978 or higher. Any supplied "
                "workloads will be treated as None values. It is "
                "recommended to create a new volume type without a "
                "workload specified.")

        # Add service levels:
        pools = sls
        # Array Specific SL/WL Combos
        pools += (
            ['{}:{}'.format(x, y) for x in sls for y in wls
             if x.lower() not in ['optimized', 'none']])
        # Add Optimized & None combinations
        pools += (
            ['{}:{}'.format(x, y) for x in ['Optimized', 'NONE', 'None']
             for y in ['NONE', 'None']])

        if upgraded_afa:
            # Cleanup is required here for service levels that were not
            # present in AFA HyperMax but added for AFA PowerMax, we
            # do not need these SL/WL combinations for backwards
            # compatibility but we do for Diamond SL
            afa_pool = list()
            for p in pools:
                try:
                    pl = p.split(':')
                    if (pl[0] not in [
                        'Platinum', 'Gold', 'Silver', 'Bronze']) or (
                            pl[1] not in [
                                'OLTP', 'OLTP_REP', 'DSS', 'DSS_REP']):
                        afa_pool.append(p)
                except IndexError:
                    # Pool has no workload present
                    afa_pool.append(p)
            pools = afa_pool

        # Build array pool of SL/WL combinations; each entry is a copy of
        # array_info with 'SLO' (and optionally 'Workload') set.
        array_pool = list()
        for pool in pools:
            _array_info = array_info.copy()
            try:
                slo, workload = pool.split(':')
                _array_info['SLO'] = slo
                _array_info['Workload'] = workload
            except ValueError:
                # Pool string has no ':', so it is an SLO-only entry.
                _array_info['SLO'] = pool
            array_pool.append(_array_info)
    except Exception as e:
        exception_message = (_(
            "Unable to get the SLO/Workload combinations from the array. "
            "Exception received was %(e)s") % {'e': six.text_type(e)})
        LOG.error(exception_message)
        raise exception.VolumeBackendAPIException(
            message=exception_message)
    return array_pool
def create_volume(self, volume):
    """Creates a EMC(PowerMax/VMAX) volume from a storage group.

    :param volume: volume object
    :returns: model_update - dict
    """
    model_update, rep_driver_data = dict(), dict()
    volume_id = volume.id
    extra_specs = self._initial_setup(volume)
    # QoS is applied at storage-group level, not needed in the specs here.
    if 'qos' in extra_specs:
        del extra_specs['qos']

    # Volume_name naming convention is 'OS-UUID'.
    volume_name = self.utils.get_volume_element_name(volume_id)
    volume_size = volume.size

    volume_dict, rep_update, rep_info_dict = self._create_volume(
        volume, volume_name, volume_size, extra_specs)
    if rep_update:
        rep_driver_data = rep_update['replication_driver_data']
        model_update.update(rep_update)

    # Add volume to group
    group_name = self._add_to_group(
        volume, volume_dict['device_id'], volume_name, volume.group_id,
        volume.group, extra_specs, rep_driver_data)

    # Gather Metadata
    model_update.update(
        {'provider_location': six.text_type(volume_dict)})
    model_update = self.update_metadata(
        model_update, volume.metadata, self.get_volume_metadata(
            volume_dict['array'], volume_dict['device_id']))
    if rep_update:
        model_update['metadata']['BackendID'] = extra_specs[
            utils.REP_CONFIG].get(utils.BACKEND_ID, 'None')

    array_tag_list = self.get_tags_of_storage_array(
        extra_specs[utils.ARRAY])
    self.volume_metadata.capture_create_volume(
        volume_dict['device_id'], volume, group_name, volume.group_id,
        extra_specs, rep_info_dict, 'create',
        array_tag_list=array_tag_list)

    LOG.info("Leaving create_volume: %(name)s. Volume dict: %(dict)s.",
             {'name': volume_name, 'dict': volume_dict})

    return model_update
def _add_to_group(
self, volume, device_id, volume_name, group_id, group,
extra_specs, rep_driver_data=None):
"""Add a volume to a volume group
:param volume: volume object
:param device_id: the device id
:param volume_name: volume name
:param group_id: the group id
:param group: group object
:param extra_specs: extra specifications
:param rep_driver_data: replication data (optional)
:returns: group_id - string
"""
group_name = None
if group_id is not None:
if group and (volume_utils.is_group_a_cg_snapshot_type(group)
or group.is_replicated):
group_name = self._add_new_volume_to_volume_group(
volume, device_id, volume_name,
extra_specs, rep_driver_data)
return group_name
def _add_new_volume_to_volume_group(self, volume, device_id, volume_name,
                                    extra_specs, rep_driver_data=None):
    """Add a new volume to a volume group.

    This may also be called after extending a replicated volume.

    :param volume: the volume object
    :param device_id: the device id
    :param volume_name: the volume name
    :param extra_specs: the extra specifications
    :param rep_driver_data: the replication driver data, optional
    :returns: group_name string
    """
    # Fail fast if the volume's replication settings conflict with the
    # group's before touching the array.
    self.utils.check_replication_matched(volume, extra_specs)
    group_name = self.provision.get_or_create_volume_group(
        extra_specs[utils.ARRAY], volume.group, extra_specs)
    self.masking.add_volume_to_storage_group(
        extra_specs[utils.ARRAY], device_id,
        group_name, volume_name, extra_specs)
    # Add remote volume to remote group, if required
    if volume.group.is_replicated:
        self.masking.add_remote_vols_to_volume_group(
            volume, volume.group, extra_specs, rep_driver_data)
    return group_name
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    :param volume: volume object
    :param snapshot: snapshot object
    :returns: model_update
    :raises: VolumeBackendAPIException:
    """
    LOG.debug("Entering create_volume_from_snapshot.")
    model_update, rep_info_dict = {}, {}
    extra_specs = self._initial_setup(volume)

    # Check if legacy snapshot: a legacy snapshot exists as its own
    # device on the array rather than as a SnapVX snapshot.
    sourcedevice_id = self._find_device_on_array(
        snapshot, extra_specs)
    from_snapvx = False if sourcedevice_id else True

    clone_dict, rep_update, rep_info_dict = self._create_cloned_volume(
        volume, snapshot, extra_specs, from_snapvx=from_snapvx)

    # Update model with replication session info if applicable
    if rep_update:
        model_update.update(rep_update)

    model_update.update(
        {'provider_location': six.text_type(clone_dict)})
    model_update = self.update_metadata(
        model_update, volume.metadata, self.get_volume_metadata(
            clone_dict['array'], clone_dict['device_id']))
    if rep_update:
        model_update['metadata']['BackendID'] = extra_specs[
            utils.REP_CONFIG].get(utils.BACKEND_ID, 'None')
    array_tag_list = self.get_tags_of_storage_array(
        extra_specs[utils.ARRAY])
    self.volume_metadata.capture_create_volume(
        clone_dict['device_id'], volume, None, None,
        extra_specs, rep_info_dict, 'createFromSnapshot',
        source_snapshot_id=snapshot.id, array_tag_list=array_tag_list)
    return model_update
def create_cloned_volume(self, clone_volume, source_volume):
    """Creates a clone of the specified volume.

    :param clone_volume: clone volume Object
    :param source_volume: volume object
    :returns: model_update, dict
    """
    model_update, rep_info_dict = {}, {}
    rep_driver_data = None
    extra_specs = self._initial_setup(clone_volume)
    array = extra_specs[utils.ARRAY]
    source_device_id = self._find_device_on_array(
        source_volume, extra_specs)

    # Pre-clone housekeeping on the source device; Metro-replicated
    # volumes on non next-gen arrays need a full sync check first.
    if not self.next_gen and (
            extra_specs.get('rep_mode', None) == utils.REP_METRO):
        self._sync_check(array, source_device_id, extra_specs)
    else:
        self._clone_check(array, source_device_id, extra_specs)

    clone_dict, rep_update, rep_info_dict = self._create_cloned_volume(
        clone_volume, source_volume, extra_specs)

    # Update model with replication session info if applicable
    if rep_update:
        rep_driver_data = rep_update['replication_driver_data']
        model_update.update(rep_update)

    # Add volume to group
    group_name = self._add_to_group(
        clone_volume, clone_dict['device_id'], clone_volume.name,
        clone_volume.group_id, clone_volume.group, extra_specs,
        rep_driver_data)

    model_update.update(
        {'provider_location': six.text_type(clone_dict)})
    model_update = self.update_metadata(
        model_update, clone_volume.metadata, self.get_volume_metadata(
            clone_dict['array'], clone_dict['device_id']))
    if rep_update:
        model_update['metadata']['BackendID'] = extra_specs[
            utils.REP_CONFIG].get(utils.BACKEND_ID, 'None')
    array_tag_list = self.get_tags_of_storage_array(
        extra_specs[utils.ARRAY])
    self.volume_metadata.capture_create_volume(
        clone_dict['device_id'], clone_volume, group_name,
        source_volume.group_id, extra_specs, rep_info_dict,
        'createFromVolume',
        temporary_snapvx=clone_dict.get('snap_name'),
        source_device_id=clone_dict.get('source_device_id'),
        array_tag_list=array_tag_list)
    return model_update
def delete_volume(self, volume):
    """Deletes a EMC(PowerMax/VMAX) volume.

    :param volume: volume object
    """
    LOG.info("Deleting Volume: %(volume)s",
             {'volume': volume.name})
    deleted_name = self._delete_volume(volume)
    # Record the deletion in the volume's metadata trail.
    self.volume_metadata.capture_delete_info(volume)
    LOG.info("Leaving delete_volume: %(volume_name)s.",
             {'volume_name': deleted_name})
def create_snapshot(self, snapshot, volume):
    """Creates a snapshot.

    :param snapshot: snapshot object
    :param volume: volume Object to create snapshot from
    :returns: dict -- the cloned volume dictionary
    """
    extra_specs = self._initial_setup(volume)
    snapshot_dict, __, __ = self._create_cloned_volume(
        snapshot, volume, extra_specs, is_snapshot=True)

    model_update = {
        'provider_location': six.text_type(snapshot_dict)}
    snapshot_metadata = self.get_snapshot_metadata(
        extra_specs.get('array'), snapshot_dict.get('source_id'),
        snapshot_dict.get('snap_name'))
    model_update = self.update_metadata(
        model_update, snapshot.metadata, snapshot_metadata)
    # Preserve any user-supplied snapshot metadata over backend values.
    if snapshot.metadata:
        model_update['metadata'].update(snapshot.metadata)
    snapshot_metadata.update(
        {'snap_display_name': snapshot_dict.get('snap_name')})
    self.volume_metadata.capture_snapshot_info(
        volume, extra_specs, 'createSnapshot', snapshot_metadata)
    return model_update
def delete_snapshot(self, snapshot, volume):
    """Deletes a snapshot.

    :param snapshot: snapshot object
    :param volume: source volume
    """
    LOG.info("Delete Snapshot: %(snapshotName)s.",
             {'snapshotName': snapshot.name})
    extra_specs = self._initial_setup(volume)
    sourcedevice_id, snap_name, snap_id_list = self._parse_snap_info(
        extra_specs[utils.ARRAY], snapshot)
    if not sourcedevice_id and not snap_name:
        # Check if legacy snapshot: a legacy snapshot is its own device
        # on the array rather than a SnapVX snapshot.
        sourcedevice_id = self._find_device_on_array(
            snapshot, extra_specs)
        if sourcedevice_id:
            self._delete_volume(snapshot)
        else:
            LOG.info("No snapshot found on the array")
    elif not sourcedevice_id or not snap_name:
        # Only one of the identifiers resolved; nothing to delete.
        LOG.info("No snapshot found on the array")
    else:
        # Ensure snap has not been recently deleted
        for snap_id in snap_id_list:
            self.provision.delete_volume_snap_check_for_links(
                extra_specs[utils.ARRAY], snap_name,
                sourcedevice_id, extra_specs, snap_id)
        LOG.info("Leaving delete_snapshot: %(ssname)s.",
                 {'ssname': snap_name})
    self.volume_metadata.capture_snapshot_info(
        volume, extra_specs, 'deleteSnapshot', None)
def _remove_members(self, array, volume, device_id,
                    extra_specs, connector, is_multiattach,
                    async_grp=None, host_template=None):
    """Unmap a volume from a host.

    Removes the volume from the storage group that belongs to a masking
    view. Multiattach volumes are not fully reset; they are returned to
    their FAST managed group so remaining attachments keep working.

    :param array: the array serial number
    :param volume: volume object
    :param device_id: the PowerMax/VMAX volume device id
    :param extra_specs: extra specifications
    :param connector: the connector object
    :param is_multiattach: flag to indicate if this is a multiattach case
    :param async_grp: the name of the async group, if applicable
    :param host_template: user-defined short host name override, optional
    """
    volume_name = volume.name
    LOG.debug("Detaching volume %s.", volume_name)
    storage_group_names = None
    if is_multiattach:
        # Snapshot the current SG membership before the masking reset
        # below so staging objects can be cleaned up afterwards.
        storage_group_names = self.rest.get_storage_groups_from_volume(
            array, device_id)
    do_reset = not is_multiattach
    self.masking.remove_and_reset_members(
        array, volume, device_id, volume_name,
        extra_specs, do_reset, connector, async_grp=async_grp,
        host_template=host_template)
    if is_multiattach:
        self.masking.return_volume_to_fast_managed_group(
            array, device_id, extra_specs)
        self.migrate.cleanup_staging_objects(
            array, storage_group_names, extra_specs)
def _unmap_lun(self, volume, connector):
    """Unmaps a volume from the host.

    :param volume: the volume Object
    :param connector: the connector Object
    """
    mv_list, sg_list = None, None
    extra_specs = self._initial_setup(volume)
    rep_config = None
    rep_extra_specs = None
    current_host_occurances = 0
    if 'qos' in extra_specs:
        del extra_specs['qos']
    if self.utils.is_replication_enabled(extra_specs):
        backend_id = self._get_replicated_volume_backend_id(volume)
        rep_config = self.utils.get_rep_config(
            backend_id, self.rep_configs)
        extra_specs[utils.FORCE_VOL_EDIT] = True
        rep_extra_specs = self._get_replication_extra_specs(
            extra_specs, rep_config)
        # A failed-over volume lives on the remote array, so work with
        # the replication specs from here on.
        if self.utils.is_volume_failed_over(volume):
            extra_specs = rep_extra_specs
    volume_name = volume.name
    mgmt_sg_name = None
    LOG.info("Unmap volume: %(volume)s.", {'volume': volume})
    if connector is not None:
        host_name = connector.get('host')
        attachment_list = volume.volume_attachment
        LOG.debug("Volume attachment list: %(atl)s. "
                  "Attachment type: %(at)s",
                  {'atl': attachment_list, 'at': type(attachment_list)})
        # attachment_list may be an object-list wrapper or a plain list.
        try:
            att_list = attachment_list.objects
        except AttributeError:
            att_list = attachment_list
        if att_list is not None:
            # Count attachments from this host to decide whether the
            # masking view is still needed by other instances.
            host_list = [att.connector['host'] for att in att_list if
                         att is not None and att.connector is not None]
            current_host_occurances = host_list.count(host_name)
    else:
        LOG.warning("Cannot get host name from connector object - "
                    "assuming force-detach.")
        host_name = None

    device_info, is_multiattach = (
        self.find_host_lun_id(volume, host_name, extra_specs))
    if 'hostlunid' not in device_info:
        LOG.info("Volume %s is not mapped. No volume to unmap.",
                 volume_name)
        return
    if current_host_occurances > 1:
        LOG.info("Volume is attached to multiple instances on "
                 "this host. Not removing the volume from the "
                 "masking view.")
    else:
        array = extra_specs[utils.ARRAY]
        if self.utils.does_vol_need_rdf_management_group(extra_specs):
            mgmt_sg_name = self.utils.get_rdf_management_group_name(
                rep_config)
        self._remove_members(
            array, volume, device_info['device_id'], extra_specs,
            connector, is_multiattach, async_grp=mgmt_sg_name,
            host_template=self.powermax_short_host_name_template)
        if (self.utils.is_metro_device(rep_config, extra_specs) and
                not self.promotion):
            # Need to remove from remote masking view
            device_info, __ = (self.find_host_lun_id(
                volume, host_name, extra_specs, rep_extra_specs))
            if 'hostlunid' in device_info:
                self._remove_members(
                    rep_extra_specs[utils.ARRAY], volume,
                    device_info['device_id'], rep_extra_specs, connector,
                    is_multiattach, async_grp=mgmt_sg_name,
                    host_template=self.powermax_short_host_name_template)
            else:
                # Make an attempt to clean up initiator group
                self.masking.attempt_ig_cleanup(
                    connector, self.protocol,
                    rep_extra_specs[utils.ARRAY], True,
                    host_template=self.powermax_short_host_name_template)
    # Capture the masking view / storage group state for the metadata
    # trail (debug logging only, as the lookup is expensive).
    if is_multiattach and LOG.isEnabledFor(logging.DEBUG):
        mv_list, sg_list = (
            self._get_mvs_and_sgs_from_volume(
                extra_specs[utils.ARRAY],
                device_info['device_id']))

    self.volume_metadata.capture_detach_info(
        volume, extra_specs, device_info['device_id'], mv_list,
        sg_list)
def _unmap_lun_promotion(self, volume, connector):
    """Unmaps a volume from the host during promotion.

    :param volume: the volume Object
    :param connector: the connector Object
    """
    extra_specs = self._initial_setup(volume)
    if not self.utils.is_replication_enabled(extra_specs):
        LOG.error('Unable to terminate connections for non-replicated '
                  'volumes during promotion failover. Could not unmap '
                  'volume %s', volume.id)
        return

    if extra_specs[utils.REP_MODE] == utils.REP_METRO:
        # During a promotion scenario only Metro volumes will have
        # connections present on their remote volumes.
        self._unmap_lun(volume, connector)
    else:
        # Non-Metro: nothing to unmap remotely, just capture the
        # detach in the metadata trail.
        loc = ast.literal_eval(volume.provider_location)
        promotion_key = [utils.PMAX_FAILOVER_START_ARRAY_PROMOTION]
        self.volume_metadata.capture_detach_info(
            volume, extra_specs, loc.get('device_id'), promotion_key,
            promotion_key)
def initialize_connection(self, volume, connector):
    """Initializes the connection and returns device and connection info.

    The volume may be already mapped, if this is so the deviceInfo tuple
    is returned. If the volume is not already mapped then we need to
    gather information to either 1. Create an new masking view or 2. Add
    the volume to an existing storage group within an already existing
    maskingview.

    The naming convention is the following:

    .. code-block:: none

     initiator_group_name = OS-<shortHostName>-<shortProtocol>-IG
                            e.g OS-myShortHost-I-IG
     storage_group_name = OS-<shortHostName>-<srpName>-<shortProtocol>-SG
                          e.g OS-myShortHost-SRP_1-I-SG
     port_group_name = OS-<target>-PG  The port_group_name will come from
                       the cinder.conf or as an extra spec on the volume
                       type. These are precreated. If the portGroup does
                       not exist then an error will be returned to the
                       user
     maskingview_name = OS-<shortHostName>-<srpName>-<shortProtocol>-MV
                        e.g OS-myShortHost-SRP_1-I-MV

    :param volume: volume Object
    :param connector: the connector Object
    :returns: dict -- device_info_dict - device information dict
    :raises: VolumeBackendAPIException: for Metro iSCSI attach without
        multipathing enabled on the host
    """
    LOG.info("Initialize connection: %(vol)s.", {'vol': volume.name})
    extra_specs = self._initial_setup(volume, init_conn=True)
    is_multipath = connector.get('multipath', False)
    rep_config = extra_specs.get(utils.REP_CONFIG)
    rep_extra_specs = self._get_replication_extra_specs(
        extra_specs, rep_config)
    remote_port_group = None
    # Metro iSCSI attach requires host multipathing; fail early if the
    # connector does not report it.
    if (self.utils.is_metro_device(rep_config, extra_specs)
            and not is_multipath and self.protocol.lower() == 'iscsi'):
        exception_message = _(
            "Either multipathing is not correctly/currently "
            "enabled on your system or the volume was created "
            "prior to multipathing being enabled. Please refer "
            "to the online PowerMax Cinder driver documentation "
            "for this release for further details.")
        LOG.error(exception_message)
        raise exception.VolumeBackendAPIException(
            message=exception_message)

    if self.utils.is_volume_failed_over(volume):
        # Work against the remote array's specs while failed over.
        extra_specs = rep_extra_specs
    device_info_dict, is_multiattach = (
        self.find_host_lun_id(volume, connector.get('host'), extra_specs,
                              connector=connector))
    masking_view_dict = self._populate_masking_dict(
        volume, connector, extra_specs)
    masking_view_dict[utils.IS_MULTIATTACH] = is_multiattach
    if self.rest.is_next_gen_array(extra_specs['array']):
        # Next-gen arrays carry no workload; blank out the workload
        # component (slot 1) of the '+'-delimited storage group name.
        masking_view_dict['workload'] = 'NONE'
        temp_pool = masking_view_dict['storagegroup_name']
        splitPool = temp_pool.split('+')
        if len(splitPool) == 4:
            splitPool[1] = 'NONE'
        masking_view_dict['storagegroup_name'] = '+'.join(splitPool)

    if ('hostlunid' in device_info_dict and
            device_info_dict['hostlunid'] is not None):
        # Volume is already mapped to this host on the local array.
        hostlunid = device_info_dict['hostlunid']
        LOG.info("Volume %(volume)s is already mapped to host %(host)s. "
                 "The hostlunid is %(hostlunid)s.",
                 {'volume': volume.name, 'host': connector['host'],
                  'hostlunid': hostlunid})
        port_group_name = (
            self.get_port_group_from_masking_view(
                extra_specs[utils.ARRAY],
                device_info_dict['maskingview']))
        if self.utils.is_metro_device(rep_config, extra_specs):
            # A Metro volume must also be visible on the remote array.
            remote_info_dict, is_multiattach = (
                self.find_host_lun_id(volume, connector.get('host'),
                                      extra_specs, rep_extra_specs))
            if remote_info_dict.get('hostlunid') is None:
                # Need to attach on remote side
                metro_host_lun, remote_port_group = (
                    self._attach_metro_volume(
                        volume, connector, is_multiattach, extra_specs,
                        rep_extra_specs))
            else:
                metro_host_lun = remote_info_dict['hostlunid']
                remote_port_group = self.get_port_group_from_masking_view(
                    rep_extra_specs[utils.ARRAY],
                    remote_info_dict['maskingview'])
            device_info_dict['metro_hostlunid'] = metro_host_lun
    else:
        if is_multiattach and extra_specs[utils.SLO]:
            # Need to move volume to a non-fast managed storagegroup
            # before attach on subsequent host(s)
            masking_view_dict = self.masking.pre_multiattach(
                extra_specs[utils.ARRAY],
                masking_view_dict[utils.DEVICE_ID], masking_view_dict,
                extra_specs)
        device_info_dict, port_group_name = (
            self._attach_volume(
                volume, connector, extra_specs, masking_view_dict))
        if self.utils.is_metro_device(rep_config, extra_specs):
            # Need to attach on remote side
            metro_host_lun, remote_port_group = self._attach_metro_volume(
                volume, connector, is_multiattach, extra_specs,
                rep_extra_specs)
            device_info_dict['metro_hostlunid'] = metro_host_lun
    if self.protocol.lower() == 'iscsi':
        # iSCSI connection info needs target IP/IQN for the port group.
        device_info_dict['ip_and_iqn'] = (
            self._find_ip_and_iqns(
                extra_specs[utils.ARRAY], port_group_name))
        if self.utils.is_metro_device(rep_config, extra_specs):
            device_info_dict['metro_ip_and_iqn'] = (
                self._find_ip_and_iqns(
                    rep_extra_specs[utils.ARRAY], remote_port_group))
        device_info_dict['is_multipath'] = is_multipath
    array_tag_list = self.get_tags_of_storage_array(
        extra_specs[utils.ARRAY])
    if array_tag_list:
        masking_view_dict['array_tag_list'] = array_tag_list
    # Extra masking view/storage group detail is only gathered when
    # debug logging is enabled, as it costs additional REST calls.
    if is_multiattach and LOG.isEnabledFor(logging.DEBUG):
        masking_view_dict['mv_list'], masking_view_dict['sg_list'] = (
            self._get_mvs_and_sgs_from_volume(
                extra_specs[utils.ARRAY],
                masking_view_dict[utils.DEVICE_ID]))
    elif not is_multiattach and LOG.isEnabledFor(logging.DEBUG):
        masking_view_dict['tag_list'] = self.get_tags_of_storage_group(
            extra_specs[utils.ARRAY], masking_view_dict[utils.SG_NAME])
    self.volume_metadata.capture_attach_info(
        volume, extra_specs, masking_view_dict, connector['host'],
        is_multipath, is_multiattach)
    return device_info_dict
def get_tags_of_storage_group(self, array, storage_group_name):
    """Get the tag information from a storage group.

    :param array: serial number of array
    :param storage_group_name: storage group name
    :returns: tag list, or None if the storage group cannot be fetched
    """
    try:
        sg_details = self.rest.get_storage_group(
            array, storage_group_name)
    except Exception:
        return None
    else:
        return sg_details.get('tags')
def get_tags_of_storage_array(self, array):
    """Get the tag information from an array.

    Best effort only: any REST failure results in None.

    :param array: serial number of array
    :returns: tag list, or None on failure
    """
    try:
        return self.rest.get_array_tags(array)
    except Exception:
        return None
def _attach_metro_volume(self, volume, connector, is_multiattach,
                         extra_specs, rep_extra_specs):
    """Helper method to attach a metro volume.

    Metro protected volumes point to two PowerMax/VMAX devices on
    different arrays, which are presented as a single device to the
    host. This method masks the remote device to the host.

    :param volume: the volume object
    :param connector: the connector dict
    :param is_multiattach: flag to indicate if this a multiattach case
    :param extra_specs: the extra specifications
    :param rep_extra_specs: replication extra specifications
    :returns: hostlunid, remote_port_group
    """
    remote_mv_dict = self._populate_masking_dict(
        volume, connector, extra_specs, rep_extra_specs)
    remote_mv_dict[utils.IS_MULTIATTACH] = bool(is_multiattach)
    if is_multiattach and rep_extra_specs[utils.SLO]:
        # Move the volume to a non-fast managed storage group before
        # attaching on subsequent host(s).
        remote_mv_dict = self.masking.pre_multiattach(
            rep_extra_specs[utils.ARRAY],
            remote_mv_dict[utils.DEVICE_ID],
            remote_mv_dict, rep_extra_specs)
    remote_info_dict, __ = self._attach_volume(
        volume, connector, extra_specs, remote_mv_dict,
        rep_extra_specs=rep_extra_specs)
    # Derive the port group from the remote masking view rather than
    # using the one returned by _attach_volume.
    remote_port_group = self.get_port_group_from_masking_view(
        rep_extra_specs[utils.ARRAY], remote_info_dict['maskingview'])
    return remote_info_dict['hostlunid'], remote_port_group
def _attach_volume(self, volume, connector, extra_specs,
                   masking_view_dict, rep_extra_specs=None):
    """Attach a volume to a host.

    :param volume: the volume object
    :param connector: the connector object
    :param extra_specs: extra specifications
    :param masking_view_dict: masking view information
    :param rep_extra_specs: rep extra specs are passed if metro device
    :returns: dict -- device_info_dict
              String -- port group name
    :raises: VolumeBackendAPIException
    """
    m_specs = rep_extra_specs if rep_extra_specs is not None else (
        extra_specs)
    rollback_dict = self.masking.setup_masking_view(
        masking_view_dict[utils.ARRAY], volume,
        masking_view_dict, m_specs)

    # Find host lun id again after the volume is exported to the host.
    device_info_dict, __ = self.find_host_lun_id(
        volume, connector.get('host'), extra_specs, rep_extra_specs)
    if 'hostlunid' in device_info_dict:
        return device_info_dict, rollback_dict[utils.PORTGROUPNAME]

    # Did not successfully attach to host, so a rollback is required.
    error_message = (_("Error Attaching volume %(vol)s. Cannot "
                       "retrieve hostlunid.") % {'vol': volume.id})
    LOG.error(error_message)
    self.masking.check_if_rollback_action_for_masking_required(
        masking_view_dict[utils.ARRAY], volume,
        masking_view_dict[utils.DEVICE_ID], rollback_dict)
    raise exception.VolumeBackendAPIException(message=error_message)
def terminate_connection(self, volume, connector):
    """Disallow connection from connector.

    Dispatches to the promotion-specific unmap when a failover
    promotion is in progress.

    :param volume: the volume Object
    :param connector: the connector Object
    """
    LOG.info("Terminate connection: %(volume)s.",
             {'volume': volume.name})
    unmap = (self._unmap_lun_promotion if self.promotion
             else self._unmap_lun)
    unmap(volume, connector)
def extend_volume(self, volume, new_size):
    """Extends an existing volume.

    Replicated volumes on arrays without full Online Device Expansion
    (ODE) support fall back to a legacy break/extend/re-protect
    workflow, which must be explicitly allowed via 'allow_extend' in
    the replication device configuration.

    :param volume: the volume Object
    :param new_size: the new size to increase the volume to
    :raises: VolumeBackendAPIException: if the volume cannot be found,
        the new size is not larger, or legacy extend is not permitted
    """
    # Set specific attributes for extend operation
    ex_specs = self._initial_setup(volume)
    array = ex_specs[utils.ARRAY]
    device_id = self._find_device_on_array(volume, ex_specs)
    vol_name = volume.name
    orig_vol_size = volume.size
    rep_enabled = self.utils.is_replication_enabled(ex_specs)
    rdf_grp_no = None
    legacy_extend = False

    # Run validation and capabilities checks
    self._extend_vol_validation_checks(
        array, device_id, vol_name, ex_specs, orig_vol_size, new_size)

    # Get extend workflow dependent on array gen and replication status
    if rep_enabled:
        rep_config = ex_specs[utils.REP_CONFIG]
        rdf_grp_no, __ = self.get_rdf_details(array, rep_config)
        self._validate_rdfg_status(array, ex_specs)
        r1_ode, r1_ode_metro, r2_ode, r2_ode_metro = (
            self._array_ode_capabilities_check(array, rep_config, True))
        if self.next_gen:
            # Metro volumes additionally need ODE metro support on both
            # the R1 and R2 arrays.
            if self.utils.is_metro_device(rep_config, ex_specs):
                if not r1_ode_metro or not r2_ode or not r2_ode_metro:
                    legacy_extend = True
        else:
            legacy_extend = True

    # Handle the extend process using workflow info from previous steps
    if legacy_extend:
        rep_config = ex_specs[utils.REP_CONFIG]
        if rep_config.get('allow_extend', False):
            LOG.info("Legacy extend volume %(volume)s to %(new_size)d GBs",
                     {'volume': vol_name, 'new_size': int(new_size)})
            self._extend_legacy_replicated_vol(
                array, volume, device_id, vol_name, new_size, ex_specs,
                rdf_grp_no)
        else:
            # Fix: wrap user-facing message in _() for i18n, consistent
            # with every other raise in this module.
            exception_message = _(
                "Extending a replicated volume on this backend is not "
                "permitted. Please set 'allow_extend:True' in your "
                "PowerMax replication target_backend configuration.")
            LOG.error(exception_message)
            raise exception.VolumeBackendAPIException(
                message=exception_message)
    else:
        LOG.info("ODE extend volume %(volume)s to %(new_size)d GBs",
                 {'volume': vol_name,
                  'new_size': int(new_size)})
        self.provision.extend_volume(
            array, device_id, new_size, ex_specs, rdf_grp_no)

    self.volume_metadata.capture_extend_info(
        volume, new_size, device_id, ex_specs, array)
    LOG.debug("Leaving extend_volume: %(volume_name)s. ",
              {'volume_name': vol_name})
def _extend_vol_validation_checks(self, array, device_id, vol_name,
                                  ex_specs, orig_vol_size, new_size):
    """Run validation checks on settings for extend volume operation.

    :param array: the array serial number
    :param device_id: the device id
    :param vol_name: the volume name
    :param ex_specs: extra specifications
    :param orig_vol_size: the original volume size
    :param new_size: the new size the volume should be
    :raises: VolumeBackendAPIException
    """
    # Check 1 - the device must exist on the array.
    if device_id is None:
        msg = (_(
            "Cannot find Volume: %(volume_name)s. Extend operation. "
            "Exiting....") % {'volume_name': vol_name})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(message=msg)

    # Check 2 - no in-progress clone operation; snapVx source volumes
    # can only be extended on next-gen arrays.
    self._sync_check(array, device_id, ex_specs)
    __, snapvx_src, __ = self.rest.is_vol_in_rep_session(
        array, device_id)
    if snapvx_src and not self.next_gen:
        msg = (_(
            "The volume: %(volume)s is a snapshot source. "
            "Extending a volume with snapVx snapshots is only "
            "supported on PowerMax/VMAX from OS version 5978 "
            "onwards. Exiting...") % {'volume': vol_name})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(message=msg)

    # Check 3 - the new size must be strictly larger than the old one.
    if int(orig_vol_size) >= int(new_size):
        msg = (_(
            "Your original size: %(orig_vol_size)s GB is greater "
            "than or the same as: %(new_size)s GB. Only extend ops are "
            "supported. Exiting...") % {'orig_vol_size': orig_vol_size,
                                        'new_size': new_size})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(message=msg)
def _array_ode_capabilities_check(self, array, rep_config,
rep_enabled=False):
"""Given an array, check Online Device Expansion (ODE) support.
:param array: the array serial number
:param rep_config: the replication configuration
:param rep_enabled: if replication is enabled for backend
:returns: r1_ode: (bool) If R1 array supports ODE
:returns: r1_ode_metro: (bool) If R1 array supports ODE with Metro vols
:returns: r2_ode: (bool) If R2 array supports ODE
:returns: r2_ode_metro: (bool) If R2 array supports ODE with Metro vols
"""
r1_ucode = self.ucode_level.split('.')
r1_ode, r1_ode_metro = False, False
r2_ode, r2_ode_metro = False, False
if self.next_gen:
r1_ode = True
if rep_enabled:
__, r2_array = self.get_rdf_details(array, rep_config)
r2_ucode = self.rest.get_array_ucode_version(r2_array)
if int(r1_ucode[2]) > utils.UCODE_5978_ELMSR:
r1_ode_metro = True
r2_ucode = r2_ucode.split('.')
if self.rest.is_next_gen_array(r2_array):
r2_ode = True
if int(r2_ucode[2]) > utils.UCODE_5978_ELMSR:
r2_ode_metro = True
return r1_ode, r1_ode_metro, r2_ode, r2_ode_metro
@coordination.synchronized('emc-{rdf_group_no}-rdf')
def _extend_legacy_replicated_vol(
        self, array, volume, device_id, volume_name, new_size, extra_specs,
        rdf_group_no):
    """Extend a legacy OS volume without Online Device Expansion.

    The RDF pair is broken, the R1 device extended, and replication
    re-established afterwards. The decorator serialises the whole
    sequence per RDF group number.

    :param volume: the volume object
    :param array: the array serial number
    :param device_id: the volume device id
    :param volume_name: the volume name
    :param new_size: the new size the volume should be
    :param extra_specs: extra specifications
    :param rdf_group_no: the RDF group number
    :raises: VolumeBackendAPIException: if any step of the
        break/extend/re-protect sequence fails
    """
    try:
        # Break the RDF device pair relationship and cleanup R2
        LOG.info("Breaking replication relationship...")
        self.break_rdf_device_pair_session(
            array, device_id, volume_name, extra_specs, volume)

        # Extend the R1 volume
        LOG.info("Extending source volume...")
        self.provision.extend_volume(
            array, device_id, new_size, extra_specs)

        # Setup volume replication again for source volume
        LOG.info("Recreating replication relationship...")
        rep_status, __, __, rep_extra_specs, resume_rdf = (
            self.configure_volume_replication(
                array, volume, device_id, extra_specs))

        # If first/only volume in SG then RDF protect SG
        if rep_status == 'first_vol_in_rdf_group':
            self._protect_storage_group(
                array, device_id, volume, volume_name, rep_extra_specs)

        # If more than one volume in SG then resume replication
        if resume_rdf:
            self.rest.srdf_resume_replication(
                array, rep_extra_specs['mgmt_sg_name'],
                rep_extra_specs['rdf_group_no'], extra_specs)
    except Exception as e:
        exception_message = (_("Error extending volume. Error received "
                               "was %(e)s") % {'e': e})
        LOG.error(exception_message)
        raise exception.VolumeBackendAPIException(
            message=exception_message)
def update_volume_stats(self):
    """Retrieve stats info.

    Builds one pool entry per configured SLO/workload/SRP/array
    combination. SRP capacity figures are queried once per array serial
    and cached in a local dict so that arrays shared by several pools
    are not queried repeatedly.

    :returns: dict -- scheduler stats including the per-pool list
    """
    # Re-test the primary Unisphere when auto fail-back is enabled.
    if self.rest.u4p_in_failover and self.rest.u4p_failover_autofailback:
        self.retest_primary_u4p()
    pools = []
    # Dictionary to hold the arrays for which the SRP details
    # have already been queried.
    arrays = {}
    total_capacity_gb = 0
    free_capacity_gb = 0
    provisioned_capacity_gb = 0
    location_info = None
    backend_name = self.pool_info['backend_name']
    max_oversubscription_ratio = (
        self.pool_info['max_over_subscription_ratio'])
    reserved_percentage = self.pool_info['reserved_percentage']
    array_reserve_percent = None
    array_info_list = self.pool_info['arrays_info']
    already_queried = False
    for array_info in array_info_list:
        if self.failover:
            # While failed over, report stats for the secondary array.
            rep_config = self.rep_configs[0]
            array_info = self.get_secondary_stats_info(
                rep_config, array_info)
        # Add both SLO & Workload name in the pool name
        # Only insert the array details in the dict once
        if array_info['SerialNumber'] not in arrays:
            (location_info, total_capacity_gb, free_capacity_gb,
             provisioned_capacity_gb,
             array_reserve_percent) = self._update_srp_stats(array_info)
            arrays[array_info['SerialNumber']] = (
                [total_capacity_gb, free_capacity_gb,
                 provisioned_capacity_gb, array_reserve_percent])
        else:
            already_queried = True
        # Pool name omits the workload component when it is absent.
        try:
            pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s"
                         % {'slo': array_info['SLO'],
                            'workload': array_info['Workload'],
                            'srpName': array_info['srpName'],
                            'array': array_info['SerialNumber']})
        except KeyError:
            pool_name = ("%(slo)s+%(srpName)s+%(array)s"
                         % {'slo': array_info['SLO'],
                            'srpName': array_info['srpName'],
                            'array': array_info['SerialNumber']})
        if already_queried:
            # The dictionary will only have one key per PowerMax/VMAX
            # Construct the location info
            pool = self._construct_location_info_and_pool(
                array_info, pool_name, arrays, max_oversubscription_ratio,
                reserved_percentage)
        else:
            pool = {'pool_name': pool_name,
                    'total_capacity_gb': total_capacity_gb,
                    'free_capacity_gb': free_capacity_gb,
                    'provisioned_capacity_gb': provisioned_capacity_gb,
                    'QoS_support': False,
                    'location_info': location_info,
                    'consistencygroup_support': False,
                    'thin_provisioning_support': True,
                    'thick_provisioning_support': False,
                    'consistent_group_snapshot_enabled': True,
                    'max_over_subscription_ratio':
                        max_oversubscription_ratio,
                    'reserved_percentage': reserved_percentage,
                    'replication_enabled': self.replication_enabled,
                    'group_replication_enabled': self.replication_enabled,
                    'consistent_group_replication_enabled':
                        self.replication_enabled
                    }
            # The larger of backend and array reserve percentage wins;
            # a non-int backend value is simply overridden.
            if array_reserve_percent:
                if isinstance(reserved_percentage, int):
                    if array_reserve_percent > reserved_percentage:
                        pool['reserved_percentage'] = (
                            array_reserve_percent)
                else:
                    pool['reserved_percentage'] = array_reserve_percent
        pools.append(pool)
    pools = self.utils.add_legacy_pools(pools)
    if self.promotion:
        primary_array = self.configuration.safe_get('powermax_array')
        pools = self.utils.add_promotion_pools(pools, primary_array)
    data = {'vendor_name': "Dell EMC",
            'driver_version': self.version,
            'storage_protocol': 'unknown',
            'volume_backend_name': backend_name or
            self.__class__.__name__,
            # Use zero capacities here so we always use a pool.
            'total_capacity_gb': 0,
            'free_capacity_gb': 0,
            'provisioned_capacity_gb': 0,
            'reserved_percentage': 0,
            'replication_enabled': self.replication_enabled,
            'replication_targets': self.replication_targets,
            'pools': pools}
    return data
def _construct_location_info_and_pool(
self, array_info, pool_name, arrays, max_oversubscription_ratio,
reserved_percentage):
"""Construct the location info string and the pool dict
:param array_info: array information dict
:param pool_name: pool name
:param arrays: arrays dict
:param max_oversubscription_ratio: max oversubscription ratio
:param reserved_percentage: reserved percentage
:returns: pool - dict
"""
try:
temp_location_info = (
("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s"
% {'arrayName': array_info['SerialNumber'],
'srpName': array_info['srpName'],
'slo': array_info['SLO'],
'workload': array_info['Workload']}))
except KeyError:
temp_location_info = (
("%(arrayName)s#%(srpName)s#%(slo)s"
% {'arrayName': array_info['SerialNumber'],
'srpName': array_info['srpName'],
'slo': array_info['SLO']}))
pool = {'pool_name': pool_name,
'total_capacity_gb':
arrays[array_info['SerialNumber']][0],
'free_capacity_gb':
arrays[array_info['SerialNumber']][1],
'provisioned_capacity_gb':
arrays[array_info['SerialNumber']][2],
'QoS_support': False,
'location_info': temp_location_info,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'consistent_group_snapshot_enabled': True,
'max_over_subscription_ratio':
max_oversubscription_ratio,
'reserved_percentage': reserved_percentage,
'replication_enabled': self.replication_enabled,
'multiattach': True}
if arrays[array_info['SerialNumber']][3]:
if reserved_percentage:
if (arrays[array_info['SerialNumber']][3] >
reserved_percentage):
pool['reserved_percentage'] = (
arrays[array_info['SerialNumber']][3])
else:
pool['reserved_percentage'] = (
arrays[array_info['SerialNumber']][3])
return pool
def _update_srp_stats(self, array_info):
    """Update SRP stats.

    :param array_info: array information
    :returns: location_info
    :returns: totalManagedSpaceGbs
    :returns: remainingManagedSpaceGbs
    :returns: provisionedManagedSpaceGbs
    :returns: array_reserve_percent
    """
    (total_gb, remaining_gb, provisioned_gb, array_reserve_percent) = (
        self.provision.get_srp_pool_stats(
            array_info['SerialNumber'], array_info))

    LOG.info("Capacity stats for SRP pool %(srpName)s on array "
             "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, "
             "free_capacity_gb=%(free_capacity_gb)lu, "
             "provisioned_capacity_gb=%(provisioned_capacity_gb)lu",
             {'srpName': array_info['srpName'],
              'arrayName': array_info['SerialNumber'],
              'total_capacity_gb': total_gb,
              'free_capacity_gb': remaining_gb,
              'provisioned_capacity_gb': provisioned_gb})

    # The workload component is optional in the location string.
    try:
        location_info = ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s"
                         % {'arrayName': array_info['SerialNumber'],
                            'srpName': array_info['srpName'],
                            'slo': array_info['SLO'],
                            'workload': array_info['Workload']})
    except KeyError:
        location_info = ("%(arrayName)s#%(srpName)s#%(slo)s"
                         % {'arrayName': array_info['SerialNumber'],
                            'srpName': array_info['srpName'],
                            'slo': array_info['SLO']})
    return (location_info, total_gb, remaining_gb, provisioned_gb,
            array_reserve_percent)
def _set_config_file_and_get_extra_specs(self, volume,
                                         volume_type_id=None):
    """Given the volume object get the associated volumetype.

    Given the volume object get the associated volumetype and the
    extra specs associated with it.
    Based on the name of the config group, register the config file

    :param volume: the volume object including the volume_type_id
    :param volume_type_id: Optional override of volume.volume_type_id
    :returns: dict -- the extra specs dict
    :returns: dict -- QoS specs
    :raises: VolumeBackendAPIException: if no matching rep_device can
        be determined for a replication-enabled volume type
    """
    qos_specs = {}
    extra_specs = self.utils.get_volumetype_extra_specs(
        volume, volume_type_id)
    type_id = volume.volume_type_id
    if type_id:
        res = volume_types.get_volume_type_qos_specs(type_id)
        qos_specs = res['qos_specs']

    # If there are no extra specs then the default case is assumed.
    if extra_specs:
        if extra_specs.get('replication_enabled') == '<is> True':
            extra_specs[utils.IS_RE] = True
            backend_id = self._get_replicated_volume_backend_id(volume)
            rep_config = self.utils.get_rep_config(
                backend_id, self.rep_configs)
            if rep_config is None:
                msg = _('Could not determine which rep_device to use '
                        'from cinder.conf')
                # Fix: pass the message via the 'message' keyword for
                # consistency with every other raise in this module.
                raise exception.VolumeBackendAPIException(message=msg)
            extra_specs[utils.REP_CONFIG] = rep_config
            if rep_config.get('mode'):
                extra_specs[utils.REP_MODE] = rep_config['mode']
            if rep_config.get(utils.METROBIAS):
                extra_specs[utils.METROBIAS] = (
                    rep_config[utils.METROBIAS])
    return extra_specs, qos_specs
def _get_replicated_volume_backend_id(self, volume):
    """Given a volume, return its rep device backend id.

    Falls back to the legacy replication backend id when the volume
    type does not specify one.

    :param volume: volume used to retrieve backend id -- volume
    :returns: backend id -- str
    """
    specs = self.utils.get_volumetype_extra_specs(volume)
    configured_id = (
        specs.get(utils.REPLICATION_DEVICE_BACKEND_ID) if specs
        else None)
    return configured_id or utils.BACKEND_ID_LEGACY_REP
def _find_device_on_array(self, volume, extra_specs, remote_device=False):
    """Given the volume get the PowerMax/VMAX device Id.

    :param volume: volume object
    :param extra_specs: the extra Specs
    :param remote_device: find remote device for replicated volumes
    :returns: found device id, or None if not found
    """
    found_device_id = None
    volume_name = volume.id
    name_id = getattr(volume, '_name_id', None)
    loc = (volume.replication_driver_data if remote_device
           else volume.provider_location)
    if isinstance(loc, six.string_types):
        name = ast.literal_eval(loc)
        array = extra_specs[utils.ARRAY]
        # Older volumes may store the device under 'keybindings'.
        if name.get('device_id'):
            device_id = name['device_id']
        elif name.get('keybindings'):
            device_id = name['keybindings']['DeviceID']
        else:
            device_id = None
        try:
            found_device_id = self.rest.check_volume_device_id(
                array, device_id, volume_name, name_id)
        except exception.VolumeBackendAPIException:
            # Not found on the array; fall through and return None.
            pass
    if found_device_id is None:
        LOG.debug("Volume %(volume_name)s not found on the array.",
                  {'volume_name': volume_name})
    else:
        LOG.debug("Volume name: %(volume_name)s Volume device id: "
                  "%(founddevice_id)s.",
                  {'volume_name': volume_name,
                   'founddevice_id': found_device_id})
    return found_device_id
def find_host_lun_id(self, volume, host, extra_specs,
                     rep_extra_specs=None, connector=None):
    """Given the volume dict find the host lun id for a volume.

    :param volume: the volume dict
    :param host: host from connector (can be None on a force-detach)
    :param extra_specs: the extra specs
    :param rep_extra_specs: rep extra specs, passed in if metro device
    :param connector: connector object can be none.
    :returns: dict -- the data dict (empty if the volume is not masked
        to this host), bool -- is_multiattach
    :raises: VolumeBackendAPIException: if the volume device cannot be
        found on the array
    """
    maskedvols = {}
    is_multiattach = False
    volume_name = volume.name
    device_id = self._find_device_on_array(volume, extra_specs)
    if connector:
        # Opportunistically migrate volumes out of legacy (pre-Pike)
        # shared storage groups.
        if self.migrate.do_migrate_if_candidate(
                extra_specs[utils.ARRAY], extra_specs[utils.SRP],
                device_id, volume, connector):
            LOG.debug("MIGRATE - Successfully migrated from device "
                      "%(dev)s from legacy shared storage groups, "
                      "pre Pike release.",
                      {'dev': device_id})

    if rep_extra_specs:
        # For metro devices work against the remote (R2) device.
        rdf_pair_info = self.rest.get_rdf_pair_volume(
            extra_specs[utils.ARRAY], rep_extra_specs['rdf_group_no'],
            device_id)
        device_id = rdf_pair_info.get('remoteVolumeName', None)
        extra_specs = rep_extra_specs

    host_name = self.utils.get_host_name_label(
        host, self.powermax_short_host_name_template) if host else None
    if device_id:
        array = extra_specs[utils.ARRAY]
        # Return only masking views for this host
        host_maskingviews, all_masking_view_list = (
            self._get_masking_views_from_volume(
                array, device_id, host_name))
        if not host_maskingviews:
            # Backward compatibility if a new template was added to
            # an existing backend.
            host_name = self.utils.get_host_short_name(
                host) if host else None
            host_maskingviews, all_masking_view_list = (
                self._get_masking_views_from_volume_for_host(
                    all_masking_view_list, host_name))

        for maskingview in host_maskingviews:
            host_lun_id = self.rest.find_mv_connections_for_vol(
                array, maskingview, device_id)
            if host_lun_id is not None:
                devicedict = {'hostlunid': host_lun_id,
                              'maskingview': maskingview,
                              'array': array,
                              'device_id': device_id}
                maskedvols = devicedict
        if not maskedvols:
            LOG.debug(
                "Host lun id not found for volume: %(volume_name)s "
                "with the device id: %(device_id)s on host: %(host)s.",
                {'volume_name': volume_name,
                 'device_id': device_id, 'host': host_name})
        if len(all_masking_view_list) > len(host_maskingviews):
            # The volume is visible through masking views belonging to
            # other hosts - live migration or multi-attach.
            other_maskedvols = []
            for maskingview in all_masking_view_list:
                host_lun_id = self.rest.find_mv_connections_for_vol(
                    array, maskingview, device_id)
                if host_lun_id is not None:
                    devicedict = {'hostlunid': host_lun_id,
                                  'maskingview': maskingview,
                                  'array': array,
                                  'device_id': device_id}
                    other_maskedvols.append(devicedict)
            if len(other_maskedvols) > 0:
                LOG.debug("Volume is masked to a different host "
                          "than %(host)s - Live Migration or Multi-Attach "
                          "use case.", {'host': host})
                is_multiattach = True
    else:
        exception_message = (_("Cannot retrieve volume %(vol)s "
                               "from the array.") % {'vol': volume_name})
        LOG.error(exception_message)
        # Fix: pass the message via the 'message' keyword for
        # consistency with every other raise in this module.
        raise exception.VolumeBackendAPIException(
            message=exception_message)

    return maskedvols, is_multiattach
def get_masking_views_from_volume(self, array, volume, device_id, host):
    """Get all masking views from a volume.

    :param array: array serial number
    :param volume: the volume object
    :param device_id: the volume device id
    :param host: the host
    :returns: masking view list, is metro
    """
    extra_specs = self._initial_setup(volume)
    mv_list, __ = self._get_masking_views_from_volume(
        array, device_id, host)
    is_metro = bool(self.utils.is_metro_device(
        extra_specs.get(utils.REP_CONFIG), extra_specs))
    return mv_list, is_metro
def _get_masking_views_from_volume(self, array, device_id, host):
    """Helper function to retrieve masking view list for a volume.

    :param array: array serial number
    :param device_id: the volume device id
    :param host: the host
    :returns: masking view list, all masking view list
    """
    LOG.debug("Getting masking views from volume")
    all_mvs, __ = self._get_mvs_and_sgs_from_volume(array, device_id)
    return self._get_masking_views_from_volume_for_host(all_mvs, host)
def _get_masking_views_from_volume_for_host(
        self, masking_views, host_name):
    """Check all masking views for host_name.

    When no host name is supplied, the full masking view list is
    returned in both positions.

    :param masking_views: list of masking view
    :param host_name: the host name for comparison
    :returns: masking view list, all masking view list
    """
    LOG.debug("Getting masking views from volume for host %(host)s ",
              {'host': host_name})
    all_masking_view_list = list(masking_views)
    if not host_name:
        return all_masking_view_list, all_masking_view_list
    needle = host_name.lower()
    host_masking_view_list = [
        mv for mv in all_masking_view_list if needle in mv.lower()]
    return host_masking_view_list, all_masking_view_list
def _get_mvs_and_sgs_from_volume(self, array, device_id):
"""Helper function to retrieve masking views and storage groups.
:param array: array serial number
:param device_id: the volume device id
:returns: masking view list, storage group list
"""
final_masking_view_list = []
storage_group_list = self.rest.get_storage_groups_from_volume(
array, device_id)
for sg in storage_group_list:
masking_view_list = self.rest.get_masking_views_from_storage_group(
array, sg)
final_masking_view_list.extend(masking_view_list)
return final_masking_view_list, storage_group_list
def _initial_setup(self, volume, volume_type_id=None,
                   init_conn=False):
    """Necessary setup to accumulate the relevant information.

    The volume object has a host in which we can parse the
    config group name. The config group name is the key to our EMC
    configuration file. The emc configuration file contains srp name
    and array name which are mandatory fields.

    :param volume: the volume object -- obj
    :param volume_type_id: optional override of volume.volume_type_id
        -- str
    :param init_conn: if extra specs are for initialize connection
        -- bool
    :returns: dict -- extra spec dict
    :raises: VolumeBackendAPIException: if configuration information
        cannot be obtained
    """
    try:
        array_info = self.get_attributes_from_cinder_config()
        if not array_info:
            exception_message = (_(
                "Unable to get corresponding record for srp. Please "
                "refer to the current online documentation for correct "
                "configuration and note that the xml file is no longer "
                "supported."))
            raise exception.VolumeBackendAPIException(
                message=exception_message)
        extra_specs, qos_specs = (
            self._set_config_file_and_get_extra_specs(
                volume, volume_type_id))
        extra_specs = self._set_vmax_extra_specs(
            extra_specs, array_info, init_conn)
        # Backend QoS (front-end is handled elsewhere).
        if qos_specs and qos_specs.get('consumer') != "front-end":
            extra_specs['qos'] = qos_specs.get('specs')
    except Exception:
        # Re-wrap any failure with the underlying error message.
        exception_message = (_(
            "Unable to get configuration information necessary to "
            "create a volume: %(errorMessage)s.")
            % {'errorMessage': sys.exc_info()[1]})
        raise exception.VolumeBackendAPIException(
            message=exception_message)
    return extra_specs
def _populate_masking_dict(self, volume, connector,
extra_specs, rep_extra_specs=None):
"""Get all the names of the maskingview and sub-components.
:param volume: the volume object
:param connector: the connector object
:param extra_specs: extra specifications
:param rep_extra_specs: replication extra specs, if metro volume
:returns: dict -- a dictionary with masking view information
"""
masking_view_dict = {}
volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs)
if rep_extra_specs is not None:
rdf_pair_info = self.rest.get_rdf_pair_volume(
extra_specs[utils.ARRAY], rep_extra_specs['rdf_group_no'],
device_id)
device_id = rdf_pair_info.get('remoteVolumeName', None)
extra_specs = rep_extra_specs
if not device_id:
exception_message = (_("Cannot retrieve volume %(vol)s "
"from the array. ") % {'vol': volume_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(exception_message)
protocol = self.utils.get_short_protocol_type(self.protocol)
short_host_name = self.utils.get_host_name_label(
connector['host'], self.powermax_short_host_name_template)
masking_view_dict[utils.USED_HOST_NAME] = short_host_name
masking_view_dict[utils.SLO] = extra_specs[utils.SLO]
masking_view_dict[utils.WORKLOAD] = 'NONE' if self.next_gen else (
extra_specs[utils.WORKLOAD])
masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY]
masking_view_dict[utils.SRP] = extra_specs[utils.SRP]
if not extra_specs[utils.PORTGROUPNAME]:
LOG.warning("You must supply a valid pre-created port group "
"in cinder.conf or as an extra spec. Port group "
"cannot be left empty as creating a new masking "
"view will fail.")
masking_view_dict[utils.PORT_GROUP_LABEL] = (
self.utils.get_port_name_label(
extra_specs[utils.PORTGROUPNAME],
self.powermax_port_group_name_template))
masking_view_dict[utils.PORTGROUPNAME] = (
extra_specs[utils.PORTGROUPNAME])
masking_view_dict[utils.INITIATOR_CHECK] = (
self._get_initiator_check_flag())
child_sg_name, do_disable_compression, rep_enabled = (
self.utils.get_child_sg_name(
short_host_name, extra_specs,
masking_view_dict[utils.PORT_GROUP_LABEL]))
masking_view_dict[utils.DISABLECOMPRESSION] = do_disable_compression
masking_view_dict[utils.IS_RE] = rep_enabled
mv_prefix = (
"OS-%(shortHostName)s-%(protocol)s-%(pg)s"
% {'shortHostName': short_host_name,
'protocol': protocol,
'pg': masking_view_dict[utils.PORT_GROUP_LABEL]})
masking_view_dict[utils.SG_NAME] = child_sg_name
masking_view_dict[utils.MV_NAME] = ("%(prefix)s-MV"
% {'prefix': mv_prefix})
masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG"
% {'prefix': mv_prefix})
masking_view_dict[utils.IG_NAME] = (
("OS-%(shortHostName)s-%(protocol)s-IG"
% {'shortHostName': short_host_name,
'protocol': protocol}))
masking_view_dict[utils.CONNECTOR] = connector
masking_view_dict[utils.DEVICE_ID] = device_id
masking_view_dict[utils.VOL_NAME] = volume_name
return masking_view_dict
def _create_cloned_volume(
self, volume, source_volume, extra_specs, is_snapshot=False,
from_snapvx=False):
"""Create a clone volume from the source volume.
:param volume: clone volume
:param source_volume: source of the clone volume
:param extra_specs: extra specs
:param is_snapshot: boolean -- Defaults to False
:param from_snapvx: bool -- Defaults to False
:returns: dict -- cloneDict the cloned volume dictionary
:raises: VolumeBackendAPIException:
"""
clone_name = volume.name
snap_name = None
rep_update, rep_info_dict = dict(), dict()
LOG.info("Create a replica from Volume: Clone Volume: %(clone_name)s "
"from Source Volume: %(source_name)s.",
{'clone_name': clone_name,
'source_name': source_volume.name})
array = extra_specs[utils.ARRAY]
is_clone_license = self.rest.is_snapvx_licensed(array)
if not is_clone_license:
exception_message = (_(
"SnapVx feature is not licensed on %(array)s.")
% {'array': array})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
if from_snapvx:
source_device_id, snap_name, __ = self._parse_snap_info(
array, source_volume)
else:
source_device_id = self._find_device_on_array(
source_volume, extra_specs)
if not source_device_id:
exception_message = (_(
"Cannot find source device on %(array)s.")
% {'array': array})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
# Perform any snapvx cleanup if required before creating the clone
if is_snapshot or from_snapvx:
self._clone_check(array, source_device_id, extra_specs)
if not is_snapshot:
clone_dict, rep_update, rep_info_dict = self._create_replica(
array, volume, source_device_id, extra_specs,
snap_name=snap_name)
else:
clone_dict = self._create_snapshot(
array, volume, source_device_id, extra_specs)
LOG.debug("Leaving _create_cloned_volume: Volume: "
"%(clone_name)s Source Device Id: %(source_name)s ",
{'clone_name': clone_name,
'source_name': source_device_id})
return clone_dict, rep_update, rep_info_dict
def _parse_snap_info(self, array, snapshot):
"""Given a snapshot object, parse the provider_location.
:param array: the array serial number
:param snapshot: the snapshot object
:returns: sourcedevice_id -- str
foundsnap_name -- str
found_snap_id_list -- list
"""
foundsnap_name = None
sourcedevice_id = None
found_snap_id_list = list()
volume_name = snapshot.id
loc = snapshot.provider_location
if isinstance(loc, six.string_types):
name = ast.literal_eval(loc)
try:
sourcedevice_id = name['source_id']
snap_name = name['snap_name']
except KeyError:
LOG.info("Error retrieving snapshot details. Assuming "
"legacy structure of snapshot...")
return None, None, None
try:
snap_detail_list = self.rest.get_volume_snaps(
array, sourcedevice_id, snap_name)
for snap_details in snap_detail_list:
foundsnap_name = snap_name
found_snap_id_list.append(snap_details.get(
'snap_id') if self.rest.is_snap_id else (
snap_details.get('generation')))
except Exception as e:
LOG.info("Exception in retrieving snapshot: %(e)s.",
{'e': e})
foundsnap_name = None
if not foundsnap_name or not sourcedevice_id or not found_snap_id_list:
LOG.debug("Error retrieving snapshot details. "
"Snapshot name: %(snap)s",
{'snap': volume_name})
else:
LOG.debug("Source volume: %(volume_name)s Snap name: "
"%(foundsnap_name)s.",
{'volume_name': sourcedevice_id,
'foundsnap_name': foundsnap_name,
'snap_ids': found_snap_id_list})
return sourcedevice_id, foundsnap_name, found_snap_id_list
def _create_snapshot(self, array, snapshot,
source_device_id, extra_specs):
"""Create a snap Vx of a volume.
:param array: the array serial number
:param snapshot: the snapshot object
:param source_device_id: the source device id
:param extra_specs: the extra specifications
:returns: snap_dict
"""
clone_name = self.utils.get_volume_element_name(snapshot.id)
snap_name = self.utils.truncate_string(clone_name, 19)
try:
self.provision.create_volume_snapvx(array, source_device_id,
snap_name, extra_specs)
except Exception as e:
exception_message = (_("Error creating snap Vx of %(vol)s. "
"Exception received: %(e)s.")
% {'vol': source_device_id,
'e': six.text_type(e)})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
snap_dict = {'snap_name': snap_name, 'source_id': source_device_id}
return snap_dict
def _delete_volume(self, volume):
"""Helper function to delete the specified volume.
Pass in host if is snapshot
:param volume: volume object to be deleted
:returns: volume_name (string vol name)
"""
source_device_id = None
volume_name = volume.name
extra_specs = self._initial_setup(volume)
prov_loc = volume.provider_location
if isinstance(prov_loc, six.string_types):
name = ast.literal_eval(prov_loc)
source_device_id = name.get('source_device_id')
device_id = self._find_device_on_array(volume, extra_specs)
if device_id is None:
LOG.error("Volume %(name)s not found on the array. "
"No volume to delete.",
{'name': volume_name})
return volume_name
array = extra_specs[utils.ARRAY]
if self.utils.is_replication_enabled(extra_specs):
self._validate_rdfg_status(array, extra_specs)
# Check if the volume being deleted is a
# source or target for copy session
self._sync_check(array, device_id, extra_specs,
source_device_id=source_device_id)
# Remove from any storage groups and cleanup replication
self._remove_vol_and_cleanup_replication(
array, device_id, volume_name, extra_specs, volume)
# Check if volume is in any storage group
sg_list = self.rest.get_storage_groups_from_volume(array, device_id)
if sg_list:
LOG.error("Device %(device_id)s is in storage group(s) "
"%(sg_list)s prior to delete. Delete will fail.",
{'device_id': device_id, 'sg_list': sg_list})
self._delete_from_srp(
array, device_id, volume_name, extra_specs)
return volume_name
def _create_volume(self, volume, volume_name, volume_size, extra_specs):
"""Create a volume.
:param volume: the volume
:param volume_name: the volume name
:param volume_size: the volume size
:param extra_specs: extra specifications
:returns: volume_dict, rep_update, rep_info_dict --dict
"""
# Set Create Volume options
is_re, rep_mode, storagegroup_name = False, None, None
rep_info_dict, rep_update = dict(), dict()
# Get Array details
array = extra_specs[utils.ARRAY]
array_model, next_gen = self.rest.get_array_model_info(array)
if next_gen:
extra_specs[utils.WORKLOAD] = 'NONE'
# Verify valid SL/WL combination
is_valid_slo, is_valid_workload = self.provision.verify_slo_workload(
array, extra_specs[utils.SLO],
extra_specs[utils.WORKLOAD], next_gen, array_model)
if not is_valid_slo or not is_valid_workload:
exception_message = (_(
"Either SLO: %(slo)s or workload %(workload)s is invalid. "
"Examine previous error statement for valid values.")
% {'slo': extra_specs[utils.SLO],
'workload': extra_specs[utils.WORKLOAD]})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
LOG.debug("Create Volume: %(volume)s Srp: %(srp)s "
"Array: %(array)s "
"Size: %(size)lu.",
{'volume': volume_name,
'srp': extra_specs[utils.SRP],
'array': array,
'size': volume_size})
do_disable_compression = self.utils.is_compression_disabled(
extra_specs)
if self.utils.is_replication_enabled(extra_specs):
is_re, rep_mode = True, extra_specs['rep_mode']
storagegroup_name = self.masking.get_or_create_default_storage_group(
array, extra_specs[utils.SRP], extra_specs[utils.SLO],
extra_specs[utils.WORKLOAD], extra_specs,
do_disable_compression, is_re, rep_mode)
if not is_re:
volume_dict = self._create_non_replicated_volume(
array, volume, volume_name, storagegroup_name,
volume_size, extra_specs)
else:
volume_dict, rep_update, rep_info_dict = (
self._create_replication_enabled_volume(
array, volume, volume_name, volume_size, extra_specs,
storagegroup_name, rep_mode))
# Compare volume ID against identifier on array. Update if needed.
# This can occur in cases where multiple edits are occurring at once.
found_device_id = self.rest.find_volume_device_id(array, volume_name)
returning_device_id = volume_dict['device_id']
if found_device_id != returning_device_id:
volume_dict['device_id'] = found_device_id
return volume_dict, rep_update, rep_info_dict
@coordination.synchronized("emc-nonrdf-vol-{storagegroup_name}-{array}")
def _create_non_replicated_volume(
self, array, volume, volume_name, storagegroup_name, volume_size,
extra_specs):
"""Create a volume without replication enabled
:param array: the primary array -- string
:param volume: the volume -- dict
:param volume_name: the volume name -- string
:param storagegroup_name: the storage group name -- string
:param volume_size: the volume size -- string
:param extra_specs: extra specifications -- dict
:return: volume_dict -- dict
:raises: VolumeBackendAPIException:
"""
existing_devices = self.rest.get_volumes_in_storage_group(
array, storagegroup_name)
try:
volume_dict = self.provision.create_volume_from_sg(
array, volume_name, storagegroup_name,
volume_size, extra_specs, rep_info=None)
return volume_dict
except Exception as e:
try:
# Attempt cleanup of storage group post exception.
updated_devices = set(self.rest.get_volumes_in_storage_group(
array, storagegroup_name))
devices_to_delete = [device for device in updated_devices
if device not in existing_devices]
if devices_to_delete:
self._cleanup_non_rdf_volume_create_post_failure(
volume, volume_name, extra_specs, devices_to_delete)
elif not existing_devices:
self.rest.delete_storage_group(array, storagegroup_name)
finally:
# Pass actual exception that was raised now that cleanup
# attempt is finished. Mainly VolumeBackendAPIException raised
# from error status codes returned from the various REST jobs.
raise e
@coordination.synchronized('emc-rdf-vol-{storagegroup_name}-{array}')
def _create_replication_enabled_volume(
        self, array, volume, volume_name, volume_size, extra_specs,
        storagegroup_name, rep_mode):
    """Create a volume with replication enabled

    :param array: the primary array
    :param volume: the volume
    :param volume_name: the volume name
    :param volume_size: the volume size
    :param extra_specs: extra specifications
    :param storagegroup_name: the storage group name
    :param rep_mode: the replication mode
    :returns: volume_dict, rep_update, rep_info_dict --dict
    :raises: VolumeBackendAPIException:
    """
    def _is_first_vol_in_replicated_sg():
        # Handle the "first volume in the SG" case: the volume must be
        # created before the SG can be SRDF-protected (or the volume
        # individually configured for replication if the RDFG is in use).
        vol_dict = dict()
        first_vol, rep_ex_specs, rep_info, rdfg_empty = (
            self.prepare_replication_details(extra_specs))
        if first_vol:
            vol_dict = self.provision.create_volume_from_sg(
                array, volume_name, storagegroup_name,
                volume_size, extra_specs, rep_info)
            rep_vol = deepcopy(vol_dict)
            rep_vol.update({'device_uuid': volume_name,
                            'storage_group': storagegroup_name,
                            'size': volume_size})
            if first_vol and rdfg_empty:
                # First volume in SG, first volume in RDFG
                self.srdf_protect_storage_group(
                    extra_specs, rep_ex_specs, rep_vol)
            elif not rdfg_empty and not rep_info:
                # First volume in SG, not first in RDFG
                __, __, __, rep_ex_specs, resume_rdf = (
                    self.configure_volume_replication(
                        array, volume, vol_dict['device_id'],
                        extra_specs))
                if resume_rdf:
                    # Replication was suspended for the pairing; resume.
                    self.rest.srdf_resume_replication(
                        array, rep_ex_specs['mgmt_sg_name'],
                        rep_ex_specs['rdf_group_no'], extra_specs)
        return first_vol, rep_ex_specs, vol_dict

    # Snapshot SG membership before the create so that, on failure, only
    # devices added by this call are cleaned up.
    existing_devices = self.rest.get_volumes_in_storage_group(
        array, storagegroup_name)
    try:
        is_first_volume, rep_extra_specs, volume_info_dict = (
            _is_first_vol_in_replicated_sg())
        if not is_first_volume:
            # SG is already replicated; validate the RDFG state and
            # create the volume directly into the protected group.
            self._validate_rdfg_status(array, extra_specs)
            __, rep_extra_specs, rep_info_dict, __ = (
                self.prepare_replication_details(extra_specs))
            volume_info_dict = self.provision.create_volume_from_sg(
                array, volume_name, storagegroup_name,
                volume_size, extra_specs, rep_info_dict)

        rep_vol_dict = deepcopy(volume_info_dict)
        rep_vol_dict.update({'device_uuid': volume_name,
                             'storage_group': storagegroup_name,
                             'size': volume_size})
        # Resolve and record the remote (R2) device for the model update.
        remote_device_id = self.get_and_set_remote_device_uuid(
            extra_specs, rep_extra_specs, rep_vol_dict)
        rep_vol_dict.update({'remote_device_id': remote_device_id})
        rep_update, rep_info_dict = self.gather_replication_updates(
            extra_specs, rep_extra_specs, rep_vol_dict)

        # Async and Metro volumes are additionally tracked in an RDF
        # management storage group.
        if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]:
            self._add_volume_to_rdf_management_group(
                array, volume_info_dict['device_id'], volume_name,
                rep_extra_specs['array'], remote_device_id,
                extra_specs)

        return volume_info_dict, rep_update, rep_info_dict
    except Exception as e:
        try:
            # Attempt cleanup of rdfg & storage group post exception
            updated_devices = set(self.rest.get_volumes_in_storage_group(
                array, storagegroup_name))
            devices_to_delete = [device for device in updated_devices
                                 if device not in existing_devices]
            if devices_to_delete:
                self._cleanup_rdf_volume_create_post_failure(
                    volume, volume_name, extra_specs, devices_to_delete)
            elif not existing_devices:
                self.rest.delete_storage_group(array, storagegroup_name)
        finally:
            # Pass actual exception that was raised now that cleanup
            # attempt is finished. Mainly VolumeBackendAPIException raised
            # from error status codes returned from the various REST jobs.
            raise e
def _set_vmax_extra_specs(self, extra_specs, pool_record,
init_conn=False):
"""Set the PowerMax/VMAX extra specs.
The pool_name extra spec must be set, otherwise a default slo/workload
will be chosen. The portgroup can either be passed as an extra spec
on the volume type (e.g. 'storagetype:portgroupname = os-pg1-pg'), or
can be chosen from a list provided in the cinder.conf
:param extra_specs: extra specifications -- dict
:param pool_record: pool record -- dict
:param: init_conn: if extra specs are for initialize connection -- bool
:returns: the extra specifications -- dict
"""
# set extra_specs from pool_record
extra_specs[utils.SRP] = pool_record['srpName']
extra_specs[utils.ARRAY] = pool_record['SerialNumber']
extra_specs[utils.PORTGROUPNAME] = (
self._select_port_group_for_extra_specs(extra_specs, pool_record,
init_conn))
self._validate_storage_group_tag_list(extra_specs)
extra_specs[utils.INTERVAL] = self.interval
LOG.debug("The interval is set at: %(intervalInSecs)s.",
{'intervalInSecs': self.interval})
extra_specs[utils.RETRIES] = self.retries
LOG.debug("Retries are set at: %(retries)s.",
{'retries': self.retries})
# Set pool_name slo and workload
if 'pool_name' in extra_specs:
pool_name = extra_specs['pool_name']
pool_details = pool_name.split('+')
slo_from_extra_spec = pool_details[0]
workload_from_extra_spec = pool_details[1]
# Check if legacy pool chosen
if (workload_from_extra_spec == pool_record['srpName'] or
self.next_gen):
workload_from_extra_spec = 'NONE'
elif pool_record.get('ServiceLevel'):
slo_from_extra_spec = pool_record['ServiceLevel']
workload_from_extra_spec = pool_record.get('Workload', 'None')
# If workload is None in cinder.conf, convert to string
if not workload_from_extra_spec or self.next_gen:
workload_from_extra_spec = 'NONE'
LOG.info("Pool_name is not present in the extra_specs "
"- using slo/ workload from cinder.conf: %(slo)s/%(wl)s.",
{'slo': slo_from_extra_spec,
'wl': workload_from_extra_spec})
else:
slo_list = self.rest.get_slo_list(
pool_record['SerialNumber'], self.next_gen, self.array_model)
if 'Optimized' in slo_list:
slo_from_extra_spec = 'Optimized'
elif 'Diamond' in slo_list:
slo_from_extra_spec = 'Diamond'
else:
slo_from_extra_spec = 'None'
workload_from_extra_spec = 'NONE'
LOG.warning("Pool_name is not present in the extra_specs "
"so no slo/ workload information is present "
"using default slo/ workload combination: "
"%(slo)s/%(wl)s.",
{'slo': slo_from_extra_spec,
'wl': workload_from_extra_spec})
# Standardize slo and workload 'NONE' naming conventions
if workload_from_extra_spec.lower() == 'none':
workload_from_extra_spec = 'NONE'
if slo_from_extra_spec.lower() == 'none':
slo_from_extra_spec = None
extra_specs[utils.SLO] = slo_from_extra_spec
extra_specs[utils.WORKLOAD] = workload_from_extra_spec
if self.rest.is_compression_capable(extra_specs[utils.ARRAY]):
if extra_specs.get(utils.DISABLECOMPRESSION):
# If not True remove it.
if not strutils.bool_from_string(
extra_specs[utils.DISABLECOMPRESSION]):
extra_specs.pop(utils.DISABLECOMPRESSION, None)
else:
extra_specs.pop(utils.DISABLECOMPRESSION, None)
self._check_and_add_tags_to_storage_array(
extra_specs[utils.ARRAY], self.powermax_array_tag_list,
extra_specs)
LOG.debug("SRP is: %(srp)s, Array is: %(array)s "
"SLO is: %(slo)s, Workload is: %(workload)s.",
{'srp': extra_specs[utils.SRP],
'array': extra_specs[utils.ARRAY],
'slo': extra_specs[utils.SLO],
'workload': extra_specs[utils.WORKLOAD]})
if self.version_dict:
self.volume_metadata.print_pretty_table(self.version_dict)
else:
self.version_dict = (
self.volume_metadata.gather_version_info(
extra_specs[utils.ARRAY]))
return extra_specs
def _select_port_group_for_extra_specs(self, extra_specs, pool_record,
init_conn=False):
"""Determine Port Group for operation extra specs.
:param extra_specs: existing extra specs -- dict
:param pool_record: pool record -- dict
:param init_conn: if extra specs are for initialize connection -- bool
:returns: Port Group -- str
:raises: exception.VolumeBackendAPIException
"""
port_group = None
conf_port_groups = pool_record.get(utils.PORT_GROUP, [])
vt_port_group = extra_specs.get(utils.PORTGROUPNAME, None)
# Scenario 1: Port Group is set in volume-type extra specs, over-rides
# any settings in cinder.conf
if vt_port_group:
port_group = vt_port_group
LOG.info("Using Port Group '%(pg)s' from volume-type extra specs.",
{'pg': port_group})
# Scenario 2: Port Group(s) set in cinder.conf and not in volume-type
elif conf_port_groups:
# Scenario 2-1: There is only one Port Group defined, no load
# balance or random selection required
if len(conf_port_groups) == 1:
port_group = conf_port_groups[0]
LOG.info(
"Using Port Group '%(pg)s' from cinder.conf backend "
"configuration.", {'pg': port_group})
# Scenario 2-2: Else more than one Port Group in cinder.conf
else:
# Scenario 2-2-1: If load balancing is enabled and the extra
# specs are for initialize_connection() method then use load
# balance selection
if init_conn and (
self.performance.config.get('load_balance', False)):
try:
load, metric, port_group = (
self.performance.process_port_group_load(
extra_specs[utils.ARRAY], conf_port_groups))
LOG.info(
"Selecting Port Group %(pg)s with %(met)s load of "
"%(load)s", {'pg': port_group, 'met': metric,
'load': load})
except exception.VolumeBackendAPIException:
LOG.error(
"There has been a problem calculating Port Group "
"load, reverting to default random selection.")
# Scenario 2-2-2: If the call is not for initialize_connection,
# load balancing is not enabled, or there was an error while
# calculating PG load, revert to random PG selection method
if not port_group:
port_group = random.choice(conf_port_groups)
# Port group not extracted from volume-type or cinder.conf, raise
if not port_group: