VNX: New Cinder driver in Newton

Previous VNX driver implemented all array operations
in the driver itself, which complicated the driver logic.
In this patch, we leverage a library named
storops to interact with VNX array.
New changes below:

* Consolidate VNX driver entry, both
FC and iSCSI driver use the same entry name:
volume_driver =
cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver

* A new driver option is introduced:
storage_protocol = <fc|iscsi>

* Some existing bugs no longer exist.
Co-authored-By: Tang Tina <tina.tang@emc.com>
Co-authored-By: Liang Ryan <ryan.liang@emc.com>

DocImpact
Implements: blueprint new-vnx-driver
Closes-bug: #1569245
Closes-bug: #1524160
Closes-bug: #1589338
Change-Id: I9f31db708b022b16debaa4f6c5a87d95e5ac2a4f
This commit is contained in:
Peter Wang 2016-05-04 18:13:10 +08:00 committed by Ryan LIANG
parent 9da9ebb345
commit 572b84c073
30 changed files with 9612 additions and 12267 deletions

View File

@ -78,10 +78,10 @@ from cinder.volume.drivers import drbdmanagedrv as \
cinder_volume_drivers_drbdmanagedrv
from cinder.volume.drivers.emc import emc_vmax_common as \
cinder_volume_drivers_emc_emcvmaxcommon
from cinder.volume.drivers.emc import emc_vnx_cli as \
cinder_volume_drivers_emc_emcvnxcli
from cinder.volume.drivers.emc import scaleio as \
cinder_volume_drivers_emc_scaleio
from cinder.volume.drivers.emc.vnx import common as \
cinder_volume_drivers_emc_vnx_common
from cinder.volume.drivers.emc import xtremio as \
cinder_volume_drivers_emc_xtremio
from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx
@ -253,7 +253,6 @@ def list_opts():
cinder_context.context_opts,
cinder_scheduler_driver.scheduler_driver_opts,
cinder_volume_drivers_scality.volume_opts,
cinder_volume_drivers_emc_emcvnxcli.loc_opts,
cinder_volume_drivers_vmware_vmdk.vmdk_opts,
cinder_volume_drivers_lenovo_lenovocommon.common_opts,
cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts,
@ -275,6 +274,7 @@ def list_opts():
cinder_api_views_versions.versions_opts,
cinder_volume_drivers_nimble.nimble_opts,
cinder_volume_drivers_windows_windows.windows_opts,
cinder_volume_drivers_emc_vnx_common.EMC_VNX_OPTS,
cinder_volume_drivers_san_hp_hpmsacommon.common_opts,
cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts,
cinder_image_glance.glance_opts,

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,27 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Install fake ``storops`` modules into ``sys.modules`` so that the VNX
# driver code under test imports these test doubles instead of the real
# storops library (which is not a test dependency).
import sys
import mock
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops
# A bare Mock stands in for the ``storops.vnx`` sub-package.
fake_vnx = mock.Mock()
fake_storops.exception = fake_exception
fake_storops.vnx = fake_vnx
# From here on, ``import storops`` / ``import storops.vnx`` resolve to the
# fakes above in any module loaded during the tests.
sys.modules['storops'] = fake_storops
sys.modules['storops.vnx'] = fake_vnx

View File

@ -0,0 +1,119 @@
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import enum
import six
class Enum(enum.Enum):
    """Base enumeration with parsing/validation helpers.

    Mimics the helper API of the storops library's enums so the VNX
    driver can be unit-tested without the real library installed.
    """

    @classmethod
    def verify(cls, value, allow_none=True):
        """Raise ValueError if ``value`` is not a member of this enum.

        ``None`` is accepted unless ``allow_none`` is False.
        """
        if value is None and not allow_none:
            # NOTE: the '%' expansion must happen inside the constructor;
            # applying it to the ValueError instance raises TypeError.
            raise ValueError(
                'None is not allowed here for %s.' % cls.__name__)
        elif value is not None and not isinstance(value, cls):
            raise ValueError('%(value)s is not an instance of %(name)s.' % {
                'value': value, 'name': cls.__name__})

    @classmethod
    def get_all(cls):
        """Return all members of this enum as a list."""
        return list(cls)

    @classmethod
    def get_opt(cls, value):
        """Return the option mapped to ``value`` via ``get_option_map``.

        :raises NotImplementedError: when no option map is defined.
        :raises ValueError: when ``value`` has no mapped option.
        """
        option_map = cls.get_option_map()
        if option_map is None:
            raise NotImplementedError(
                'Option map is not defined for %s.' % cls.__name__)
        ret = option_map.get(value, None)
        if ret is None:
            raise ValueError('%(value)s is not a valid option for %(name)s.'
                             % {'value': value, 'name': cls.__name__})
        return ret

    @classmethod
    def parse(cls, value):
        """Convert a str/int/member/None into an enum member (or None)."""
        if isinstance(value, six.string_types):
            ret = cls.from_str(value)
        elif isinstance(value, six.integer_types):
            ret = cls.from_int(value)
        elif isinstance(value, cls):
            ret = value
        elif value is None:
            ret = None
        else:
            raise ValueError(
                'Not supported value type: %s.' % type(value))
        return ret

    def is_equal(self, value):
        """Compare with ``value``; strings compare case-insensitively."""
        if isinstance(value, six.string_types):
            ret = self.value.lower() == value.lower()
        else:
            ret = self.value == value
        return ret

    @classmethod
    def from_int(cls, value):
        """Look up a member by integer, via the int index when defined."""
        ret = None
        int_index = cls.get_int_index()
        if int_index is not None:
            try:
                ret = int_index[value]
            except IndexError:
                pass
        else:
            try:
                ret = next(i for i in cls.get_all() if i.is_equal(value))
            except StopIteration:
                pass
        if ret is None:
            # Raise a descriptive ValueError instead of a bare one.
            cls._raise_invalid_value(value)
        return ret

    @classmethod
    def from_str(cls, value):
        """Look up a member by (case-insensitive) string value."""
        ret = None
        if value is not None:
            for item in cls.get_all():
                if item.is_equal(value):
                    ret = item
                    break
            else:
                # for/else: executes only when no member matched.
                cls._raise_invalid_value(value)
        return ret

    @classmethod
    def _raise_invalid_value(cls, value):
        # Single place to build the "invalid value" error message.
        msg = ('%(value)s is not a valid value for %(name)s.'
               ) % {'value': value, 'name': cls.__name__}
        raise ValueError(msg)

    @classmethod
    def get_option_map(cls):
        # Subclasses override this to provide a value -> option mapping.
        # (Was ``raise None``, which raised TypeError instead of signaling
        # "not defined" to get_opt.)
        return None

    @classmethod
    def get_int_index(cls):
        # Subclasses override this to provide an int -> member sequence.
        return None

    @classmethod
    def values(cls):
        """Return the raw values of all members."""
        return [m.value for m in cls.__members__.values()]

    @classmethod
    def enum_name(cls):
        """Return the class name of this enum."""
        return cls.__name__

View File

@ -0,0 +1,172 @@
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Fake storops exception hierarchy used by the VNX driver unit tests.
# ``message`` holds string fragment(s) matched against CLI output and
# ``error_code`` holds a numeric CLI error code, mirroring the attributes
# of the corresponding real storops exceptions.
class StoropsException(Exception):
    message = 'Storops Error.'
class VNXException(StoropsException):
    message = "VNX Error."
# --- Storage group errors ---
class VNXStorageGroupError(VNXException):
    pass
class VNXAttachAluError(VNXException):
    pass
class VNXAluAlreadyAttachedError(VNXAttachAluError):
    # Tuple: either fragment identifies an "already attached" failure.
    message = (
        'LUN already exists in the specified storage group',
        'Requested LUN has already been added to this Storage Group')
class VNXDetachAluError(VNXStorageGroupError):
    pass
class VNXDetachAluNotFoundError(VNXDetachAluError):
    message = 'No such Host LUN in this Storage Group'
class VNXCreateStorageGroupError(VNXStorageGroupError):
    pass
class VNXStorageGroupNameInUseError(VNXCreateStorageGroupError):
    message = 'Storage Group name already in use'
class VNXNoHluAvailableError(VNXStorageGroupError):
    pass
# --- Migration errors ---
class VNXMigrationError(VNXException):
    pass
class VNXTargetNotReadyError(VNXMigrationError):
    message = 'The destination LUN is not available for migration'
# --- Snapshot errors ---
class VNXSnapError(VNXException):
    pass
class VNXDeleteAttachedSnapError(VNXSnapError):
    error_code = 0x716d8003
class VNXCreateSnapError(VNXException):
    # NOTE(review): inherits VNXException rather than VNXSnapError --
    # confirm this mirrors the real storops hierarchy.
    message = 'Cannot create the snapshot.'
class VNXAttachSnapError(VNXSnapError):
    message = 'Cannot attach the snapshot.'
class VNXDetachSnapError(VNXSnapError):
    message = 'Cannot detach the snapshot.'
class VNXSnapAlreadyMountedError(VNXSnapError):
    error_code = 0x716d8055
class VNXSnapNameInUseError(VNXSnapError):
    error_code = 0x716d8005
class VNXSnapNotExistsError(VNXSnapError):
    message = 'The specified snapshot does not exist.'
# --- LUN errors ---
class VNXLunError(VNXException):
    pass
class VNXCreateLunError(VNXLunError):
    pass
class VNXLunNameInUseError(VNXCreateLunError):
    error_code = 0x712d8d04
class VNXLunExtendError(VNXLunError):
    pass
class VNXLunExpandSizeError(VNXLunExtendError):
    error_code = 0x712d8e04
class VNXLunPreparingError(VNXLunError):
    error_code = 0x712d8e0e
class VNXLunNotFoundError(VNXLunError):
    message = 'Could not retrieve the specified (pool lun).'
class VNXDeleteLunError(VNXLunError):
    pass
class VNXCompressionError(VNXLunError):
    pass
class VNXCompressionAlreadyEnabledError(VNXCompressionError):
    message = 'Compression on the specified LUN is already turned on.'
# --- Consistency group errors ---
class VNXConsistencyGroupError(VNXException):
    pass
class VNXCreateConsistencyGroupError(VNXConsistencyGroupError):
    pass
class VNXConsistencyGroupNameInUseError(VNXCreateConsistencyGroupError):
    error_code = 0x716d8021
class VNXConsistencyGroupNotFoundError(VNXConsistencyGroupError):
    message = 'Cannot find the consistency group'
class VNXPingNodeError(VNXException):
    pass
# --- MirrorView (replication) errors ---
class VNXMirrorException(VNXException):
    pass
class VNXMirrorNameInUseError(VNXMirrorException):
    message = 'Mirror name already in use'
class VNXMirrorPromotePrimaryError(VNXMirrorException):
    message = 'Cannot remove or promote a primary image.'
class VNXMirrorNotFoundError(VNXMirrorException):
    message = 'Mirror not found'

View File

@ -0,0 +1,76 @@
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.tests.unit.volume.drivers.emc.vnx import fake_enum
# Fake storops enumerations.  The member values must match the real
# storops enums exactly, since the driver code compares against them.
class VNXSystem(object):
    # Stand-in for storops' VNXSystem; tests patch or mock it as needed.
    pass
class VNXEnum(fake_enum.Enum):
    # Base class of all fake VNX enums; inherits parse/verify helpers.
    pass
class VNXSPEnum(VNXEnum):
    # Storage processors of a VNX array.
    SP_A = 'SP A'
    SP_B = 'SP B'
    CONTROL_STATION = 'Celerra'
class VNXProvisionEnum(VNXEnum):
    # value of spec "provisioning:type"
    THIN = 'thin'
    THICK = 'thick'
    COMPRESSED = 'compressed'
    DEDUPED = 'deduplicated'
class VNXMigrationRate(VNXEnum):
    # LUN migration rates.
    LOW = 'low'
    MEDIUM = 'medium'
    HIGH = 'high'
    ASAP = 'asap'
class VNXTieringEnum(VNXEnum):
    # LUN tiering policies (presumably the "storagetype:tiering" spec
    # values -- confirm against the driver's extra-specs handling).
    NONE = 'none'
    HIGH_AUTO = 'starthighthenauto'
    AUTO = 'auto'
    HIGH = 'highestavailable'
    LOW = 'lowestavailable'
    NO_MOVE = 'nomovement'
class VNXMirrorViewRecoveryPolicy(VNXEnum):
    MANUAL = 'manual'
    AUTO = 'automatic'
class VNXMirrorViewSyncRate(VNXEnum):
    HIGH = 'high'
    MEDIUM = 'medium'
    LOW = 'low'
class VNXMirrorImageState(VNXEnum):
    # MirrorView image states as reported by the array.
    SYNCHRONIZED = 'Synchronized'
    OUT_OF_SYNC = 'Out-of-Sync'
    SYNCHRONIZING = 'Synchronizing'
    CONSISTENT = 'Consistent'
    SCRAMBLED = 'Scrambled'
    INCOMPLETE = 'Incomplete'
    LOCAL_ONLY = 'Local Only'
    EMPTY = 'Empty'

View File

@ -0,0 +1,442 @@
###########################################################
# Common
###########################################################
volume: &volume_base
_type: 'volume'
_properties: &volume_base_properties
status: 'creating'
size: 1
id:
_uuid: volume_id
provider_auth: 'None'
host: 'host@backendsec#unit_test_pool'
project_id:
_uuid: project_id
provider_location: &provider_location
_build_provider_location: &provider_location_dict
id: 1
type: 'lun'
system: 'fake_serial'
base_lun_name: 'test'
version: '07.00.00'
display_name: 'volume-1'
display_description: 'test volume'
volume_type_id:
consistencygroup_id:
volume_attachment:
_properties: {}
volume_metadata:
_properties: {}
host: &host_base
_properties:
host: 'host@backendsec#unit_test_pool'
consistency_group: &cg_base
_type: 'cg'
_properties: &cg_base_properties
id:
_uuid: consistency_group_id
status: 'creating'
name: 'cg_name'
host: 'host@backend#unit_test_pool'
consistency_group_with_type: &cg_base_with_type
_type: 'cg'
_properties:
<<: *cg_base_properties
volume_type_id: 'type1'
snapshot: &snapshot_base
_type: 'snapshot'
_properties: &snapshot_base_properties
id:
_uuid: snapshot_id
status: available
name: 'snapshot_name'
volume:
_type: 'volume'
_properties:
<<: *volume_base_properties
name: 'attached_volume_name'
volume_name: 'attached_volume_name'
cg_snapshot: &cg_snapshot_base
_type: 'cg_snapshot'
_properties: &cg_snapshot_base_properties
id:
_uuid: cgsnapshot_id
status: 'creating'
###########################################################
# TestCommonAdapter, TestISCSIAdapter, TestFCAdapter
###########################################################
test_mock_driver_input_inner:
volume: *volume_base
test_create_volume: &test_create_volume
volume: *volume_base
test_create_volume_error: *test_create_volume
test_create_thick_volume: *test_create_volume
test_migrate_volume:
volume: *volume_base
test_migrate_volume_host_assisted:
volume: *volume_base
test_delete_volume_not_force: &test_delete_volume_not_force
volume: *volume_base
test_delete_volume_force: *test_delete_volume_not_force
test_retype_need_migration_when_host_changed:
volume: *volume_base
host:
_properties:
host: 'host@backendsec#another_pool'
test_retype_need_migration_for_smp_volume:
volume:
_type: 'volume'
_properties:
<<: *volume_base_properties
provider_location:
_build_provider_location:
<<: *provider_location_dict
type: 'smp'
host: *host_base
test_retype_need_migration_when_provision_changed:
volume: *volume_base
host: *host_base
test_retype_not_need_migration_when_provision_changed:
volume: *volume_base
host: *host_base
test_retype_not_need_migration:
volume: *volume_base
host: *host_base
test_retype_need_migration:
volume:
_type: 'volume'
_properties:
<<: *volume_base_properties
volume_type_id:
_uuid: volume_type_id
host: *host_base
test_retype_lun_has_snap:
volume: *volume_base
host: *host_base
test_retype_turn_on_compression_change_tier:
volume: *volume_base
host: *host_base
test_retype_change_tier:
volume: *volume_base
host: *host_base
test_create_consistencygroup:
cg: *cg_base
test_delete_consistencygroup:
cg: *cg_base
test_delete_consistencygroup_with_volume:
cg: *cg_base
vol1: *volume_base
vol2: *volume_base
test_delete_consistencygroup_error:
cg: *cg_base
vol1: *volume_base
vol2: *volume_base
test_delete_consistencygroup_volume_error:
cg: *cg_base
vol1: *volume_base
vol2: *volume_base
test_extend_volume:
volume: *volume_base
test_create_snapshot_adapter:
snapshot: *snapshot_base
test_delete_snapshot_adapter:
snapshot: *snapshot_base
test_create_cgsnapshot: &cg_snap_and_snaps
cg_snap: *cg_snapshot_base
snap1: *snapshot_base
snap2: *snapshot_base
test_delete_cgsnapshot: *cg_snap_and_snaps
test_manage_existing_lun_no_exist:
volume: *volume_base
test_manage_existing_invalid_pool:
volume: *volume_base
test_manage_existing_get_size:
volume: *volume_base
test_manage_existing_type_mismatch:
volume:
_type: 'volume'
_properties:
<<: *volume_base_properties
volume_type_id:
_uuid: volume_type_id
test_manage_existing:
volume:
_type: 'volume'
_properties:
<<: *volume_base_properties
volume_type_id:
_uuid: volume_type_id
test_manage_existing_smp:
volume: *volume_base
test_create_cloned_volume:
volume: *volume_base
src_vref:
_type: volume
_properties:
<<: *volume_base_properties
id:
_uuid: volume2_id
size: 2
test_create_cloned_volume_snapcopy:
volume:
_type: volume
_properties:
<<: *volume_base_properties
src_vref:
_type: volume
_properties:
<<: *volume_base_properties
id:
_uuid: volume2_id
size: 2
test_create_volume_from_snapshot:
volume: *volume_base
snapshot: *snapshot_base
test_create_volume_from_snapshot_snapcopy:
volume: *volume_base
snapshot: *snapshot_base
test_get_base_lun_name:
volume: *volume_base
test_create_cg_from_cgsnapshot:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume_id
vol2:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume2_id
cg: *cg_base
cg_snap: *cg_snapshot_base
snap1:
_type: 'snapshot'
_properties:
<<: *snapshot_base_properties
id:
_uuid: snapshot_id
snap2:
_type: 'snapshot'
_properties:
<<: *snapshot_base_properties
id:
_uuid: snapshot2_id
test_create_cloned_cg:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: consistency_group_id
cg: *cg_base
src_cg:
_type: 'cg'
_properties:
<<: *cg_base_properties
id:
_uuid: consistency_group2_id
name: 'src_cg_name'
src_vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: consistency_group2_id
test_assure_host_access:
volume: *volume_base
test_assure_host_access_without_auto_register_new_sg:
volume: *volume_base
test_assure_host_access_without_auto_register:
volume: *volume_base
test_auto_register_initiator:
volume: *volume_base
test_auto_register_initiator_no_white_list:
volume: *volume_base
test_remove_host_access:
volume: *volume_base
test_remove_host_access_sg_absent:
volume: *volume_base
test_remove_host_access_volume_not_in_sg:
volume: *volume_base
test_update_consistencygroup:
cg: *cg_base
volume_add:
<<: *volume_base
_properties:
<<: *volume_base_properties
provider_location:
_build_provider_location:
<<: *provider_location_dict
id: 1
volume_remove:
<<: *volume_base
_properties:
<<: *volume_base_properties
provider_location:
_build_provider_location:
<<: *provider_location_dict
id: 2
test_create_export_snapshot:
snapshot: *snapshot_base
test_remove_export_snapshot:
snapshot: *snapshot_base
test_initialize_connection_snapshot:
snapshot: *snapshot_base
test_terminate_connection_snapshot:
snapshot: *snapshot_base
test_setup_lun_replication:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume_id
volume_type_id:
_uuid: volume_type_id
test_cleanup_replication:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume2_id
volume_type_id:
_uuid: volume_type_id
test_failover_host:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume3_id
volume_type_id:
_uuid: volume_type_id
test_failover_host_invalid_backend_id:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume4_id
volume_type_id:
_uuid: volume_type_id
test_failover_host_failback:
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume5_id
volume_type_id:
_uuid: volume_type_id
test_get_pool_name:
volume: *volume_base
test_update_migrated_volume:
volume: *volume_base
new_volume: *volume_base
test_update_migrated_volume_smp:
volume: *volume_base
new_volume:
<<: *volume_base
_properties:
<<: *volume_base_properties
provider_location:
_build_provider_location:
<<: *provider_location_dict
type: smp
###########################################################
# TestUtils
###########################################################
test_validate_cg_type:
cg: *cg_base_with_type
###########################################################
# TestClient
###########################################################
test_get_lun_id:
volume: *volume_base
test_get_lun_id_without_provider_location:
volume:
<<: *volume_base
_properties:
<<: *volume_base_properties
provider_location:

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,441 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from cinder.tests.unit.consistencygroup import fake_cgsnapshot
from cinder.tests.unit.consistencygroup import fake_consistencygroup
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as lib_ex
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
from cinder.tests.unit.volume.drivers.emc.vnx import utils
from cinder.volume.drivers.emc.vnx import adapter
from cinder.volume.drivers.emc.vnx import client
from cinder.volume.drivers.emc.vnx import common
from cinder.volume.drivers.emc.vnx import utils as vnx_utils
SYMBOL_TYPE = '_type'
SYMBOL_PROPERTIES = '_properties'
SYMBOL_METHODS = '_methods'
SYMBOL_SIDE_EFFECT = '_side_effect'
SYMBOL_RAISE = '_raise'
SYMBOL_CONTEXT = '_context'
UUID = '_uuid'
SYMBOL_ENUM = '_enum'
def _is_driver_object(obj_body):
return isinstance(obj_body, dict) and SYMBOL_PROPERTIES in obj_body
class DriverResourceMock(dict):
    """Loads Cinder-side resources (volumes, cgs, snapshots, ...) from a
    YAML file and converts each description into a fake driver object.

    The resulting mapping is ``{case_name: {object_name: object}}``.
    """

    # Subclasses map a ``_type`` value to a factory callable.
    fake_func_mapping = {}

    def __init__(self, yaml_file):
        yaml_dict = utils.load_yaml(yaml_file)
        if not isinstance(yaml_dict, dict):
            return
        for case_name, case_res in yaml_dict.items():
            if not isinstance(case_res, dict):
                continue
            self[case_name] = {}
            for obj_name, obj_body in case_res.items():
                self[case_name][obj_name] = self._parse_driver_object(obj_body)

    def _parse_driver_object(self, obj_body):
        """Recursively convert a YAML node into a driver object."""
        if isinstance(obj_body, dict):
            obj_body = {k: self._parse_driver_object(v)
                        for k, v in obj_body.items()}
            if _is_driver_object(obj_body):
                return self._create_object(obj_body)
            else:
                return obj_body
        elif isinstance(obj_body, list):
            # NOTE: build a real list; ``map`` returns a one-shot lazy
            # iterator on Python 3 and would break repeated access.
            return [self._parse_driver_object(each) for each in obj_body]
        else:
            return obj_body

    def _create_object(self, obj_body):
        """Build one fake object from its ``_properties`` description."""
        props = obj_body[SYMBOL_PROPERTIES]
        for prop_name, prop_value in props.items():
            if isinstance(prop_value, dict) and prop_value:
                # get the first key as the convert function
                func_name = list(prop_value.keys())[0]
                if func_name.startswith('_'):
                    func = getattr(self, func_name)
                    props[prop_name] = func(prop_value[func_name])
        if (SYMBOL_TYPE in obj_body and
                obj_body[SYMBOL_TYPE] in self.fake_func_mapping):
            return self.fake_func_mapping[obj_body[SYMBOL_TYPE]](**props)
        else:
            return props

    @staticmethod
    def _uuid(uuid_key):
        # Resolve e.g. 'volume_id' to fake_constants.VOLUME_ID.
        uuid_key = uuid_key.upper()
        return getattr(fake_constants, uuid_key)
def _fake_volume_wrapper(*args, **kwargs):
    """Build a fake Volume object.

    Loads the attachment/metadata attributes only when the corresponding
    keys are present in the YAML-derived kwargs.
    """
    expected_attrs_key = {'volume_attachment': 'volume_attachment',
                          'volume_metadata': 'metadata'}
    return fake_volume.fake_volume_obj(
        None,
        expected_attrs=[
            v for (k, v) in expected_attrs_key.items() if k in kwargs],
        **kwargs)
def _fake_cg_wrapper(*args, **kwargs):
    """Build a fake ConsistencyGroup object from keyword properties."""
    return fake_consistencygroup.fake_consistencyobject_obj(None, **kwargs)
def _fake_snapshot_wrapper(*args, **kwargs):
    """Build a fake Snapshot; loads the 'volume' attr when one is given."""
    return fake_snapshot.fake_snapshot_obj(None,
                                           expected_attrs=(
                                               ['volume'] if 'volume' in kwargs
                                               else None),
                                           **kwargs)
def _fake_cg_snapshot_wrapper(*args, **kwargs):
    """Build a fake CGSnapshot object from keyword properties."""
    return fake_cgsnapshot.fake_cgsnapshot_obj(None, **kwargs)
class EnumBuilder(object):
    """Builds a fake storops enum member from a ``{'_enum': {cls: value}}``
    YAML node.
    """

    def __init__(self, enum_dict):
        enum_dict = enum_dict[SYMBOL_ENUM]
        # The node is expected to hold a single class-name -> value pair;
        # if several are present, only the last one iterated wins.
        for k, v in six.iteritems(enum_dict):
            self.klazz = k
            self.value = v

    def __call__(self, *args, **kwargs):
        # Resolve the enum class on the fake storops module, then parse.
        return getattr(storops, self.klazz).parse(self.value)
class CinderResourceMock(DriverResourceMock):
    """DriverResourceMock that builds real Cinder fake objects (volumes,
    consistency groups, snapshots, cg snapshots).
    """

    # fake_func in the mapping should be like func(*args, **kwargs)
    fake_func_mapping = {'volume': _fake_volume_wrapper,
                         'cg': _fake_cg_wrapper,
                         'snapshot': _fake_snapshot_wrapper,
                         'cg_snapshot': _fake_cg_snapshot_wrapper}

    # NOTE: the redundant ``__init__`` that only delegated to super() with
    # the same signature has been removed; behavior is unchanged.

    @staticmethod
    def _build_provider_location(props):
        # Invoked by _create_object for '_build_provider_location' nodes.
        return vnx_utils.build_provider_location(
            props.get('system'), props.get('type'),
            six.text_type(props.get('id')),
            six.text_type(props.get('base_lun_name')),
            props.get('version'))
class ContextMock(object):
    """A no-op context manager, used as the return value of mocked
    context-manager methods.
    """

    def __enter__(self):
        # Nothing to set up; the ``with`` body receives None.
        return None

    def __exit__(self, exc_type, exc_valu, exc_tb):
        # A falsy return propagates any exception raised in the body.
        return None
class MockBase(object):
    """Base class of all the mocks.

    Converts a YAML dict into the appropriate mock object, dispatching on
    which marker keys ('_type', '_enum', '_properties'/'_methods') the
    dict contains.
    """

    def _is_mock_object(self, yaml_info):
        # A mock object node defines properties and/or methods.
        return (isinstance(yaml_info, dict) and
                (SYMBOL_PROPERTIES in yaml_info or
                 SYMBOL_METHODS in yaml_info))

    def _is_object_with_type(self, yaml_dict):
        return isinstance(yaml_dict, dict) and SYMBOL_TYPE in yaml_dict

    def _is_object_with_enum(self, yaml_dict):
        return isinstance(yaml_dict, dict) and SYMBOL_ENUM in yaml_dict

    def _build_mock_object(self, yaml_dict):
        """Recursively turn *yaml_dict* into mock objects and containers."""
        if self._is_object_with_type(yaml_dict):
            # NOTE(review): every '_type' node becomes a FakePort here --
            # confirm no other typed storage objects are expected.
            return FakePort(yaml_dict)
        elif self._is_object_with_enum(yaml_dict):
            return EnumBuilder(yaml_dict)()
        elif self._is_mock_object(yaml_dict):
            return StorageObjectMock(yaml_dict)
        elif isinstance(yaml_dict, dict):
            return {k: self._build_mock_object(v)
                    for k, v in yaml_dict.items()}
        elif isinstance(yaml_dict, list):
            return [self._build_mock_object(each) for each in yaml_dict]
        else:
            return yaml_dict
class StorageObjectMock(object):
    """Mock of a storops storage object built from a YAML description.

    All attribute reads and writes are backed by an internal ``props``
    dict, so arbitrary properties and methods can be attached at runtime.
    """

    PROPS = 'props'

    def __init__(self, yaml_dict):
        self.__dict__[StorageObjectMock.PROPS] = {}
        props = yaml_dict.get(SYMBOL_PROPERTIES, None)
        if props:
            for k, v in props.items():
                setattr(self, k, StoragePropertyMock(k, v)())
        methods = yaml_dict.get(SYMBOL_METHODS, None)
        if methods:
            for k, v in methods.items():
                setattr(self, k, StorageMethodMock(k, v))

    def __setattr__(self, key, value):
        # Every attribute write lands in the props dict, never __dict__.
        self.__dict__[StorageObjectMock.PROPS][key] = value

    def __getattr__(self, item):
        # Only invoked when normal attribute lookup fails; fall back to
        # the props dict.
        try:
            return self.__dict__[StorageObjectMock.PROPS][item]
        except KeyError:
            # NOTE: the original formatted the message outside the KeyError
            # constructor (raising TypeError) inside a dead except clause;
            # now callers get the intended descriptive KeyError.
            raise KeyError('%(item)s not exist in mock object.'
                           % {'item': item})
class FakePort(StorageObjectMock):
    """Mock of a VNX port; equal and hashable by (sp, port_id, vport_id)."""

    def __eq__(self, other):
        o_sp = other.sp
        o_port_id = other.port_id
        o_vport_id = other.vport_id
        ret = True
        ret &= self.sp == o_sp
        ret &= self.port_id == o_port_id
        ret &= self.vport_id == o_vport_id
        return ret

    def __hash__(self):
        # Must stay consistent with __eq__ so ports work in sets/dicts.
        return hash((self.sp, self.port_id, self.vport_id))
class StoragePropertyMock(mock.PropertyMock, MockBase):
    """PropertyMock built from a YAML property description.

    Supports either a plain return value or a '_side_effect' sequence.
    """

    def __init__(self, name, property_body):
        return_value = property_body
        side_effect = None
        # only support return_value and side_effect for property
        if (isinstance(property_body, dict) and
                SYMBOL_SIDE_EFFECT in property_body):
            side_effect = self._build_mock_object(
                property_body[SYMBOL_SIDE_EFFECT])
            return_value = None
        if side_effect is not None:
            super(StoragePropertyMock, self).__init__(
                name=name,
                side_effect=side_effect)
        else:
            # Plain value: recursively convert nested mock descriptions.
            return_value = self._build_mock_object(return_value)
            super(StoragePropertyMock, self).__init__(
                name=name,
                return_value=return_value)
class StorageMethodMock(mock.Mock, MockBase):
    """Mock of a storage-object method built from a YAML description.

    Supports '_raise' (raise an exception), '_side_effect', the
    '_context' marker (return a ContextMock), or a plain return value.
    """

    def __init__(self, name, method_body):
        return_value = method_body
        exception = None
        side_effect = None
        # support return_value, side_effect and exception for method
        if isinstance(method_body, dict):
            if (SYMBOL_SIDE_EFFECT in method_body or
                    SYMBOL_RAISE in method_body):
                exception = method_body.get(SYMBOL_RAISE, None)
                side_effect = method_body.get(SYMBOL_SIDE_EFFECT, None)
                return_value = None
        if exception is not None:
            ex = None
            if isinstance(exception, dict) and exception:
                # The single key names the exception class; resolved first
                # against the fake storops exceptions, then the driver's
                # common module.
                ex_name = list(exception.keys())[0]
                ex_tmp = [getattr(ex_module, ex_name, None)
                          for ex_module in [lib_ex, common]]
                try:
                    ex = [each for each in ex_tmp if each is not None][0]
                    super(StorageMethodMock, self).__init__(
                        name=name,
                        side_effect=ex(exception[ex_name]))
                except IndexError:
                    # Neither module defines the named exception.
                    raise KeyError('Exception %(ex_name)s not found.'
                                   % {'ex_name': ex_name})
            else:
                raise KeyError('Invalid Exception body, should be a dict.')
        elif side_effect is not None:
            super(StorageMethodMock, self).__init__(
                name=name,
                side_effect=self._build_mock_object(side_effect))
        elif return_value is not None:
            super(StorageMethodMock, self).__init__(
                name=name,
                return_value=(ContextMock() if return_value == SYMBOL_CONTEXT
                              else self._build_mock_object(return_value)))
        else:
            super(StorageMethodMock, self).__init__(
                name=name, return_value=None)
class StorageResourceMock(dict, MockBase):
    """Loads storage-side (storops) mocks from a YAML file.

    The resulting mapping is ``{test_case_name: {object_name: mock}}``.
    """

    def __init__(self, yaml_file):
        yaml_dict = utils.load_yaml(yaml_file)
        if not isinstance(yaml_dict, dict):
            return
        for section, sec_body in yaml_dict.items():
            if isinstance(sec_body, dict):
                self[section] = {obj_name: self._build_mock_object(obj_body)
                                 for obj_name, obj_body
                                 in sec_body.items()}
            else:
                # Sections without a dict body yield an empty mapping.
                self[section] = {}
# Cinder-side resources shared by all test classes; every class reads
# from the same 'mocked_cinder.yaml' definitions, keyed by test name.
cinder_res = CinderResourceMock('mocked_cinder.yaml')
DRIVER_RES_MAPPING = {
    'TestResMock': cinder_res,
    'TestCommonAdapter': cinder_res,
    'TestISCSIAdapter': cinder_res,
    'TestFCAdapter': cinder_res,
    'TestUtils': cinder_res,
    'TestClient': cinder_res
}
def mock_driver_input(func):
    """Decorator injecting the per-test Cinder resources as 2nd argument.

    Resources are looked up by the test class name and test method name.
    """
    @six.wraps(func)
    def decorated(cls, *args, **kwargs):
        return func(cls,
                    DRIVER_RES_MAPPING[cls.__class__.__name__][func.__name__],
                    *args, **kwargs)
    return decorated
# Storage-side (storops) mocks shared by the test classes, keyed by
# test class name.
vnx_res = StorageResourceMock('mocked_vnx.yaml')
STORAGE_RES_MAPPING = {
    'TestResMock': StorageResourceMock('test_res_mock.yaml'),
    'TestCondition': vnx_res,
    'TestClient': vnx_res,
    'TestCommonAdapter': vnx_res,
    'TestISCSIAdapter': vnx_res,
    'TestFCAdapter': vnx_res,
    'TestTaskflow': vnx_res,
    'TestExtraSpecs': vnx_res,
}
# Key of the mocked VNXSystem inside a test case's storage resources.
DEFAULT_STORAGE_RES = 'vnx'
def _build_client():
    """Create a VNX client with fixed fake credentials."""
    return client.Client(ip='192.168.1.2',
                         username='sysadmin',
                         password='sysadmin',
                         scope='global',
                         naviseccli=None,
                         sec_file=None)
def patch_client(func):
    """Decorator running a test with a client backed by the mocked VNX.

    Injects ``(client, storage_res)`` after the test instance argument.
    """
    @six.wraps(func)
    @utils.patch_looping_call
    def decorated(cls, *args, **kwargs):
        storage_res = (
            STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
        with utils.patch_vnxsystem as patched_vnx:
            if DEFAULT_STORAGE_RES in storage_res:
                # Replace the VNXSystem with the per-test 'vnx' mock.
                patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES]
            client = _build_client()
            return func(cls, client, storage_res, *args, **kwargs)
    return decorated
# Maps a protocol name to the adapter class under test.
PROTOCOL_COMMON = 'Common'
PROTOCOL_MAPPING = {
    PROTOCOL_COMMON: adapter.CommonAdapter,
    common.PROTOCOL_ISCSI: adapter.ISCSIAdapter,
    common.PROTOCOL_FC: adapter.FCAdapter
}
def patch_adapter_init(protocol):
    """Decorator factory: run a test with a fully-constructed adapter.

    The adapter's own constructor executes against the mocked VNX system.
    """
    def inner_patch_adapter(func):
        @six.wraps(func)
        @utils.patch_looping_call
        def decorated(cls, *args, **kwargs):
            storage_res = (
                STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
            with utils.patch_vnxsystem as patched_vnx:
                if DEFAULT_STORAGE_RES in storage_res:
                    patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES]
                adapter = PROTOCOL_MAPPING[protocol](cls.configuration)
                return func(cls, adapter, storage_res, *args, **kwargs)
        return decorated
    return inner_patch_adapter
def _patch_adapter_prop(adapter, client):
try:
adapter.serial_number = client.get_serial()
except KeyError:
adapter.serial_number = 'faked_serial_number'
def patch_adapter(protocol):
    """Decorator factory: run a test with a hand-wired adapter.

    Unlike patch_adapter_init, the mocked client is attached to the
    adapter directly instead of letting the constructor do the wiring.
    """
    def inner_patch_adapter(func):
        @six.wraps(func)
        @utils.patch_looping_call
        def decorated(cls, *args, **kwargs):
            storage_res = (
                STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
            with utils.patch_vnxsystem:
                client = _build_client()
                adapter = PROTOCOL_MAPPING[protocol](cls.configuration, None)
            if DEFAULT_STORAGE_RES in storage_res:
                client.vnx = storage_res[DEFAULT_STORAGE_RES]
            adapter.client = client
            _patch_adapter_prop(adapter, client)
            return func(cls, adapter, storage_res, *args, **kwargs)
        return decorated
    return inner_patch_adapter
# Convenience decorators, one per supported protocol.
patch_common_adapter = patch_adapter(PROTOCOL_COMMON)
patch_iscsi_adapter = patch_adapter(common.PROTOCOL_ISCSI)
patch_fc_adapter = patch_adapter(common.PROTOCOL_FC)
def mock_storage_resources(func):
    """Decorator injecting only the storage-side mocks (no client/adapter)."""
    @six.wraps(func)
    def decorated(cls, *args, **kwargs):
        storage_res = (
            STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__])
        return func(cls, storage_res, *args, **kwargs)
    return decorated

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,463 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.emc.vnx import utils
from cinder.volume.drivers.emc.vnx import client as vnx_client
from cinder.volume.drivers.emc.vnx import common as vnx_common
class TestCondition(test.TestCase):
    """Tests for ``vnx_client.Condition.is_lun_io_ready``."""
    @res_mock.patch_client
    def test_is_lun_io_ready_false(self, client, mocked):
        # The mocked LUN's state makes the condition report "not ready yet".
        r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
        self.assertFalse(r)
    @res_mock.patch_client
    def test_is_lun_io_ready_true(self, client, mocked):
        r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
        self.assertTrue(r)
    @res_mock.patch_client
    def test_is_lun_io_ready_exception(self, client, mocked):
        # An unexpected LUN state surfaces as VolumeBackendAPIException.
        self.assertRaises(exception.VolumeBackendAPIException,
                          vnx_client.Condition.is_lun_io_ready,
                          mocked['lun'])
class TestClient(test.TestCase):
    """Tests for ``vnx_client.Client``.

    Every test method is wrapped by ``res_mock.patch_client``, which
    injects a client whose ``vnx`` attribute is built from the mocked
    storage resources declared for that test method.
    """

    def setUp(self):
        super(TestClient, self).setUp()

    def tearDown(self):
        super(TestClient, self).tearDown()

    @res_mock.patch_client
    def test_create_lun(self, client, mocked):
        client.create_lun(pool='pool1', name='test', size=1, provision=None,
                          tier=None, cg_id=None, ignore_thresholds=False)
        client.vnx.get_pool.assert_called_once_with(name='pool1')
        pool = client.vnx.get_pool(name='pool1')
        pool.create_lun.assert_called_with(lun_name='test',
                                           size_gb=1,
                                           provision=None,
                                           tier=None,
                                           ignore_thresholds=False)

    @res_mock.patch_client
    def test_create_lun_error(self, client, mocked):
        self.assertRaises(storops_ex.VNXCreateLunError,
                          client.create_lun,
                          pool='pool1',
                          name='test',
                          size=1,
                          provision=None,
                          tier=None,
                          cg_id=None,
                          ignore_thresholds=False)
        client.vnx.get_pool.assert_called_once_with(name='pool1')

    @res_mock.patch_client
    def test_create_lun_already_existed(self, client, mocked):
        # A LUN with the same name already exists: the client only looks
        # the LUN up instead of failing the creation.
        client.create_lun(pool='pool1', name='lun3', size=1, provision=None,
                          tier=None, cg_id=None, ignore_thresholds=False)
        client.vnx.get_lun.assert_called_once_with(name='lun3')

    @res_mock.patch_client
    def test_create_lun_in_cg(self, client, mocked):
        client.create_lun(
            pool='pool1', name='test', size=1, provision=None,
            tier=None, cg_id='cg1', ignore_thresholds=False)

    @res_mock.patch_client
    def test_create_lun_compression(self, client, mocked):
        client.create_lun(pool='pool1', name='lun2', size=1,
                          provision=storops.VNXProvisionEnum.COMPRESSED,
                          tier=None, cg_id=None,
                          ignore_thresholds=False)

    @res_mock.patch_client
    def test_migrate_lun(self, client, mocked):
        client.migrate_lun(src_id=1,
                           dst_id=2)
        lun = client.vnx.get_lun()
        lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH)

    @utils.patch_sleep
    @res_mock.patch_client
    def test_migrate_lun_with_retry(self, client, mocked, mock_sleep):
        lun = client.vnx.get_lun()
        self.assertRaises(storops_ex.VNXTargetNotReadyError,
                          client.migrate_lun,
                          src_id=4,
                          dst_id=5)
        lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
        # The retry path sleeps between attempts.
        mock_sleep.assert_called_with(15)

    @res_mock.patch_client
    def test_session_finished_faulted(self, client, mocked):
        lun = client.vnx.get_lun()
        r = client.session_finished(lun)
        self.assertTrue(r)

    @res_mock.patch_client
    def test_session_finished_migrating(self, client, mocked):
        lun = client.vnx.get_lun()
        r = client.session_finished(lun)
        self.assertFalse(r)

    @res_mock.patch_client
    def test_session_finished_not_existed(self, client, mocked):
        # No migration session for the LUN counts as "finished".
        lun = client.vnx.get_lun()
        r = client.session_finished(lun)
        self.assertTrue(r)

    @utils.patch_sleep
    @res_mock.patch_client
    def test_migrate_lun_error(self, client, mocked, mock_sleep):
        lun = client.vnx.get_lun()
        self.assertRaises(storops_ex.VNXMigrationError,
                          client.migrate_lun,
                          src_id=4,
                          dst_id=5)
        lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
        # A hard migration error must fail fast, without retry/sleep.
        mock_sleep.assert_not_called()

    @res_mock.patch_client
    def test_verify_migration(self, client, mocked):
        r = client.verify_migration(1, 2, 'test_wwn')
        self.assertTrue(r)

    @res_mock.patch_client
    def test_verify_migration_false(self, client, mocked):
        r = client.verify_migration(1, 2, 'fake_wwn')
        self.assertFalse(r)

    @res_mock.patch_client
    def test_cleanup_migration(self, client, mocked):
        client.cleanup_migration(1, 2)

    @res_mock.patch_client
    def test_get_lun_by_name(self, client, mocked):
        lun = client.get_lun(name='lun_name_test_get_lun_by_name')
        self.assertEqual(888, lun.lun_id)

    @res_mock.patch_client
    def test_delete_lun(self, client, mocked):
        client.delete_lun(mocked['lun'].name)

    @res_mock.patch_client
    def test_delete_smp(self, client, mocked):
        client.delete_lun(mocked['lun'].name)

    @res_mock.patch_client
    def test_delete_lun_not_exist(self, client, mocked):
        client.delete_lun(mocked['lun'].name)

    @res_mock.patch_client
    def test_delete_lun_exception(self, client, mocked):
        self.assertRaisesRegexp(storops_ex.VNXDeleteLunError,
                                'General lun delete error.',
                                client.delete_lun, mocked['lun'].name)

    @res_mock.patch_client
    def test_enable_compression(self, client, mocked):
        lun_obj = mocked['lun']
        client.enable_compression(lun_obj)
        lun_obj.enable_compression.assert_called_with(ignore_thresholds=True)

    @res_mock.patch_client
    def test_enable_compression_on_compressed_lun(self, client, mocked):
        # Enabling compression on an already-compressed LUN must not raise.
        lun_obj = mocked['lun']
        client.enable_compression(lun_obj)

    @res_mock.patch_client
    def test_get_vnx_enabler_status(self, client, mocked):
        re = client.get_vnx_enabler_status()
        self.assertTrue(re.dedup_enabled)
        self.assertFalse(re.compression_enabled)
        self.assertTrue(re.thin_enabled)
        self.assertFalse(re.fast_enabled)
        self.assertTrue(re.snap_enabled)

    @res_mock.patch_client
    def test_lun_has_snapshot_true(self, client, mocked):
        re = client.lun_has_snapshot(mocked['lun'])
        self.assertTrue(re)

    @res_mock.patch_client
    def test_lun_has_snapshot_false(self, client, mocked):
        re = client.lun_has_snapshot(mocked['lun'])
        self.assertFalse(re)

    @res_mock.patch_client
    def test_create_cg(self, client, mocked):
        cg = client.create_consistency_group('cg_name')
        self.assertIsNotNone(cg)

    @res_mock.patch_client
    def test_create_cg_already_existed(self, client, mocked):
        cg = client.create_consistency_group('cg_name_already_existed')
        self.assertIsNotNone(cg)

    @res_mock.patch_client
    def test_delete_cg(self, client, mocked):
        client.delete_consistency_group('deleted_name')

    @res_mock.patch_client
    def test_delete_cg_not_existed(self, client, mocked):
        client.delete_consistency_group('not_existed')

    @res_mock.patch_client
    def test_expand_lun(self, client, _ignore):
        client.expand_lun('lun', 10, poll=True)

    @res_mock.patch_client
    def test_expand_lun_not_poll(self, client, _ignore):
        client.expand_lun('lun', 10, poll=False)

    @res_mock.patch_client
    def test_expand_lun_already_expanded(self, client, _ignore):
        client.expand_lun('lun', 10)

    @utils.patch_no_sleep
    @res_mock.patch_client
    def test_expand_lun_not_ops_ready(self, client, _ignore):
        self.assertRaises(storops_ex.VNXLunPreparingError,
                          client.expand_lun, 'lun', 10)
        lun = client.vnx.get_lun()
        # expand must have been attempted exactly once.  (The original test
        # repeated this identical assertion under a '# Called twice'
        # comment; the duplicate contradicted assert_called_once_with and
        # added nothing, so it was dropped.)
        lun.expand.assert_called_once_with(10, ignore_thresholds=True)

    @res_mock.patch_client
    def test_create_snapshot(self, client, _ignore):
        client.create_snapshot('lun_test_create_snapshot',
                               'snap_test_create_snapshot')
        lun = client.vnx.get_lun()
        lun.create_snap.assert_called_once_with('snap_test_create_snapshot',
                                                allow_rw=True,
                                                auto_delete=False)

    @res_mock.patch_client
    def test_create_snapshot_snap_name_exist_error(self, client, _ignore):
        # An already-existing snapshot name must not raise.
        client.create_snapshot('lun_name', 'snapshot_name')

    @res_mock.patch_client
    def test_delete_snapshot(self, client, _ignore):
        client.delete_snapshot('snapshot_name')

    @res_mock.patch_client
    def test_delete_snapshot_delete_attached_error(self, client, _ignore):
        self.assertRaises(storops_ex.VNXDeleteAttachedSnapError,
                          client.delete_snapshot, 'snapshot_name')

    @res_mock.patch_client
    def test_copy_snapshot(self, client, mocked):
        client.copy_snapshot('old_name', 'new_name')

    @res_mock.patch_client
    def test_create_mount_point(self, client, mocked):
        client.create_mount_point('lun_name', 'smp_name')

    @res_mock.patch_client
    def test_attach_mount_point(self, client, mocked):
        client.attach_snapshot('smp_name', 'snap_name')

    @res_mock.patch_client
    def test_detach_mount_point(self, client, mocked):
        client.detach_snapshot('smp_name')

    @res_mock.patch_client
    def test_modify_snapshot(self, client, mocked):
        client.modify_snapshot('snap_name', True, True)

    @utils.patch_no_sleep
    @res_mock.patch_client
    def test_create_cg_snapshot(self, client, mocked):
        snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
        self.assertIsNotNone(snap)

    @utils.patch_no_sleep
    @res_mock.patch_client
    def test_create_cg_snapshot_already_existed(self, client, mocked):
        snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
        self.assertIsNotNone(snap)

    @utils.patch_no_sleep
    @res_mock.patch_client
    def test_delete_cg_snapshot(self, client, mocked):
        client.delete_cg_snapshot(cg_snap_name='test_snap')

    @res_mock.patch_client
    def test_create_sg(self, client, mocked):
        client.create_storage_group('sg_name')

    @res_mock.patch_client
    def test_create_sg_name_in_use(self, client, mocked):
        # Fix: pass the callable and its argument separately.  The original
        # invoked client.create_storage_group('sg_name') eagerly, so the
        # exception was raised while evaluating the arguments and
        # assertRaisesRegexp never executed.
        self.assertRaisesRegexp(storops_ex.VNXStorageGroupNameInUseError,
                                'Storage group sg_name already exists. '
                                'Message: ',
                                client.create_storage_group, 'sg_name')

    @res_mock.patch_client
    def test_get_storage_group(self, client, mocked):
        sg = client.get_storage_group('sg_name')
        self.assertEqual('sg_name', sg.name)

    @res_mock.patch_client
    def test_register_initiator(self, client, mocked):
        host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
        client.register_initiator(mocked['sg'], host,
                                  {'host_initiator': 'port_1'})

    @res_mock.patch_client
    def test_register_initiator_exception(self, client, mocked):
        # Registration failures from the array must not propagate.
        host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
        client.register_initiator(mocked['sg'], host,
                                  {'host_initiator': 'port_1'})

    @res_mock.patch_client
    def test_ping_node(self, client, mocked):
        self.assertTrue(client.ping_node(mocked['iscsi_port'], 'ip'))

    @res_mock.patch_client
    def test_ping_node_fail(self, client, mocked):
        self.assertFalse(client.ping_node(mocked['iscsi_port'], 'ip'))

    @res_mock.patch_client
    def test_add_lun_to_sg(self, client, mocked):
        lun = 'not_care'
        self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))

    @res_mock.patch_client
    def test_add_lun_to_sg_alu_already_attached(self, client, mocked):
        lun = 'not_care'
        self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))

    @res_mock.patch_client
    def test_add_lun_to_sg_alu_in_use(self, client, mocked):
        self.assertRaisesRegexp(storops_ex.VNXNoHluAvailableError,
                                'No HLU available.',
                                client.add_lun_to_sg,
                                mocked['sg'],
                                mocked['lun'],
                                3)

    @res_mock.patch_client
    def test_update_consistencygroup_no_lun_in_cg(self, client, mocked):
        lun_1 = mocked['lun_1']
        lun_2 = mocked['lun_2']

        def _get_lun(lun_id):
            return list(filter(
                lambda x: x.lun_id == lun_id, (lun_1, lun_2)))[0]

        client.get_lun = _get_lun
        cg = mocked['cg']
        client.update_consistencygroup(cg, [lun_1.lun_id, lun_2.lun_id], [])
        cg.replace_member.assert_called_once_with(lun_1, lun_2)

    @res_mock.patch_client
    def test_update_consistencygroup_lun_in_cg(self, client, mocked):
        lun_1 = mocked['lun_1']
        lun_2 = mocked['lun_2']

        def _get_lun(lun_id):
            return list(filter(
                lambda x: x.lun_id == lun_id, (lun_1, lun_2)))[0]

        client.get_lun = _get_lun
        cg = mocked['cg']
        client.update_consistencygroup(cg, [lun_2.lun_id], [lun_1.lun_id])
        cg.replace_member.assert_called_once_with(lun_2)

    @res_mock.patch_client
    def test_update_consistencygroup_remove_all(self, client, mocked):
        lun_1 = mocked['lun_1']

        def _get_lun(lun_id):
            return list(filter(lambda x: x.lun_id == lun_id, (lun_1,)))[0]

        client.get_lun = _get_lun
        cg = mocked['cg']
        client.update_consistencygroup(cg, [], [lun_1.lun_id])
        cg.delete_member.assert_called_once_with(lun_1)

    @res_mock.patch_client
    def test_get_available_ip(self, client, mocked):
        ip = client.get_available_ip()
        self.assertEqual('192.168.1.5', ip)

    @res_mock.patch_client
    def test_create_mirror(self, client, mocked):
        mv = client.create_mirror('test_mirror_name', 11)
        self.assertIsNotNone(mv)

    @res_mock.patch_client
    def test_create_mirror_already_created(self, client, mocked):
        mv = client.create_mirror('error_mirror', 12)
        self.assertIsNotNone(mv)

    @res_mock.patch_client
    def test_delete_mirror(self, client, mocked):
        client.delete_mirror('mirror_name')

    @res_mock.patch_client
    def test_delete_mirror_already_deleted(self, client, mocked):
        client.delete_mirror('mirror_name_deleted')

    @res_mock.patch_client
    def test_add_image(self, client, mocked):
        client.add_image('mirror_namex', '192.168.1.11', 31)

    @res_mock.patch_client
    def test_remove_image(self, client, mocked):
        client.remove_image('mirror_remove')

    @res_mock.patch_client
    def test_fracture_image(self, client, mocked):
        client.fracture_image('mirror_fracture')

    @res_mock.patch_client
    def test_sync_image(self, client, mocked):
        client.sync_image('mirror_sync')

    @res_mock.patch_client
    def test_promote_image(self, client, mocked):
        client.promote_image('mirror_promote')

    @res_mock.mock_driver_input
    @res_mock.patch_client
    def test_get_lun_id(self, client, mocked, cinder_input):
        lun_id = client.get_lun_id(cinder_input['volume'])
        self.assertEqual(1, lun_id)

    @res_mock.mock_driver_input
    @res_mock.patch_client
    def test_get_lun_id_without_provider_location(self, client, mocked,
                                                  cinder_input):
        # NOTE(review): presumably falls back to a lookup of the mocked
        # LUN when provider_location carries no id — confirm against
        # Client.get_lun_id.
        lun_id = client.get_lun_id(cinder_input['volume'])
        self.assertIsInstance(lun_id, int)
        self.assertEqual(mocked['lun'].lun_id, lun_id)

View File

@ -0,0 +1,297 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
from cinder.volume.drivers.emc.vnx import client
from cinder.volume.drivers.emc.vnx import common
class TestExtraSpecs(test.TestCase):
    """Tests for parsing and validating volume-type extra specs
    (``common.ExtraSpecs``)."""
    def test_valid_extra_spec(self):
        extra_spec = {
            'provisioning:type': 'deduplicated',
            'storagetype:tiering': 'nomovement',
        }
        spec_obj = common.ExtraSpecs(extra_spec)
        self.assertEqual(storops.VNXProvisionEnum.DEDUPED,
                         spec_obj.provision)
        self.assertEqual(storops.VNXTieringEnum.NO_MOVE,
                         spec_obj.tier)
    def test_extra_spec_case_insensitive(self):
        # Mixed-case spec values map to the same enum members.
        extra_spec = {
            'provisioning:type': 'Thin',
            'storagetype:tiering': 'StartHighThenAuto',
        }
        spec_obj = common.ExtraSpecs(extra_spec)
        self.assertEqual(storops.VNXProvisionEnum.THIN,
                         spec_obj.provision)
        self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO,
                         spec_obj.tier)
    def test_empty_extra_spec(self):
        # With no specs given, the configured defaults are used.
        extra_spec = {}
        common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK,
                                       storops.VNXTieringEnum.HIGH_AUTO)
        spec_obj = common.ExtraSpecs(extra_spec)
        self.assertEqual(storops.VNXProvisionEnum.THICK, spec_obj.provision)
        self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, spec_obj.tier)
    def test_invalid_provision(self):
        extra_spec = {
            'provisioning:type': 'invalid',
        }
        self.assertRaises(exception.InvalidVolumeType,
                          common.ExtraSpecs,
                          extra_spec)
    def test_invalid_tiering(self):
        extra_spec = {
            'storagetype:tiering': 'invalid',
        }
        self.assertRaises(exception.InvalidVolumeType,
                          common.ExtraSpecs,
                          extra_spec)
    def test_validate_extra_spec_dedup_and_tier_failed(self):
        # Deduplicated provisioning combined with an explicit tiering
        # policy is rejected even when all enablers are available.
        spec_obj = common.ExtraSpecs({
            'storagetype:pool': 'fake_pool',
            'provisioning:type': 'deduplicated',
            'storagetype:tiering': 'auto',
        })
        enabler_status = common.VNXEnablerStatus(
            dedup=True, fast=True, thin=True)
        self.assertRaises(exception.InvalidVolumeType,
                          spec_obj.validate,
                          enabler_status)
    def test_tier_is_not_set_to_default_for_dedup_provision(self):
        common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK,
                                       storops.VNXTieringEnum.HIGH_AUTO)
        spec_obj = common.ExtraSpecs({'provisioning:type': 'deduplicated'})
        self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec_obj.provision)
        self.assertIsNone(spec_obj.tier)
    def test_validate_extra_spec_is_valid(self):
        spec_obj = common.ExtraSpecs({
            'storagetype:pool': 'fake_pool',
            'provisioning:type': 'thin',
            'storagetype:tiering': 'auto',
        })
        enabler_status = common.VNXEnablerStatus(
            dedup=True, fast=True, thin=True)
        re = spec_obj.validate(enabler_status)
        self.assertTrue(re)
    def test_validate_extra_spec_dedup_invalid(self):
        # Dedup spec without the dedup enabler installed is rejected.
        spec_obj = common.ExtraSpecs({
            'provisioning:type': 'deduplicated',
        })
        enabler_status = common.VNXEnablerStatus(dedup=False)
        self.assertRaises(exception.InvalidVolumeType,
                          spec_obj.validate,
                          enabler_status)
    def test_validate_extra_spec_compress_invalid(self):
        spec_obj = common.ExtraSpecs({
            'provisioning:type': 'compressed',
        })
        enabler_status = common.VNXEnablerStatus(compression=False)
        self.assertRaises(exception.InvalidVolumeType,
                          spec_obj.validate,
                          enabler_status)
    def test_validate_extra_spec_no_thin_invalid(self):
        spec_obj = common.ExtraSpecs({
            'provisioning:type': 'compressed',
        })
        enabler_status = common.VNXEnablerStatus(compression=True, thin=False)
        self.assertRaises(exception.InvalidVolumeType,
                          spec_obj.validate,
                          enabler_status)
    def test_validate_extra_spec_tier_invalid(self):
        spec_obj = common.ExtraSpecs({
            'storagetype:tiering': 'auto',
        })
        enabler_status = common.VNXEnablerStatus(
            dedup=True, fast=False, compression=True, snap=True, thin=True)
        self.assertRaises(exception.InvalidVolumeType,
                          spec_obj.validate,
                          enabler_status)
    def test_get_raw_data(self):
        # ExtraSpecs supports dict-style membership tests and item access.
        spec_obj = common.ExtraSpecs({'key1': 'value1'})
        self.assertTrue('key1' in spec_obj)
        self.assertFalse('key2' in spec_obj)
        self.assertEqual('value1', spec_obj['key1'])
    @res_mock.mock_storage_resources
    def test_generate_extra_specs_from_lun(self, mocked_res):
        lun = mocked_res['lun']
        spec = common.ExtraSpecs.from_lun(lun)
        self.assertEqual(storops.VNXProvisionEnum.COMPRESSED, spec.provision)
        self.assertEqual(storops.VNXTieringEnum.HIGH, spec.tier)
        lun = mocked_res['deduped_lun']
        spec = common.ExtraSpecs.from_lun(lun)
        self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec.provision)
        # Deduped LUNs carry no tiering policy.
        self.assertIsNone(spec.tier)
    @res_mock.mock_storage_resources
    def test_extra_specs_match_with_lun(self, mocked_res):
        lun = mocked_res['lun']
        spec_obj = common.ExtraSpecs({
            'provisioning:type': 'thin',
            'storagetype:tiering': 'nomovement',
        })
        self.assertTrue(spec_obj.match_with_lun(lun))
        lun = mocked_res['deduped_lun']
        spec_obj = common.ExtraSpecs({
            'provisioning:type': 'deduplicated',
        })
        self.assertTrue(spec_obj.match_with_lun(lun))
    @res_mock.mock_storage_resources
    def test_extra_specs_not_match_with_lun(self, mocked_res):
        lun = mocked_res['lun']
        spec_obj = common.ExtraSpecs({
            'provisioning:type': 'thick',
            'storagetype:tiering': 'nomovement',
        })
        self.assertFalse(spec_obj.match_with_lun(lun))
class FakeConfiguration(object):
    """Minimal configuration stub carrying only the ``replication_device``
    attribute read by the replication tests."""
    def __init__(self):
        # Starts empty; tests assign their own device list.
        self.replication_device = list()
class TestReplicationDeviceList(test.TestCase):
    """Tests for ``common.ReplicationDeviceList`` lookup and iteration."""
    def setUp(self):
        super(TestReplicationDeviceList, self).setUp()
        self.configuration = FakeConfiguration()
        replication_devices = []
        # A single replication target, keyed by its backend_id.
        device = {'backend_id': 'array_id_1',
                  'san_ip': '192.168.1.1',
                  'san_login': 'admin',
                  'san_password': 'admin',
                  'storage_vnx_authentication_type': 'global',
                  'storage_vnx_security_file_dir': '/home/stack/'}
        replication_devices.append(device)
        self.configuration.replication_device = replication_devices
    def test_get_device(self):
        # Every configured dict key is exposed as an attribute of the
        # returned device object.
        devices_list = common.ReplicationDeviceList(self.configuration)
        device = devices_list.get_device('array_id_1')
        self.assertIsNotNone(device)
        self.assertEqual('192.168.1.1', device.san_ip)
        self.assertEqual('admin', device.san_login)
        self.assertEqual('admin', device.san_password)
        self.assertEqual('global', device.storage_vnx_authentication_type)
        self.assertEqual('/home/stack/', device.storage_vnx_security_file_dir)
    def test_get_device_not_found(self):
        # Unknown backend ids yield None rather than raising.
        devices_list = common.ReplicationDeviceList(self.configuration)
        device = devices_list.get_device('array_id_not_existed')
        self.assertIsNone(device)
    def test_devices(self):
        # The list supports len() and indexing.
        devices_list = common.ReplicationDeviceList(self.configuration)
        self.assertEqual(1, len(devices_list.devices))
        self.assertEqual(1, len(devices_list))
        self.assertIsNotNone(devices_list[0])
class TestVNXMirrorView(test.TestCase):
    """Tests for ``common.VNXMirrorView``, which coordinates a primary and
    a secondary client for MirrorView replication operations."""
    def setUp(self):
        super(TestVNXMirrorView, self).setUp()
        # Autospec'ed clients so only real Client methods can be stubbed.
        self.primary_client = mock.create_autospec(client.Client)
        self.secondary_client = mock.create_autospec(client.Client)
        self.mirror_view = common.VNXMirrorView(
            self.primary_client, self.secondary_client)
    def test_create_mirror(self):
        self.mirror_view.create_mirror('mirror_test', 11)
        self.primary_client.create_mirror.assert_called_once_with(
            'mirror_test', 11)
    def test_create_secondary_lun(self):
        # Secondary LUN creation is delegated to the secondary client.
        self.mirror_view.create_secondary_lun('pool_name', 'lun_name',
                                              10, 'thick', 'auto')
        self.secondary_client.create_lun.assert_called_once_with(
            'pool_name', 'lun_name', 10, 'thick', 'auto')
    def test_delete_secondary_lun(self):
        self.mirror_view.delete_secondary_lun('lun_name')
        self.secondary_client.delete_lun.assert_called_once_with('lun_name')
    def test_delete_mirror(self):
        self.mirror_view.delete_mirror('mirror_name')
        self.primary_client.delete_mirror.assert_called_once_with(
            'mirror_name')
    def test_add_image(self):
        # The image is added on the primary side, pointing at an IP
        # obtained from the secondary client.
        self.secondary_client.get_available_ip.return_value = '192.168.1.2'
        self.mirror_view.add_image('mirror_name', 111)
        self.secondary_client.get_available_ip.assert_called_once_with()
        self.primary_client.add_image.assert_called_once_with(
            'mirror_name', '192.168.1.2', 111)
    def test_remove_image(self):
        self.mirror_view.remove_image('mirror_remove')
        self.primary_client.remove_image.assert_called_once_with(
            'mirror_remove')
    def test_fracture_image(self):
        self.mirror_view.fracture_image('mirror_fracture')
        self.primary_client.fracture_image.assert_called_once_with(
            'mirror_fracture')
    def test_promote_image(self):
        # Promotion happens on the secondary side.
        self.mirror_view.promote_image('mirror_promote')
        self.secondary_client.promote_image.assert_called_once_with(
            'mirror_promote')
    def test_destroy_mirror(self):
        # Full teardown: fracture, remove image, delete mirror on the
        # primary, then delete the secondary LUN.
        mv = mock.Mock()
        mv.existed = True
        self.primary_client.get_mirror.return_value = mv
        self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name')
        self.primary_client.get_mirror.assert_called_once_with(
            'mirror_name')
        self.primary_client.fracture_image.assert_called_once_with(
            'mirror_name')
        self.primary_client.remove_image.assert_called_once_with(
            'mirror_name')
        self.primary_client.delete_mirror.assert_called_once_with(
            'mirror_name')
        self.secondary_client.delete_lun.assert_called_once_with(
            'sec_lun_name')
    def test_destroy_mirror_not_existed(self):
        # When the mirror no longer exists, teardown stops after the
        # existence check.
        mv = mock.Mock()
        mv.existed = False
        self.primary_client.get_mirror.return_value = mv
        self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name')
        self.primary_client.get_mirror.assert_called_once_with(
            'mirror_name')
        self.assertFalse(self.primary_client.fracture_image.called)

View File

@ -0,0 +1,71 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc.vnx import driver
class TestEMCVNXDriver(test.TestCase):
    """Tests for the consolidated ``EMCVNXDriver`` entry point.

    Both protocol adapters are replaced by autospec mocks, so these tests
    only verify that the driver instantiates and delegates to the adapter
    selected by the ``storage_protocol`` option.
    """
    def setUp(self):
        super(TestEMCVNXDriver, self).setUp()
        self.configuration = conf.Configuration(None)
        self.fc_adapter_patcher = mock.patch(
            'cinder.volume.drivers.emc.vnx.adapter.FCAdapter',
            autospec=True)
        self.fc_adapter_patcher.start()
        self.iscsi_adapter_patcher = mock.patch(
            'cinder.volume.drivers.emc.vnx.adapter.ISCSIAdapter',
            autospec=True)
        self.iscsi_adapter_patcher.start()
        self.driver = None
        self.addCleanup(self.fc_adapter_patcher.stop)
        self.addCleanup(self.iscsi_adapter_patcher.stop)
    def _get_driver(self, protocol):
        # Build a driver for the given protocol and run do_setup so the
        # adapter is instantiated.
        self.configuration.storage_protocol = protocol
        drv = driver.EMCVNXDriver(configuration=self.configuration,
                                  active_backend_id=None)
        drv.do_setup(None)
        return drv
    def test_init_iscsi_driver(self):
        _driver = self._get_driver('iscsi')
        driver_name = str(_driver.adapter)
        self.assertIn('ISCSIAdapter', driver_name)
    def test_init_fc_driver(self):
        # The upper-case 'FC' value must also select the FC adapter.
        _driver = self._get_driver('FC')
        driver_name = str(_driver.adapter)
        self.assertIn('FCAdapter', driver_name)
    def test_create_volume(self):
        _driver = self._get_driver('iscsi')
        _driver.create_volume('fake_volume')
        _driver.adapter.create_volume.assert_called_once_with('fake_volume')
    def test_initialize_connection(self):
        _driver = self._get_driver('iscsi')
        _driver.initialize_connection('fake_volume', {'host': 'fake_host'})
        _driver.adapter.initialize_connection.assert_called_once_with(
            'fake_volume', {'host': 'fake_host'})
    def test_terminate_connection(self):
        _driver = self._get_driver('iscsi')
        _driver.terminate_connection('fake_volume', {'host': 'fake_host'})
        _driver.adapter.terminate_connection.assert_called_once_with(
            'fake_volume', {'host': 'fake_host'})

View File

@ -0,0 +1,90 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc.vnx import utils
class TestResMock(test.TestCase):
    """Sanity checks for the ``res_mock`` test fixtures themselves."""
    def setUp(self):
        super(TestResMock, self).setUp()
    def tearDown(self):
        super(TestResMock, self).tearDown()
    def test_load_cinder_resource(self):
        # Volumes loaded from the YAML fixture carry the expected
        # provider_location fields ('|'-separated 'key^value' pairs).
        cinder_res = res_mock.CinderResourceMock('mocked_cinder.yaml')
        volume = cinder_res['test_mock_driver_input_inner']['volume']
        items = ['base_lun_name^test', 'version^07.00.00', 'type^lun',
                 'system^fake_serial', 'id^1']
        self.assertEqual(sorted(items),
                         sorted(volume.provider_location.split('|')))
    def test_mock_driver_input(self):
        @res_mock.mock_driver_input
        def test_mock_driver_input_inner(self, mocked_input):
            items = ['base_lun_name^test', 'version^07.00.00', 'type^lun',
                     'system^fake_serial', 'id^1']
            mocked_items = mocked_input['volume'].provider_location.split('|')
            self.assertEqual(sorted(items),
                             sorted(mocked_items))
        # Decorated inner test is invoked manually with this TestCase.
        test_mock_driver_input_inner(self)
    def test_load_storage_resource(self):
        vnx_res = res_mock.StorageResourceMock('test_res_mock.yaml')
        lun = vnx_res['test_load_storage_resource']['lun']
        pool = vnx_res['test_load_storage_resource']['pool']
        # The mocked pool's create_lun returns a LUN with the same
        # properties as the fixture LUN.
        created_lun = pool.create_lun()
        self.assertEqual(lun.lun_id, created_lun.lun_id)
        self.assertEqual(lun.poll, created_lun.poll)
        self.assertEqual(lun.state, created_lun.state)
    def test_patch_client(self):
        @res_mock.patch_client
        def test_patch_client_inner(self, patched_client, mocked):
            vnx = patched_client.vnx
            self.assertEqual('fake_serial', vnx.serial)
            pool = vnx.get_pool()
            self.assertEqual('pool_name', pool.name)
        test_patch_client_inner(self)
    def test_patch_client_mocked(self):
        @res_mock.patch_client
        def test_patch_client_mocked_inner(self, patched_client, mocked):
            lun = mocked['lun']
            self.assertEqual('Offline', lun.state)
        test_patch_client_mocked_inner(self)
    def test_patch_adapter_common(self):
        # Minimal configuration needed for the common adapter to build.
        self.configuration = conf.Configuration(None)
        utils.init_ops(self.configuration)
        self.configuration.san_ip = '192.168.1.1'
        self.configuration.storage_vnx_authentication_type = 'global'
        self.configuration.storage_vnx_pool_names = 'pool1,unit_test_pool'
        @res_mock.patch_common_adapter
        def test_patch_common_adapter_inner(self, patched_adapter, mocked):
            pool = patched_adapter.client.vnx.get_pool()
            self.assertEqual('pool_name', pool.name)
        test_patch_common_adapter_inner(self)

View File

@ -0,0 +1,59 @@
#################################################
# Storage resource
#################################################
# Common
lun_base:
_properties: &lun_base_prop
lun_id: lun_id
poll: False
operation: None
state: Ready
pool_base:
_properties: &pool_base_prop
name: pool_name
pool_id: 0
state: Ready
user_capacity_gbs: 1311
total_subscribed_capacity_gbs: 131
available_capacity_gbs: 132
percent_full_threshold: 70
fast_cache: True
vnx_base:
_properties: &vnx_base_prop
serial: fake_serial
test_load_storage_resource: &test_load_storage_resource
lun: &lun1
_properties:
<<: *lun_base_prop
state: Offline
_methods:
update:
pool: &pool1
_properties:
<<: *pool_base_prop
_methods:
create_lun: *lun1
vnx:
_properties:
<<: *vnx_base_prop
_methods:
get_pool: *pool1
test_patch_client_inner: *test_load_storage_resource
test_patch_client_mocked_inner: *test_load_storage_resource
test_patch_common_adapter_inner: *test_load_storage_resource
test_property_side_effect_inner:
lun:
_properties:
<<: *lun_base_prop
total_capacity_gb:
_side_effect: [5, 10]

View File

@ -0,0 +1,181 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception as vnx_ex
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
import cinder.volume.drivers.emc.vnx.taskflows as vnx_taskflow
class TestTaskflow(test.TestCase):
    """Unit tests for the taskflow tasks used by the VNX driver.

    Each test adds a single task to a linear flow, loads it into a
    taskflow engine with a prepared store, and either runs it to
    completion or asserts that the expected storops error propagates
    (which also exercises the task's revert path).
    """

    def setUp(self):
        super(TestTaskflow, self).setUp()
        self.work_flow = linear_flow.Flow('test_task')

    def _load_engine(self, task, store_spec):
        # Helper shared by every test: wire the single task into the
        # flow and return an engine ready to run.
        self.work_flow.add(task)
        return taskflow.engines.load(self.work_flow,
                                     store=store_spec)

    @res_mock.patch_client
    def test_copy_snapshot_task(self, client, mocked):
        store_spec = {'client': client,
                      'snap_name': 'original_name',
                      'new_snap_name': 'new_name'}
        engine = self._load_engine(vnx_taskflow.CopySnapshotTask(),
                                   store_spec)
        engine.run()

    @res_mock.patch_client
    def test_copy_snapshot_task_revert(self, client, mocked):
        store_spec = {'client': client,
                      'snap_name': 'original_name',
                      'new_snap_name': 'new_name'}
        engine = self._load_engine(vnx_taskflow.CopySnapshotTask(),
                                   store_spec)
        self.assertRaises(vnx_ex.VNXSnapError,
                          engine.run)

    @res_mock.patch_client
    def test_create_smp_task(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'base_lun_name': 'base_name'
        }
        engine = self._load_engine(vnx_taskflow.CreateSMPTask(),
                                   store_spec)
        engine.run()
        smp_id = engine.storage.fetch('smp_id')
        self.assertEqual(15, smp_id)

    @res_mock.patch_client
    def test_create_smp_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'base_lun_name': 'base_name'
        }
        engine = self._load_engine(vnx_taskflow.CreateSMPTask(),
                                   store_spec)
        self.assertRaises(vnx_ex.VNXCreateLunError,
                          engine.run)
        # On revert the stored result is a taskflow Failure, not an id.
        smp_id = engine.storage.fetch('smp_id')
        self.assertIsInstance(smp_id, failure.Failure)

    @res_mock.patch_client
    def test_attach_snap_task(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'snap_name': 'snap_name'
        }
        engine = self._load_engine(vnx_taskflow.AttachSnapTask(),
                                   store_spec)
        engine.run()

    @res_mock.patch_client
    def test_attach_snap_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'snap_name': 'snap_name'
        }
        engine = self._load_engine(vnx_taskflow.AttachSnapTask(),
                                   store_spec)
        self.assertRaises(vnx_ex.VNXAttachSnapError,
                          engine.run)

    @res_mock.patch_client
    def test_create_snapshot_task(self, client, mocked):
        store_spec = {
            'client': client,
            'lun_id': 12,
            'snap_name': 'snap_name'
        }
        engine = self._load_engine(vnx_taskflow.CreateSnapshotTask(),
                                   store_spec)
        engine.run()

    @res_mock.patch_client
    def test_create_snapshot_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'lun_id': 13,
            'snap_name': 'snap_name'
        }
        engine = self._load_engine(vnx_taskflow.CreateSnapshotTask(),
                                   store_spec)
        self.assertRaises(vnx_ex.VNXCreateSnapError,
                          engine.run)

    @res_mock.patch_client
    def test_allow_read_write_task(self, client, mocked):
        store_spec = {
            'client': client,
            'snap_name': 'snap_name'
        }
        engine = self._load_engine(vnx_taskflow.AllowReadWriteTask(),
                                   store_spec)
        engine.run()

    @res_mock.patch_client
    def test_allow_read_write_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'snap_name': 'snap_name'
        }
        engine = self._load_engine(vnx_taskflow.AllowReadWriteTask(),
                                   store_spec)
        self.assertRaises(vnx_ex.VNXSnapError,
                          engine.run)

    @res_mock.patch_client
    def test_create_cg_snapshot_task(self, client, mocked):
        store_spec = {
            'client': client,
            'cg_name': 'test_cg',
            'cg_snap_name': 'my_snap_name'
        }
        engine = self._load_engine(vnx_taskflow.CreateCGSnapshotTask(),
                                   store_spec)
        engine.run()
        snap_name = engine.storage.fetch('new_cg_snap_name')
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(snap_name, res_mock.StorageObjectMock)

    @res_mock.patch_client
    def test_create_cg_snapshot_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'cg_name': 'test_cg',
            'cg_snap_name': 'my_snap_name'
        }
        engine = self._load_engine(vnx_taskflow.CreateCGSnapshotTask(),
                                   store_spec)
        self.assertRaises(vnx_ex.VNXCreateSnapError,
                          engine.run)

View File

@ -0,0 +1,177 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.emc.vnx import fake_storops as storops
from cinder.tests.unit.volume.drivers.emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.emc.vnx import utils as ut_utils
from cinder.volume.drivers.emc.vnx import common
from cinder.volume.drivers.emc.vnx import utils
class TestUtils(test.TestCase):
    """Unit tests for cinder.volume.drivers.emc.vnx.utils."""

    def setUp(self):
        super(TestUtils, self).setUp()
        # Shrink the polling timeout so the wait_until() tests finish
        # quickly; the original value is restored in tearDown().
        self.origin_timeout = common.DEFAULT_TIMEOUT
        common.DEFAULT_TIMEOUT = 0.05

    def tearDown(self):
        super(TestUtils, self).tearDown()
        common.DEFAULT_TIMEOUT = self.origin_timeout

    @ut_utils.patch_looping_call
    def test_wait_until(self):
        mock_testmethod = mock.Mock(side_effect=[False, True])
        utils.wait_until(mock_testmethod)
        mock_testmethod.assert_has_calls([mock.call(), mock.call()])

    @ut_utils.patch_looping_call
    def test_wait_until_with_exception(self):
        mock_testmethod = mock.Mock(side_effect=[
            False, storops_ex.VNXAttachSnapError('Unknown error')])
        mock_testmethod.__name__ = 'test_method'
        self.assertRaises(storops_ex.VNXAttachSnapError,
                          utils.wait_until,
                          mock_testmethod,
                          timeout=20,
                          reraise_arbiter=(
                              lambda ex: not isinstance(
                                  ex, storops_ex.VNXCreateLunError)))
        mock_testmethod.assert_has_calls([mock.call(), mock.call()])

    @ut_utils.patch_looping_call
    def test_wait_until_with_params(self):
        mock_testmethod = mock.Mock(side_effect=[False, True])
        mock_testmethod.__name__ = 'test_method'
        utils.wait_until(mock_testmethod,
                         param1=1,
                         param2='test')
        mock_testmethod.assert_has_calls(
            [mock.call(param1=1, param2='test'),
             mock.call(param1=1, param2='test')])

    @res_mock.mock_driver_input
    def test_retype_need_migration_when_host_changed(self, driver_in):
        volume = driver_in['volume']
        another_host = driver_in['host']
        re = utils.retype_need_migration(
            volume, None, None, another_host)
        self.assertTrue(re)

    @res_mock.mock_driver_input
    def test_retype_need_migration_for_smp_volume(self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        re = utils.retype_need_migration(
            volume, None, None, host)
        self.assertTrue(re)

    @res_mock.mock_driver_input
    def test_retype_need_migration_when_provision_changed(
            self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        old_spec = common.ExtraSpecs({'provisioning:type': 'thin'})
        new_spec = common.ExtraSpecs({'provisioning:type': 'deduplicated'})
        re = utils.retype_need_migration(
            volume, old_spec.provision, new_spec.provision, host)
        self.assertTrue(re)

    @res_mock.mock_driver_input
    def test_retype_not_need_migration_when_provision_changed(
            self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        old_spec = common.ExtraSpecs({'provisioning:type': 'thick'})
        new_spec = common.ExtraSpecs({'provisioning:type': 'compressed'})
        re = utils.retype_need_migration(
            volume, old_spec.provision, new_spec.provision, host)
        self.assertFalse(re)

    @res_mock.mock_driver_input
    def test_retype_not_need_migration(self, driver_in):
        volume = driver_in['volume']
        host = driver_in['host']
        old_spec = common.ExtraSpecs({'storagetype:tiering': 'auto'})
        new_spec = common.ExtraSpecs(
            {'storagetype:tiering': 'starthighthenauto'})
        re = utils.retype_need_migration(
            volume, old_spec.provision, new_spec.provision, host)
        self.assertFalse(re)

    def test_retype_need_change_tier(self):
        re = utils.retype_need_change_tier(
            storops.VNXTieringEnum.AUTO, storops.VNXTieringEnum.HIGH_AUTO)
        self.assertTrue(re)

    def test_retype_need_turn_on_compression(self):
        # Moving from thin or thick to compressed requires enabling
        # compression on the LUN.
        re = utils.retype_need_turn_on_compression(
            storops.VNXProvisionEnum.THIN,
            storops.VNXProvisionEnum.COMPRESSED)
        self.assertTrue(re)
        re = utils.retype_need_turn_on_compression(
            storops.VNXProvisionEnum.THICK,
            storops.VNXProvisionEnum.COMPRESSED)
        self.assertTrue(re)

    def test_retype_not_need_turn_on_compression(self):
        # NOTE: the original test repeated this exact assertion twice
        # (copy-paste); the duplicate was removed.
        re = utils.retype_need_turn_on_compression(
            storops.VNXProvisionEnum.DEDUPED,
            storops.VNXProvisionEnum.COMPRESSED)
        self.assertFalse(re)

    @ut_utils.patch_extra_specs({'provisioning:type': 'compressed'})
    @res_mock.mock_driver_input
    def test_validate_cg_type(self, mocked_input):
        cg = mocked_input['cg']
        self.assertRaises(exception.InvalidInput,
                          utils.validate_cg_type,
                          cg)

    @res_mock.mock_driver_input
    def test_get_base_lun_name(self, mocked):
        volume = mocked['volume']
        self.assertEqual(
            'test',
            utils.get_base_lun_name(volume))

    def test_convert_to_tgt_list_and_itor_tgt_map(self):
        zone_mapping = {
            'san_1': {'initiator_port_wwn_list':
                      ['wwn1_1'],
                      'target_port_wwn_list':
                      ['wwnt_1', 'wwnt_2']},
            'san_2': {'initiator_port_wwn_list':
                      ['wwn2_1', 'wwn2_2'],
                      'target_port_wwn_list':
                      ['wwnt_1', 'wwnt_3']},
        }
        tgt_wwns, itor_tgt_map = (
            utils.convert_to_tgt_list_and_itor_tgt_map(zone_mapping))
        self.assertEqual(set(['wwnt_1', 'wwnt_2', 'wwnt_3']), set(tgt_wwns))
        self.assertEqual({'wwn1_1': ['wwnt_1', 'wwnt_2'],
                          'wwn2_1': ['wwnt_1', 'wwnt_3'],
                          'wwn2_2': ['wwnt_1', 'wwnt_3']},
                         itor_tgt_map)

View File

@ -0,0 +1,93 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os import path
import mock
import six
import yaml
from cinder.tests.unit import utils
from cinder.volume.drivers.emc.vnx import client
from cinder.volume.drivers.emc.vnx import common
# Reusable mock.patch objects shared by the VNX unit tests.
# Replace the fixed-interval looping call so polling loops run with no
# delay in tests.
patch_looping_call = mock.patch(
    'oslo_service.loopingcall.FixedIntervalLoopingCall',
    new=utils.ZeroIntervalLoopingCall)
# Patch time.sleep with a Mock so tests can assert on sleeping.
patch_sleep = mock.patch('time.sleep')
# Prevent tests from creating a handle to a real VNX array.
patch_vnxsystem = mock.patch('storops.VNXSystem')
# Make time.sleep a no-op without turning it into a Mock.
patch_no_sleep = mock.patch('time.sleep', new=lambda x: None)
def load_yaml(file_name):
    """Loads a YAML fixture located next to this module.

    :param file_name: file name relative to this module's directory.
    :returns: the parsed YAML content.
    """
    yaml_file = '{}/{}'.format(path.dirname(
        path.abspath(__file__)), file_name)
    with open(yaml_file) as f:
        # safe_load avoids constructing arbitrary Python objects from
        # YAML tags; the test fixtures are plain mappings/sequences.
        res = yaml.safe_load(f)
    return res
def patch_extra_specs(specs):
    """Decorator: makes volume-type extra-specs lookup return `specs`."""
    return _build_patch_decorator(
        'cinder.volume.volume_types.get_volume_type_extra_specs',
        return_value=specs)


def patch_extra_specs_validate(return_value=None, side_effect=None):
    """Decorator: stubs ExtraSpecs.validate with the given result/effect."""
    return _build_patch_decorator(
        'cinder.volume.drivers.emc.vnx.common.ExtraSpecs.validate',
        return_value=return_value,
        side_effect=side_effect)
def _build_patch_decorator(module_str, return_value=None, side_effect=None):
    """Builds a decorator patching `module_str` around the wrapped call.

    The patch is active only for the duration of the decorated
    function; `return_value`/`side_effect` configure the mock.
    """
    def _inner_mock(func):
        @six.wraps(func)
        def decorator(*args, **kwargs):
            with mock.patch(
                    module_str,
                    return_value=return_value,
                    side_effect=side_effect):
                return func(*args, **kwargs)
        return decorator
    return _inner_mock
def build_fake_mirror_view():
    """Returns an autospec'ed VNXMirrorView wired with fake clients."""
    view = mock.create_autospec(spec=common.VNXMirrorView)
    view.primary_client = mock.create_autospec(spec=client.Client)
    view.secondary_client = mock.create_autospec(spec=client.Client)
    return view
def get_replication_device():
    """Returns a fake replication-device config used by the tests."""
    device = dict(
        backend_id='fake_serial',
        san_ip='192.168.1.12',
        san_login='admin',
        san_password='admin',
        storage_vnx_authentication_type='global',
        storage_vnx_security_file_dir=None,
    )
    return device

View File

@ -252,6 +252,12 @@ volume_opts = [
'discard (aka. trim/unmap). This will not actually '
'change the behavior of the backend or the client '
'directly, it will only notify that it can be used.'),
cfg.StrOpt('storage_protocol',
ignore_case=True,
default='iscsi',
choices=['iscsi', 'fc'],
help='Protocol for transferring data between host and '
'storage back-end.'),
]
# for backward compatibility

View File

@ -1,296 +0,0 @@
# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""iSCSI Drivers for EMC VNX array based on CLI."""
from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
LOG = logging.getLogger(__name__)
# NOTE: this class is a thin shim -- every operation is delegated to the
# emc_vnx_cli backend object created in __init__.
@interface.volumedriver
class EMCCLIISCSIDriver(driver.ISCSIDriver):
    """EMC ISCSI Drivers for VNX using CLI.

    Version history:

    .. code-block:: none

        1.0.0 - Initial driver
        2.0.0 - Thick/thin provisioning, robust enhancement
        3.0.0 - Array-based Backend Support, FC Basic Support,
                Target Port Selection for MPIO,
                Initiator Auto Registration,
                Storage Group Auto Deletion,
                Multiple Authentication Type Support,
                Storage-Assisted Volume Migration,
                SP Toggle for HA
        3.0.1 - Security File Support
        4.0.0 - Advance LUN Features (Compression Support,
                Deduplication Support, FAST VP Support,
                FAST Cache Support), Storage-assisted Retype,
                External Volume Management, Read-only Volume,
                FC Auto Zoning
        4.1.0 - Consistency group support
        5.0.0 - Performance enhancement, LUN Number Threshold Support,
                Initiator Auto Deregistration,
                Force Deleting LUN in Storage Groups,
                robust enhancement
        5.1.0 - iSCSI multipath enhancement
        5.2.0 - Pool-aware scheduler support
        5.3.0 - Consistency group modification support
        6.0.0 - Over subscription support
                Create consistency group from cgsnapshot support
                Multiple pools support enhancement
                Manage/unmanage volume revise
                White list target ports support
                Snap copy support
                Support efficient non-disruptive backup
        7.0.0 - Clone consistency group support
                Replication v2 support(managed)
                Configurable migration rate support
    """
    def __init__(self, *args, **kwargs):
        """Builds the iSCSI CLI backend; the driver version tracks it."""
        super(EMCCLIISCSIDriver, self).__init__(*args, **kwargs)
        self.cli = emc_vnx_cli.getEMCVnxCli(
            'iSCSI',
            configuration=self.configuration,
            active_backend_id=kwargs.get('active_backend_id'))
        # Keep the reported driver version in sync with the CLI helper.
        self.VERSION = self.cli.VERSION
    def check_for_setup_error(self):
        # Setup validation is performed inside emc_vnx_cli.
        pass
    def create_volume(self, volume):
        """Creates a VNX volume."""
        return self.cli.create_volume(volume)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        return self.cli.create_volume_from_snapshot(volume, snapshot)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned volume."""
        return self.cli.create_cloned_volume(volume, src_vref)
    def extend_volume(self, volume, new_size):
        """Extend a volume."""
        self.cli.extend_volume(volume, new_size)
    def delete_volume(self, volume):
        """Deletes a VNX volume."""
        self.cli.delete_volume(volume)
    def migrate_volume(self, ctxt, volume, host):
        """Migrates a volume to the given host (storage-assisted)."""
        return self.cli.migrate_volume(ctxt, volume, host)
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type."""
        return self.cli.retype(ctxt, volume, new_type, diff, host)
    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        self.cli.create_snapshot(snapshot)
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self.cli.delete_snapshot(snapshot)
    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        pass
    def create_export(self, context, volume, connector):
        """Driver entry point to get the export info for a new volume."""
        pass
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        pass
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        the format of the driver data is defined in vnx_get_iscsi_properties.

        Example return value (multipath is not enabled)::

            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'target_lun': 1,
                }
            }

        Example return value (multipath is enabled)::

            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
                                    'iqn.2010-10.org.openstack:volume-00002'],
                    'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
                    'target_luns': [1, 1],
                }
            }
        """
        return self.cli.initialize_connection(volume, connector)
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        self.cli.terminate_connection(volume, connector)
    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self.update_volume_stats()
        return self._stats
    def update_volume_stats(self):
        """Retrieve status info from volume group."""
        LOG.debug("Updating volume status.")
        # retrieving the volume update from the VNX
        data = self.cli.update_volume_stats()
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or 'EMCCLIISCSIDriver'
        data['storage_protocol'] = 'iSCSI'
        self._stats = data
    def manage_existing(self, volume, existing_ref):
        """Manage an existing lun in the array.

        The lun should be in a manageable pool backend, otherwise
        error would return.
        Rename the backend storage object so that it matches the,
        volume['name'] which is how drivers traditionally map between a
        cinder volume and the associated backend storage object.

        .. code-block:: none

            manage_existing_ref:{
                'source-id':<lun id in VNX>
            }

            or

            manage_existing_ref:{
                'source-name':<lun name in VNX>
            }
        """
        return self.cli.manage_existing(volume, existing_ref)
    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing."""
        return self.cli.manage_existing_get_size(volume, existing_ref)
    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        return self.cli.create_consistencygroup(context, group)
    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group."""
        return self.cli.delete_consistencygroup(
            context, group, volumes)
    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot."""
        return self.cli.create_cgsnapshot(
            context, cgsnapshot, snapshots)
    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot."""
        return self.cli.delete_cgsnapshot(
            context, cgsnapshot, snapshots)
    def get_pool(self, volume):
        """Returns the pool name of a volume."""
        return self.cli.get_pool(volume)
    def update_consistencygroup(self, context, group,
                                add_volumes,
                                remove_volumes):
        """Updates LUNs in consistency group."""
        return self.cli.update_consistencygroup(context, group,
                                                add_volumes,
                                                remove_volumes)
    def unmanage(self, volume):
        """Unmanages a volume."""
        self.cli.unmanage(volume)
    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Creates a consistency group from source."""
        return self.cli.create_consistencygroup_from_src(context,
                                                         group,
                                                         volumes,
                                                         cgsnapshot,
                                                         snapshots,
                                                         source_cg,
                                                         source_vols)
    def update_migrated_volume(self, context, volume, new_volume,
                               original_volume_status=None):
        """Returns model update for migrated volume."""
        return self.cli.update_migrated_volume(context, volume, new_volume,
                                               original_volume_status)
    def create_export_snapshot(self, context, snapshot, connector):
        """Creates a snapshot mount point for snapshot."""
        return self.cli.create_export_snapshot(context, snapshot, connector)
    def remove_export_snapshot(self, context, snapshot):
        """Removes snapshot mount point for snapshot."""
        return self.cli.remove_export_snapshot(context, snapshot)
    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Allows connection to snapshot."""
        return self.cli.initialize_connection_snapshot(snapshot,
                                                       connector,
                                                       **kwargs)
    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Disallows connection to snapshot."""
        return self.cli.terminate_connection_snapshot(snapshot,
                                                      connector,
                                                      **kwargs)
    def backup_use_temp_snapshot(self):
        """Signals that backup can use a temporary snapshot."""
        return True
    def failover_host(self, context, volumes, secondary_id=None):
        """Failovers volume from primary device to secondary."""
        return self.cli.failover_host(context, volumes, secondary_id)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,552 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
from cinder import exception
from cinder.i18n import _, _LW, _LE
from cinder import utils as cinder_utils
from cinder.volume.drivers.emc.vnx import common
from cinder.volume.drivers.emc.vnx import const
from cinder.volume.drivers.emc.vnx import utils
LOG = logging.getLogger(__name__)
class Condition(object):
    """Defines some condition checker which are used in wait_until, .etc."""

    @staticmethod
    def is_lun_io_ready(lun):
        """Returns True when the LUN is ready to serve I/O.

        Raises VolumeBackendAPIException right away when the LUN lands
        in an unexpected state, so wait_until() does not time out.
        """
        utils.update_res_without_poll(lun)
        if not lun.existed:
            return False
        lun_state = lun.state
        if lun_state == common.LUNState.INITIALIZING:
            return False
        elif lun_state in [common.LUNState.READY,
                           common.LUNState.FAULTED]:
            # Ready/faulted LUNs are usable once no operation is running.
            return lun.operation == 'None'
        else:
            # Quick exit wait_until when the lun is other state to avoid
            # long-time timeout.
            msg = (_('Volume %(name)s was created in VNX, '
                     'but in %(state)s state.')
                   % {'name': lun.name,
                      'state': lun_state})
            raise exception.VolumeBackendAPIException(data=msg)

    @staticmethod
    def is_object_existed(vnx_obj):
        """Returns True once the VNX object is visible on the array."""
        utils.update_res_without_poll(vnx_obj)
        return vnx_obj.existed

    @staticmethod
    def is_lun_ops_ready(lun):
        """Returns True when no operation is running on the LUN."""
        utils.update_res_without_poll(lun)
        return 'None' == lun.operation

    @staticmethod
    def is_lun_expanded(lun, new_size):
        """Returns True once the LUN capacity reaches `new_size`."""
        utils.update_res_without_poll(lun)
        return new_size == lun.total_capacity_gb

    @staticmethod
    def is_mirror_synced(mirror):
        """Returns True once the secondary mirror image is synchronized."""
        utils.update_res_without_poll(mirror)
        return (
            mirror.secondary_image.state ==
            storops.VNXMirrorImageState.SYNCHRONIZED)
class Client(object):
    def __init__(self, ip, username, password, scope,
                 naviseccli, sec_file):
        """Builds the storops handle used for all array operations.

        :param ip: management IP of the VNX array.
        :param username: array login name.
        :param password: array login password.
        :param scope: authentication scope (e.g. 'global').
        :param naviseccli: path to the naviseccli binary.
        :param sec_file: security file dir for credential-less auth.
        """
        self.naviseccli = naviseccli
        self.vnx = storops.VNXSystem(ip=ip,
                                     username=username,
                                     password=password,
                                     scope=scope,
                                     naviseccli=naviseccli,
                                     sec_file=sec_file)
    def create_lun(self, pool, name, size, provision,
                   tier, cg_id=None, ignore_thresholds=False):
        """Creates a pool LUN and optionally adds it to a CG.

        :param pool: name of the pool to create the LUN in.
        :param name: LUN name.
        :param size: LUN size in GiB.
        :param provision: provisioning type (storops VNXProvisionEnum).
        :param tier: tiering policy (storops VNXTieringEnum).
        :param cg_id: consistency group to add the new LUN to, if any.
        :param ignore_thresholds: skip pool threshold checks.
        :returns: the created (or pre-existing) storops LUN object.
        """
        pool = self.vnx.get_pool(name=pool)
        try:
            lun = pool.create_lun(lun_name=name,
                                  size_gb=size,
                                  provision=provision,
                                  tier=tier,
                                  ignore_thresholds=ignore_thresholds)
        except storops_ex.VNXLunNameInUseError:
            # Idempotent retry: the LUN already exists, reuse it.
            lun = self.vnx.get_lun(name=name)
        # Block until the LUN is actually ready for I/O.
        utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun)
        if cg_id:
            cg = self.vnx.get_cg(name=cg_id)
            cg.add_member(lun)
        return lun
    def get_lun(self, name=None, lun_id=None):
        """Returns the storops LUN object by name or id."""
        return self.vnx.get_lun(name=name, lun_id=lun_id)

    def get_lun_id(self, volume):
        """Retrieves the LUN ID of volume."""
        if volume.provider_location:
            return int(utils.extract_provider_location(
                volume.provider_location, 'id'))
        else:
            # In some cases, cinder will not update volume info in DB with
            # provider_location returned by us. We need to retrieve the id
            # from array. For example, cinder backup-create doesn't use the
            # provider_location returned from create_cloned_volume.
            lun = self.get_lun(name=volume.name)
            return lun.lun_id
def delete_lun(self, name, force=False):
"""Deletes a LUN or mount point."""
lun = self.get_lun(name=name)
smp_attached_snap = (lun.attached_snapshot if lun.is_snap_mount_point
else None)
try:
# Do not delete the snapshots of the lun.
lun.delete(force_detach=True, detach_from_sg=force)
if smp_attached_snap:
smp_attached_snap.delete()
except storops_ex.VNXLunNotFoundError as ex:
LOG.warning(_LW("LUN %(name)s is already deleted. "
"Message: %(msg)s"),
{'name': name, 'msg': ex.message})
pass # Ignore the failure that due to retry.
    @cinder_utils.retry(const.VNXLunPreparingError, retries=1,
                        backoff_rate=1)
    def expand_lun(self, name, new_size, poll=True):
        """Expands the LUN to `new_size` GiB and waits for completion.

        Retried once (via the decorator) when the LUN is still in the
        'Preparing' state.

        :param name: LUN name.
        :param new_size: target size in GiB.
        :param poll: whether storops polls the array for fresh data.
        """
        lun = self.get_lun(name=name)
        try:
            lun.poll = poll
            lun.expand(new_size, ignore_thresholds=True)
        except storops_ex.VNXLunExpandSizeError as ex:
            # Already at the requested size -- nothing left to do.
            LOG.warning(_LW("LUN %(name)s is already expanded. "
                            "Message: %(msg)s."),
                        {'name': name, 'msg': ex.message})
        except storops_ex.VNXLunPreparingError as ex:
            # The error means the operation cannot be performed because the LUN
            # is 'Preparing'. Wait for a while so that the LUN may get out of
            # the transitioning state.
            with excutils.save_and_reraise_exception():
                LOG.warning(_LW("LUN %(name)s is not ready for extension: "
                                "%(msg)s"),
                            {'name': name, 'msg': ex.message})
                utils.wait_until(Condition.is_lun_ops_ready, lun=lun)
        utils.wait_until(Condition.is_lun_expanded, lun=lun, new_size=new_size)

    def modify_lun(self):
        # NOTE(review): placeholder -- not implemented yet.
        pass
    @cinder_utils.retry(exceptions=const.VNXTargetNotReadyError,
                        interval=15,
                        retries=5, backoff_rate=1)
    def migrate_lun(self, src_id, dst_id,
                    rate=const.MIGRATION_RATE_HIGH):
        """Starts a LUN migration session from `src_id` to `dst_id`.

        Retried (via the decorator) while the target is not yet ready
        to accept a migration.
        """
        src = self.vnx.get_lun(lun_id=src_id)
        src.migrate(dst_id, rate)
def session_finished(self, src_lun):
session = self.vnx.get_migration_session(src_lun)
if not session.existed:
return True
elif session.current_state in ('FAULTED', 'STOPPED'):
LOG.warning(_LW('Session is %s, need to handled then.'),
session.current_state)
return True
else:
return False
    def verify_migration(self, src_id, dst_id, dst_wwn):
        """Verify whether migration session finished successfully.

        :param src_id: source LUN id
        :param dst_id: destination LUN id
        :param dst_wwn: destination LUN WWN recorded before migration
        :returns Boolean: True or False
        """
        src_lun = self.vnx.get_lun(lun_id=src_id)
        # Block until the migration session disappears or reaches a
        # terminal state.
        utils.wait_until(condition=self.session_finished,
                         interval=common.INTERVAL_30_SEC,
                         src_lun=src_lun)
        new_lun = self.vnx.get_lun(lun_id=dst_id)
        new_wwn = new_lun.wwn
        # NOTE(review): success is inferred from the WWN at dst_id being
        # gone or different from the pre-migration one -- confirm this
        # matches storops' LUN migration semantics.
        if not new_wwn or new_wwn != dst_wwn:
            return True
        else:
            return False

    def cleanup_migration(self, src_id, dst_id):
        """Invoke when migration meets error.

        :param src_id: source LUN id
        :param dst_id: destination LUN id
        """
        # if migration session is still there
        # we need to cancel the session
        session = self.vnx.get_migration_session(src_id)
        src_lun = self.vnx.get_lun(lun_id=src_id)
        if session.existed:
            LOG.warning(_LW('Cancelling migration session: '
                            '%(src_id)s -> %(dst_id)s.'),
                        {'src_id': src_id,
                         'dst_id': dst_id})
            src_lun.cancel_migrate()
    def create_snapshot(self, lun_id, snap_name):
        """Creates a snapshot."""
        lun = self.get_lun(lun_id=lun_id)
        try:
            lun.create_snap(snap_name, allow_rw=True, auto_delete=False)
        except storops_ex.VNXSnapNameInUseError as ex:
            # Idempotent retry: an existing snapshot with this name is OK.
            LOG.warning(_LW('Snapshot %(name)s already exists. '
                            'Message: %(msg)s'),
                        {'name': snap_name, 'msg': ex.message})

    def delete_snapshot(self, snapshot_name):
        """Deletes a snapshot."""
        snap = self.vnx.get_snap(name=snapshot_name)
        try:
            snap.delete()
        except storops_ex.VNXSnapNotExistsError as ex:
            # Already gone -- treat as success.
            LOG.warning(_LW("Snapshot %(name)s may be deleted already. "
                            "Message: %(msg)s"),
                        {'name': snapshot_name, 'msg': ex.message})
        except storops_ex.VNXDeleteAttachedSnapError as ex:
            # Still attached somewhere: log and re-raise for the caller.
            with excutils.save_and_reraise_exception():
                LOG.warning(_LW("Failed to delete snapshot %(name)s "
                                "which is in use. Message: %(msg)s"),
                            {'name': snapshot_name, 'msg': ex.message})

    def copy_snapshot(self, snap_name, new_snap_name):
        """Copies snapshot `snap_name` to `new_snap_name`."""
        snap = self.vnx.get_snap(name=snap_name)
        snap.copy(new_name=new_snap_name)

    def create_mount_point(self, lun_name, smp_name):
        """Creates a snapshot mount point (SMP) on `lun_name`."""
        lun = self.vnx.get_lun(name=lun_name)
        try:
            return lun.create_mount_point(name=smp_name)
        except storops_ex.VNXLunNameInUseError as ex:
            LOG.warning(_LW('Mount point %(name)s already exists. '
                            'Message: %(msg)s'),
                        {'name': smp_name, 'msg': ex.message})
            # Ignore the failure that due to retry.
            return self.vnx.get_lun(name=smp_name)

    def attach_snapshot(self, smp_name, snap_name):
        """Attaches snapshot `snap_name` to mount point `smp_name`."""
        lun = self.vnx.get_lun(name=smp_name)
        try:
            lun.attach_snap(snap=snap_name)
        except storops_ex.VNXSnapAlreadyMountedError as ex:
            # Idempotent retry: already attached is fine.
            LOG.warning(_LW("Snapshot %(snap_name)s is attached to "
                            "snapshot mount point %(smp_name)s already. "
                            "Message: %(msg)s"),
                        {'snap_name': snap_name,
                         'smp_name': smp_name,
                         'msg': ex.message})

    def detach_snapshot(self, smp_name):
        """Detaches whatever snapshot is attached to `smp_name`."""
        lun = self.vnx.get_lun(name=smp_name)
        try:
            lun.detach_snap()
        except storops_ex.VNXSnapNotAttachedError as ex:
            # Idempotent retry: nothing attached is fine.
            LOG.warning(_LW("Snapshot mount point %(smp_name)s is not "
                            "currently attached. Message: %(msg)s"),
                        {'smp_name': smp_name, 'msg': ex.message})

    def modify_snapshot(self, snap_name, allow_rw=None, auto_delete=None):
        """Modifies the rw/auto-delete attributes of a snapshot."""
        snap = self.vnx.get_snap(name=snap_name)
        snap.modify(allow_rw=allow_rw, auto_delete=auto_delete)
    def create_consistency_group(self, cg_name, lun_id_list=None):
        """Creates a CG, tolerating a name clash caused by retry."""
        try:
            cg = self.vnx.create_cg(name=cg_name, members=lun_id_list)
        except storops_ex.VNXConsistencyGroupNameInUseError:
            cg = self.vnx.get_cg(name=cg_name)
        # Wait until cg is found on VNX, or deletion will fail afterwards
        utils.wait_until(Condition.is_object_existed, vnx_obj=cg)
        return cg

    def delete_consistency_group(self, cg_name):
        """Deletes the CG; a missing CG is treated as already deleted."""
        cg = self.vnx.get_cg(cg_name)
        try:
            cg.delete()
        except storops_ex.VNXConsistencyGroupNotFoundError:
            pass

    def create_cg_snapshot(self, cg_snap_name, cg_name):
        """Creates a snapshot of the whole CG, idempotent on retry."""
        cg = self.vnx.get_cg(cg_name)
        try:
            snap = cg.create_snap(cg_snap_name, allow_rw=True)
        except storops_ex.VNXSnapNameInUseError:
            snap = self.vnx.get_snap(cg_snap_name)
        utils.wait_until(Condition.is_object_existed,
                         vnx_obj=snap)
        return snap

    def delete_cg_snapshot(self, cg_snap_name):
        """Deletes a CG snapshot (same path as a plain snapshot)."""
        self.delete_snapshot(cg_snap_name)

    def get_serial(self):
        """Returns the array serial number."""
        return self.vnx.serial

    def get_pools(self):
        """Returns all storage pools on the array."""
        return self.vnx.get_pool()

    def get_pool(self, name):
        """Returns the storage pool with the given name."""
        return self.vnx.get_pool(name=name)
    def get_iscsi_targets(self, sp=None, port_id=None, vport_id=None):
        """Returns iSCSI ports (only those with an IP configured)."""
        return self.vnx.get_iscsi_port(sp=sp, port_id=port_id,
                                       vport_id=vport_id,
                                       has_ip=True)

    def get_fc_targets(self, sp=None, port_id=None):
        """Returns FC ports of the array."""
        return self.vnx.get_fc_port(sp=sp, port_id=port_id)

    def get_enablers(self):
        """Returns the NDU (enabler) packages installed on the array."""
        return self.vnx.get_ndu()

    def is_fast_enabled(self):
        """Whether the FAST (auto-tiering) feature is enabled."""
        return self.vnx.is_auto_tiering_enabled()

    def is_compression_enabled(self):
        """Whether the compression feature is enabled."""
        return self.vnx.is_compression_enabled()

    def is_dedup_enabled(self):
        """Whether the deduplication feature is enabled."""
        return self.vnx.is_dedup_enabled()

    def is_fast_cache_enabled(self):
        """Whether the FAST Cache feature is enabled."""
        return self.vnx.is_fast_cache_enabled()

    def is_thin_enabled(self):
        """Whether the thin provisioning feature is enabled."""
        return self.vnx.is_thin_enabled()

    def is_snap_enabled(self):
        """Whether the snapshot feature is enabled."""
        return self.vnx.is_snap_enabled()

    def is_mirror_view_enabled(self):
        """Whether the MirrorView/S (sync replication) feature is enabled."""
        return self.vnx.is_mirror_view_sync_enabled()

    def get_pool_feature(self):
        """Returns the array-wide pool feature object."""
        return self.vnx.get_pool_feature()
def lun_has_snapshot(self, lun):
"""Checks lun has snapshot.
:param lun: instance of VNXLun
"""
snaps = lun.get_snap()
return len(snaps) != 0
    def enable_compression(self, lun):
        """Enables compression on lun.

        :param lun: instance of VNXLun
        """
        try:
            lun.enable_compression(ignore_thresholds=True)
        except storops_ex.VNXCompressionAlreadyEnabledError:
            # Idempotent: compression already on is not an error.
            LOG.warning(_LW("Compression has already been enabled on %s."),
                        lun.name)

    def get_vnx_enabler_status(self):
        """Returns a VNXEnablerStatus summarizing enabled features."""
        return common.VNXEnablerStatus(
            dedup=self.is_dedup_enabled(),
            compression=self.is_compression_enabled(),
            thin=self.is_thin_enabled(),
            fast=self.is_fast_enabled(),
            snap=self.is_snap_enabled())
    def create_storage_group(self, name):
        """Creates (or fetches, on retry) the storage group `name`."""
        try:
            return self.vnx.create_sg(name)
        except storops_ex.VNXStorageGroupNameInUseError as ex:
            # Ignore the failure due to retry
            LOG.warning(_LW('Storage group %(name)s already exists. '
                            'Message: %(msg)s'),
                        {'name': name, 'msg': ex.message})
            return self.vnx.get_sg(name=name)

    def get_storage_group(self, name):
        """Returns the storage group with the given name."""
        return self.vnx.get_sg(name)
def register_initiator(self, storage_group, host, initiator_port_map):
"""Registers the initiators of `host` to the `storage_group`.
:param storage_group: the storage group object.
:param host: the ip and name information of the initiator.
:param initiator_port_map: the dict specifying which initiators are
bound to which ports.
"""
for (initiator_id, ports_to_bind) in initiator_port_map.items():
for port in ports_to_bind:
try:
storage_group.connect_hba(port, initiator_id, host.name,
host_ip=host.ip)
except storops_ex.VNXStorageGroupError as ex:
LOG.warning(_LW('Failed to set path to port %(port)s for '
'initiator %(hba_id)s. Message: %(msg)s'),
{'port': port, 'hba_id': initiator_id,
'msg': ex.message})
if initiator_port_map:
utils.update_res_with_poll(storage_group)
    def ping_node(self, port, ip_address):
        """Pings `ip_address` from the given iSCSI port.

        :returns: True when the ping succeeds, False otherwise.
        """
        iscsi_port = self.get_iscsi_targets(sp=port.sp,
                                            port_id=port.port_id,
                                            vport_id=port.vport_id)
        try:
            iscsi_port.ping_node(ip_address, count=1)
            return True
        except storops_ex.VNXPingNodeError:
            return False
def add_lun_to_sg(self, storage_group, lun, max_retries):
"""Adds the `lun` to `storage_group`."""
try:
return storage_group.attach_alu(lun, max_retries)
except storops_ex.VNXAluAlreadyAttachedError as ex:
# Ignore the failure due to retry.
return storage_group.get_hlu(lun)
except storops_ex.VNXNoHluAvailableError as ex:
with excutils.save_and_reraise_exception():
# Reach the max times of retry, fail the attach action.
LOG.error(_LE('Failed to add %(lun)s into %(sg)s after '
'%(tried)s tries. Reach the max retry times. '
'Message: %(msg)s'),
{'lun': lun.lun_id, 'sg': storage_group.name,
'tried': max_retries, 'msg': ex.message})
def get_wwn_of_online_fc_ports(self, ports):
"""Returns wwns of online fc ports.
wwn of a certain port will not be included in the return list when it
is not present or down.
"""
wwns = set()
ports_with_all_info = self.vnx.get_fc_port()
for po in ports:
online_list = list(
filter(lambda p: (p == po and p.link_status == 'Up'
and p.port_status == 'Online'),
ports_with_all_info))
wwns.update([p.wwn for p in online_list])
return list(wwns)
    def sg_has_lun_attached(self, sg):
        """Whether the storage group has any LUN attached."""
        return bool(sg.get_alu_hlu_map())

    def deregister_initiators(self, initiators):
        """Removes the given initiator uid (or list of uids) from the VNX."""
        if not isinstance(initiators, list):
            initiators = [initiators]
        for initiator_uid in initiators:
            self.vnx.remove_hba(initiator_uid)
def update_consistencygroup(self, cg, lun_ids_to_add, lun_ids_to_remove):
lun_ids_in_cg = (set([l.lun_id for l in cg.lun_list]) if cg.lun_list
else set())
# lun_ids_to_add and lun_ids_to_remove never overlap.
lun_ids_updated = ((lun_ids_in_cg | set(lun_ids_to_add)) -
set(lun_ids_to_remove))
if lun_ids_updated:
cg.replace_member(*[self.get_lun(lun_id=lun_id)
for lun_id in lun_ids_updated])
else:
# Need to remove all LUNs from cg. However, replace_member cannot
# handle empty list. So use delete_member.
cg.delete_member(*[self.get_lun(lun_id=lun_id)
for lun_id in lun_ids_in_cg])
    def get_cg(self, name):
        """Returns the consistency group with the given name."""
        return self.vnx.get_cg(name=name)

    def get_available_ip(self):
        """Returns the IP of an alive storage processor."""
        return self.vnx.alive_sp_ip

    def get_mirror(self, mirror_name):
        """Returns the mirror view with the given name."""
        return self.vnx.get_mirror_view(mirror_name)

    def create_mirror(self, mirror_name, primary_lun_id):
        """Creates a mirror view on the primary LUN.

        Returns the existing mirror view when the name is already in use,
        which keeps a retried call idempotent.

        :param mirror_name: name of the mirror view to create.
        :param primary_lun_id: id of the LUN to be mirrored.
        """
        src_lun = self.vnx.get_lun(lun_id=primary_lun_id)
        try:
            mv = self.vnx.create_mirror_view(mirror_name, src_lun)
        except storops_ex.VNXMirrorNameInUseError:
            mv = self.vnx.get_mirror_view(mirror_name)
        return mv

    def delete_mirror(self, mirror_name):
        """Deletes the mirror view, ignoring the not-found error."""
        mv = self.vnx.get_mirror_view(mirror_name)
        try:
            mv.delete()
        except storops_ex.VNXMirrorNotFoundError:
            pass
    def add_image(self, mirror_name, sp_ip, secondary_lun_id):
        """Adds a secondary image to the mirror and waits until synced.

        :param mirror_name: name of the mirror view.
        :param sp_ip: IP of the secondary system's storage processor.
        :param secondary_lun_id: id of the secondary LUN.
        """
        mv = self.vnx.get_mirror_view(mirror_name)
        mv.add_image(sp_ip, secondary_lun_id)
        # Secondary image info usually did not appear, so
        # here add a poll to update.
        utils.update_res_with_poll(mv)
        utils.wait_until(Condition.is_mirror_synced, mirror=mv)

    def remove_image(self, mirror_name):
        """Removes the secondary image from the mirror view."""
        mv = self.vnx.get_mirror_view(mirror_name)
        mv.remove_image()

    def fracture_image(self, mirror_name):
        """Fractures the secondary image of the mirror view."""
        mv = self.vnx.get_mirror_view(mirror_name)
        mv.fracture_image()

    def sync_image(self, mirror_name):
        """Starts a sync on the mirror and waits until it is synchronized."""
        mv = self.vnx.get_mirror_view(mirror_name)
        mv.sync_image()
        utils.wait_until(Condition.is_mirror_synced, mirror=mv)

    def promote_image(self, mirror_name):
        """Promotes the secondary image of the mirror view."""
        mv = self.vnx.get_mirror_view(mirror_name)
        mv.promote_image()

    def get_pool_name(self, lun_name):
        """Returns the name of the pool that hosts the named LUN."""
        lun = self.get_lun(name=lun_name)
        # The cached pool name is good enough; skip the poll.
        utils.update_res_without_poll(lun)
        return lun.pool_name

View File

@ -0,0 +1,483 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VNX Common Utils
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
storops = importutils.try_import('storops')
from cinder import exception
from cinder.i18n import _, _LW
from cinder.volume.drivers.emc.vnx import const
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 60 * 60 * 24 * 365
INTERVAL_5_SEC = 5
INTERVAL_20_SEC = 20
INTERVAL_30_SEC = 30
INTERVAL_60_SEC = 60
# Driver options registered under each backend section in cinder.conf.
EMC_VNX_OPTS = [
    cfg.StrOpt('storage_vnx_authentication_type',
               default='global',
               help='VNX authentication scope type. '
                    'By default, the value is global.'),
    cfg.StrOpt('storage_vnx_security_file_dir',
               help='Directory path that contains the VNX security file. '
                    'Make sure the security file is generated first.'),
    cfg.StrOpt('naviseccli_path',
               help='Naviseccli Path.'),
    cfg.ListOpt('storage_vnx_pool_names',
                help='Comma-separated list of storage pool names to be used.'),
    cfg.IntOpt('default_timeout',
               default=DEFAULT_TIMEOUT,
               help='Default timeout for CLI operations in minutes. '
                    'For example, LUN migration is a typical long '
                    'running operation, which depends on the LUN size and '
                    'the load of the array. '
                    'An upper bound in the specific deployment can be set to '
                    'avoid unnecessary long wait. '
                    'By default, it is 365 days long.'),
    cfg.IntOpt('max_luns_per_storage_group',
               default=255,
               help='Default max number of LUNs in a storage group.'
                    ' By default, the value is 255.'),
    cfg.BoolOpt('destroy_empty_storage_group',
                default=False,
                help='To destroy storage group '
                     'when the last LUN is removed from it. '
                     'By default, the value is False.'),
    # iscsi_initiators is a dict which key is string and value is a list.
    # This could be a DictOpt. Unfortunately DictOpt doesn't support the value
    # of list type.
    cfg.StrOpt('iscsi_initiators',
               help='Mapping between hostname and '
                    'its iSCSI initiator IP addresses.'),
    cfg.ListOpt('io_port_list',
                help='Comma separated iSCSI or FC ports '
                     'to be used in Nova or Cinder.'),
    cfg.BoolOpt('initiator_auto_registration',
                default=False,
                help='Automatically register initiators. '
                     'By default, the value is False.'),
    cfg.BoolOpt('initiator_auto_deregistration',
                default=False,
                help='Automatically deregister initiators after the related '
                     'storage group is destroyed. '
                     'By default, the value is False.'),
    cfg.BoolOpt('check_max_pool_luns_threshold',
                default=False,
                help='Report free_capacity_gb as 0 when the limit to '
                     'maximum number of pool LUNs is reached. '
                     'By default, the value is False.'),
    cfg.BoolOpt('force_delete_lun_in_storagegroup',
                default=False,
                help='Delete a LUN even if it is in Storage Groups. '
                     'By default, the value is False.'),
    cfg.BoolOpt('ignore_pool_full_threshold',
                default=False,
                help='Force LUN creation even if '
                     'the full threshold of pool is reached. '
                     'By default, the value is False.')
]

CONF.register_opts(EMC_VNX_OPTS)
PROTOCOL_FC = 'fc'
PROTOCOL_ISCSI = 'iscsi'
class ExtraSpecs(object):
    """Parsed volume-type extra specs relevant to the VNX driver."""

    _provision_key = 'provisioning:type'
    _tier_key = 'storagetype:tiering'
    _replication_key = 'replication_enabled'

    PROVISION_DEFAULT = const.PROVISION_THICK
    TIER_DEFAULT = None

    def __init__(self, extra_specs):
        """:param extra_specs: raw extra specs dict of a volume type."""
        self.specs = extra_specs
        self._provision = self._get_provision()
        self.provision = self._provision
        self._tier = self._get_tier()
        self.tier = self._tier
        self.apply_default_values()

    def apply_default_values(self):
        """Fills in the class-level defaults for unset provision/tier."""
        self.provision = (ExtraSpecs.PROVISION_DEFAULT
                          if self.provision is None
                          else self.provision)
        # Can not set Tier when provision is set to deduped. So don't set the
        # tier default when provision is deduped.
        if self.provision != storops.VNXProvisionEnum.DEDUPED:
            self.tier = (ExtraSpecs.TIER_DEFAULT if self.tier is None
                         else self.tier)

    @classmethod
    def set_defaults(cls, provision_default, tier_default):
        """Overrides the class-wide provision/tier defaults."""
        cls.PROVISION_DEFAULT = provision_default
        cls.TIER_DEFAULT = tier_default

    def _get_provision(self):
        """Parses the provisioning:type spec into a VNXProvisionEnum."""
        value = self._parse_to_enum(self._provision_key,
                                    storops.VNXProvisionEnum)
        return value

    def _get_tier(self):
        """Parses the storagetype:tiering spec into a VNXTieringEnum."""
        return self._parse_to_enum(self._tier_key, storops.VNXTieringEnum)

    @property
    def is_replication_enabled(self):
        # Use the shared key constant for consistency with the other specs.
        return (self.specs.get(self._replication_key, '').lower()
                == '<is> true')

    def _parse_to_enum(self, key, enum_class):
        """Parses the spec value of `key` into the given storops enum.

        :raises exception.InvalidVolumeType: when the value is not a valid
            member of `enum_class`.
        """
        value = (self.specs[key]
                 if key in self.specs else None)
        if value is not None:
            try:
                value = enum_class.parse(value)
            except ValueError:
                # BUG FIX: interpolate the message with %; the original
                # built a (msg, dict) tuple which reached the user
                # unformatted.
                reason = (_("The value %(value)s for key %(key)s in extra "
                            "specs is invalid.")
                          % {'key': key, 'value': value})
                raise exception.InvalidVolumeType(reason=reason)
        return value

    @classmethod
    def from_volume(cls, volume):
        """Builds an ExtraSpecs from a volume's volume type."""
        specs = {}
        type_id = volume['volume_type_id']
        if type_id is not None:
            specs = volume_types.get_volume_type_extra_specs(type_id)
        return cls(specs)

    @classmethod
    def from_volume_type(cls, type):
        """Builds an ExtraSpecs from a volume type dict."""
        return cls(type['extra_specs'])

    @classmethod
    def from_lun(cls, lun):
        """Builds an ExtraSpecs describing an existing VNX LUN."""
        ex = cls({})
        ex.provision = lun.provision
        # Tier is meaningless for deduped LUNs; see apply_default_values.
        ex.tier = (lun.tier
                   if lun.provision != storops.VNXProvisionEnum.DEDUPED
                   else None)
        return ex

    def match_with_lun(self, lun):
        """Whether this spec matches the LUN's provision/tier settings."""
        ex = ExtraSpecs.from_lun(lun)
        return (self.provision == ex.provision and
                self.tier == ex.tier)

    def validate(self, enabler_status):
        """Checks whether the extra specs are valid.

        :param enabler_status: Instance of VNXEnablerStatus
        :raises exception.InvalidVolumeType: when the specs require an
            enabler that is not installed on the array.
        :returns: True when all checks pass.
        """
        if "storagetype:pool" in self.specs:
            LOG.warning(_LW("Extra spec key 'storagetype:pool' is obsoleted "
                            "since driver version 5.1.0. This key will be "
                            "ignored."))

        if (self._provision == storops.VNXProvisionEnum.DEDUPED and
                self._tier is not None):
            msg = _("Can not set tiering policy for a deduplicated volume. "
                    "Set the tiering policy on the pool where the "
                    "deduplicated volume locates.")
            raise exception.InvalidVolumeType(reason=msg)

        if (self._provision == storops.VNXProvisionEnum.COMPRESSED
                and not enabler_status.compression_enabled):
            msg = _("Compression Enabler is not installed. "
                    "Can not create compressed volume.")
            raise exception.InvalidVolumeType(reason=msg)

        if (self._provision == storops.VNXProvisionEnum.DEDUPED
                and not enabler_status.dedup_enabled):
            msg = _("Deduplication Enabler is not installed. "
                    "Can not create deduplicated volume.")
            raise exception.InvalidVolumeType(reason=msg)

        if (self._provision in [storops.VNXProvisionEnum.THIN,
                                storops.VNXProvisionEnum.COMPRESSED,
                                storops.VNXProvisionEnum.DEDUPED]
                and not enabler_status.thin_enabled):
            msg = _("ThinProvisioning Enabler is not installed. "
                    "Can not create thin volume.")
            raise exception.InvalidVolumeType(reason=msg)

        if (self._tier is not None
                and not enabler_status.fast_enabled):
            msg = _("FAST VP Enabler is not installed. "
                    "Can not set tiering policy for the volume.")
            raise exception.InvalidVolumeType(reason=msg)
        return True

    def __len__(self):
        return len(self.specs)

    def __getitem__(self, key):
        return self.specs[key]

    def __iter__(self):
        return iter(self.specs)

    def __contains__(self, item):
        return item in self.specs

    def __eq__(self, other):
        if isinstance(other, ExtraSpecs):
            return self.specs == other.specs
        elif isinstance(other, dict):
            return self.specs == other
        else:
            return False

    def __hash__(self):
        return self.specs.__hash__()
class LUNState(object):
    """State strings reported by the VNX for a LUN."""
    INITIALIZING = 'Initializing'
    READY = 'Ready'
    FAULTED = 'Faulted'
class PoolState(object):
    """State strings reported by the VNX for a storage pool."""
    INITIALIZING = 'Initializing'
    OFFLINE = 'Offline'
    DELETING = 'Deleting'
    # NOTE(review): despite the name, this tuple lists the states in which
    # the pool is busy/unavailable -- presumably the states where LUN
    # creation should NOT proceed. Confirm against the callers.
    VALID_CREATE_LUN_STATE = (INITIALIZING, OFFLINE, DELETING)
class VNXEnablerStatus(object):
    """Snapshot of which VNX software enablers are installed."""

    def __init__(self, dedup=False, compression=False, fast=False,
                 thin=False, snap=False):
        # Each flag mirrors one enabler; all default to "not installed".
        self.dedup_enabled = dedup
        self.compression_enabled = compression
        self.fast_enabled = fast
        self.thin_enabled = thin
        self.snap_enabled = snap
class WaitUtilTimeoutException(exception.VolumeDriverException):
    """Raised when timeout occurs in wait_until."""
    # Subclasses VolumeDriverException so generic driver error handling
    # catches it.
    # TODO(Ryan) put this exception under Cinder shared module.
    pass
class Host(object):
    """The model of a host which acts as an initiator to access the storage."""

    def __init__(self, name, initiators, ip=None, wwpns=None):
        """:param name: host name; must be non-empty.
        :param initiators: non-empty list of initiator ids of the host.
        :param ip: optional IP address of the host.
        :param wwpns: optional FC wwpns of the host.
        :raises ValueError: when name or initiators is empty.
        """
        # ip and wwpns are optional.
        self.name = name
        if not self.name:
            # Wrap in _() for translation, consistent with the check below
            # (the original had bare doubled parentheses instead).
            raise ValueError(_('Name of host cannot be empty.'))
        self.initiators = initiators
        if not self.initiators:
            raise ValueError(_('Initiators of host cannot be empty.'))
        self.ip = ip
        self.wwpns = wwpns
class Volume(object):
    """The internal volume which is used to pass in method call."""

    def __init__(self, name, id, vnx_lun_id=None):
        # `id` mirrors the cinder volume id; the parameter name is part of
        # the caller-facing interface, so it keeps shadowing the builtin.
        self.name, self.id, self.vnx_lun_id = name, id, vnx_lun_id
class ISCSITargetData(dict):
    """iSCSI connection info shaped as nova/os-brick expect it."""

    def __init__(self, volume_id, is_discovered, iqn='unknown', iqns=None,
                 portal='unknown', portals=None, lun='unknown', luns=None):
        data = {'volume_id': volume_id, 'target_discovered': is_discovered,
                'target_iqn': iqn, 'target_iqns': iqns,
                'target_portal': portal, 'target_portals': portals,
                'target_lun': lun, 'target_luns': luns}
        self['driver_volume_type'] = 'iscsi'
        self['data'] = data

    def to_dict(self):
        """Converts to a plain dict.

        It helps serialize and deserialize the data before returning to nova.
        """
        # dict(self) shallow-copies into a plain dict in one step instead of
        # the identity dict comprehension.
        return dict(self)
class FCTargetData(dict):
    """Fibre Channel connection info shaped as nova/os-brick expect it."""

    def __init__(self, volume_id, is_discovered, wwn=None, lun=None,
                 initiator_target_map=None):
        data = {'volume_id': volume_id, 'target_discovered': is_discovered,
                'target_lun': lun, 'target_wwn': wwn,
                'initiator_target_map': initiator_target_map}
        self['driver_volume_type'] = 'fibre_channel'
        self['data'] = data

    def to_dict(self):
        """Converts to a plain dict.

        It helps serialize and deserialize the data before returning to nova.
        """
        # dict(self) shallow-copies into a plain dict in one step instead of
        # the identity dict comprehension.
        return dict(self)
class ReplicationDevice(object):
    """Read-only view over one `replication_device` entry in cinder.conf.

    Each property indexes the underlying dict directly, so accessing an
    option that is absent from the entry raises KeyError.
    """

    def __init__(self, replication_device):
        # replication_device: dict parsed from one replication_device option.
        self.replication_device = replication_device

    @property
    def backend_id(self):
        return self.replication_device['backend_id']

    @property
    def san_ip(self):
        return self.replication_device['san_ip']

    @property
    def san_login(self):
        return self.replication_device['san_login']

    @property
    def san_password(self):
        return self.replication_device['san_password']

    @property
    def storage_vnx_authentication_type(self):
        return self.replication_device['storage_vnx_authentication_type']

    @property
    def storage_vnx_security_file_dir(self):
        return self.replication_device['storage_vnx_security_file_dir']
class ReplicationDeviceList(list):
    """Replication devices configured in cinder.conf

    Cinder supports multiple replication_device, while VNX driver
    only support one replication_device for now.
    """

    def __init__(self, configuration):
        # Backing list and a per-backend-id lookup map, both filled from
        # the driver configuration below.
        self.list = []
        self.configuration = configuration
        self._device_map = dict()
        self.parse_configuration()

    def parse_configuration(self):
        """Parses configuration.replication_device into the device map."""
        if self.configuration.replication_device:
            for replication_device in self.configuration.replication_device:
                rd = ReplicationDevice(replication_device)
                self._device_map[rd.backend_id] = rd
                self.list.append(rd)
        return self._device_map

    def get_device(self, backend_id):
        """Returns the device with `backend_id`, or None with a warning."""
        try:
            device = self._device_map[backend_id]
        except KeyError:
            device = None
            LOG.warning(_LW('Unable to find secondary device named: %s'),
                        backend_id)
        return device

    @property
    def devices(self):
        return self._device_map.values()

    def __len__(self):
        return len(self.list)

    def __iter__(self):
        # NOTE(review): a single iterator is stored on the instance, so
        # nested or concurrent iterations of the same object interfere --
        # confirm callers only ever run one loop at a time.
        self._iter = self.list.__iter__()
        return self

    def next(self):
        return next(self._iter)

    def __next__(self):
        # Python 3 iteration protocol; delegates to the py2-style next().
        return self.next()

    def __getitem__(self, item):
        return self.list[item]
class VNXMirrorView(object):
    """Facade coordinating MirrorView operations across two VNX systems.

    Mirror creation/deletion/image operations go through the primary
    system's client; secondary LUN management and promotion go through the
    secondary system's client.
    """

    def __init__(self, primary_client, secondary_client):
        self.primary_client = primary_client
        self.secondary_client = secondary_client

    def create_mirror(self, name, primary_lun_id):
        """Creates the mirror view on the primary system."""
        self.primary_client.create_mirror(name, primary_lun_id)

    def create_secondary_lun(self, pool_name, lun_name, size, provision, tier):
        """Creates the mirror target LUN on the secondary system."""
        return self.secondary_client.create_lun(
            pool_name, lun_name, size, provision, tier)

    def delete_secondary_lun(self, lun_name):
        """Deletes the mirror target LUN on the secondary system."""
        self.secondary_client.delete_lun(lun_name)

    def delete_mirror(self, mirror_name):
        """Deletes the mirror view on the primary system."""
        self.primary_client.delete_mirror(mirror_name)

    def add_image(self, mirror_name, secondary_lun_id):
        """Attaches the secondary LUN as the mirror's secondary image."""
        sp_ip = self.secondary_client.get_available_ip()
        self.primary_client.add_image(mirror_name, sp_ip, secondary_lun_id)

    def remove_image(self, mirror_name):
        """Removes the secondary image from the mirror."""
        self.primary_client.remove_image(mirror_name)

    def fracture_image(self, mirror_name):
        """Fractures the secondary image of the mirror."""
        self.primary_client.fracture_image(mirror_name)

    def promote_image(self, mirror_name):
        # Promotion is driven from the secondary system (failover path).
        self.secondary_client.promote_image(mirror_name)

    def destroy_mirror(self, mirror_name, secondary_lun_name):
        """Destroy the mirror view's related VNX objects.

        NOTE: primary lun will not be deleted here.

        :param mirror_name: name of mirror to be destroyed
        :param secondary_lun_name: name of LUN name
        """
        mv = self.primary_client.get_mirror(mirror_name)
        if not mv.existed:
            # We will skip the mirror operations if not existed
            LOG.warning(_LW('Mirror view %s was deleted already.'),
                        mirror_name)
            return
        self.fracture_image(mirror_name)
        self.remove_image(mirror_name)
        self.delete_mirror(mirror_name)
        self.delete_secondary_lun(lun_name=secondary_lun_name)

View File

@ -0,0 +1,39 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VNX Constants
This module includes re-declaration from storops which directly used
by driver in module scope. That's to say:
If a constant from storops is used in class level, function signature,
module level, a re-declaration is needed in this file to avoid some static
import error when storops is not installed.
"""
from oslo_utils import importutils

storops = importutils.try_import('storops')

# Re-declare the storops names that driver modules reference at import time
# so those modules still import cleanly when storops is absent (see the
# module docstring above).
if storops:
    from storops import exception as storops_ex
    VNXLunPreparingError = storops_ex.VNXLunPreparingError
    VNXTargetNotReadyError = storops_ex.VNXTargetNotReadyError
    MIGRATION_RATE_HIGH = storops.VNXMigrationRate.HIGH
    PROVISION_THICK = storops.VNXProvisionEnum.THICK
else:
    # Placeholders keep module-level references resolvable without storops.
    VNXLunPreparingError = None
    MIGRATION_RATE_HIGH = None
    PROVISION_THICK = None
    VNXTargetNotReadyError = None

View File

@ -1,4 +1,4 @@
# Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -12,13 +12,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fibre Channel Driver for EMC VNX array based on CLI."""
"""Cinder Driver for EMC VNX based on CLI."""
from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.volume.drivers.emc.vnx import adapter
from cinder.volume.drivers.emc.vnx import common
from cinder.volume.drivers.emc.vnx import utils
from cinder.zonemanager import utils as zm_utils
@ -26,13 +28,17 @@ LOG = logging.getLogger(__name__)
@interface.volumedriver
class EMCCLIFCDriver(driver.FibreChannelDriver):
"""EMC FC Driver for VNX using CLI.
class EMCVNXDriver(driver.TransferVD,
driver.ManageableVD,
driver.ExtendVD,
driver.SnapshotVD,
driver.ManageableSnapshotsVD,
driver.MigrateVD,
driver.ConsistencyGroupVD,
driver.BaseVD):
"""EMC Cinder Driver for VNX using CLI.
Version history:
.. code-block:: none
1.0.0 - Initial driver
2.0.0 - Thick/thin provisioning, robust enhancement
3.0.0 - Array-based Backend Support, FC Basic Support,
@ -66,54 +72,63 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
7.0.0 - Clone consistency group support
Replication v2 support(managed)
Configurable migration rate support
8.0.0 - New VNX Cinder driver
"""
def __init__(self, *args, **kwargs):
super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
self.cli = emc_vnx_cli.getEMCVnxCli(
'FC',
configuration=self.configuration,
active_backend_id=kwargs.get('active_backend_id'))
self.VERSION = self.cli.VERSION
super(EMCVNXDriver, self).__init__(*args, **kwargs)
utils.init_ops(self.configuration)
self.protocol = self.configuration.storage_protocol.lower()
self.active_backend_id = kwargs.get('active_backend_id', None)
self.adapter = None
def do_setup(self, context):
if self.protocol == common.PROTOCOL_FC:
self.adapter = adapter.FCAdapter(self.configuration,
self.active_backend_id)
else:
self.adapter = adapter.ISCSIAdapter(self.configuration,
self.active_backend_id)
self.adapter.do_setup()
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a volume."""
return self.cli.create_volume(volume)
return self.adapter.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.cli.create_volume_from_snapshot(volume, snapshot)
return self.adapter.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
return self.cli.create_cloned_volume(volume, src_vref)
return self.adapter.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
self.cli.extend_volume(volume, new_size)
self.adapter.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
self.cli.delete_volume(volume)
self.adapter.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate volume via EMC migration functionality."""
return self.cli.migrate_volume(ctxt, volume, host)
return self.adapter.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.cli.retype(ctxt, volume, new_type, diff, host)
return self.adapter.retype(ctxt, volume, new_type, diff, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.cli.create_snapshot(snapshot)
self.adapter.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.cli.delete_snapshot(snapshot)
self.adapter.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
@ -144,22 +159,8 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
FC:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'initiator_target_map': {
'1122334455667788': ['1234567890123']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
@ -171,28 +172,44 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
}
}
}
iSCSI:
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
'iqn.2010-10.org.openstack:volume-00002'],
'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
'target_luns': [1, 1],
}
}
"""
conn_info = self.cli.initialize_connection(volume,
connector)
LOG.debug("Entering initialize_connection"
" - connector: %(connector)s.",
{'connector': connector})
conn_info = self.adapter.initialize_connection(volume,
connector)
LOG.debug("Exit initialize_connection"
" - Returning FC connection info: %(conn_info)s.",
" - Returning connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
@zm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
LOG.debug("Entering terminate_connection"
" - connector: %(connector)s.",
{'connector': connector})
conn_info = self.adapter.terminate_connection(volume, connector)
LOG.debug("Exit terminate_connection"
" - Returning FC connection info: %(conn_info)s.",
" - Returning connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
:param refresh: True to get updated data
"""
if refresh:
self.update_volume_stats()
@ -202,11 +219,7 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = self.cli.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
self._stats = self.adapter.update_volume_stats()
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
@ -217,101 +230,96 @@ class EMCCLIFCDriver(driver.FibreChannelDriver):
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
.. code-block:: none
manage_existing_ref:{
'source-id':<lun id in VNX>
}
or
manage_existing_ref:{
'source-name':<lun name in VNX>
}
manage_existing_ref:{
'source-id':<lun id in VNX>
}
or
manage_existing_ref:{
'source-name':<lun name in VNX>
}
"""
return self.cli.manage_existing(volume, existing_ref)
return self.adapter.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing."""
return self.cli.manage_existing_get_size(volume, existing_ref)
return self.adapter.manage_existing_get_size(volume, existing_ref)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
return self.cli.create_consistencygroup(context, group)
return self.adapter.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
return self.cli.delete_consistencygroup(
return self.adapter.delete_consistencygroup(
context, group, volumes)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
return self.cli.create_cgsnapshot(
return self.adapter.create_cgsnapshot(
context, cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
return self.cli.delete_cgsnapshot(
return self.adapter.delete_cgsnapshot(
context, cgsnapshot, snapshots)
def get_pool(self, volume):
"""Returns the pool name of a volume."""
return self.cli.get_pool(volume)
return self.adapter.get_pool_name(volume)
def update_consistencygroup(self, context, group,
add_volumes,
remove_volumes):
"""Updates LUNs in consistency group."""
return self.cli.update_consistencygroup(context, group,
add_volumes,
remove_volumes)
return self.adapter.update_consistencygroup(context, group,
add_volumes,
remove_volumes)
def unmanage(self, volume):
"""Unmanages a volume."""
return self.cli.unmanage(volume)
return self.adapter.unmanage(volume)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
"""Creates a consistency group from source."""
return self.cli.create_consistencygroup_from_src(context,
group,
volumes,
cgsnapshot,
snapshots,
source_cg,
source_vols)
if cgsnapshot:
return self.adapter.create_cg_from_cgsnapshot(
context, group, volumes, cgsnapshot, snapshots)
elif source_cg:
return self.adapter.create_cloned_cg(
context, group, volumes, source_cg, source_vols)
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status=None):
"""Returns model update for migrated volume."""
return self.cli.update_migrated_volume(context, volume, new_volume,
original_volume_status)
return self.adapter.update_migrated_volume(context, volume, new_volume,
original_volume_status)
def create_export_snapshot(self, context, snapshot, connector):
"""Creates a snapshot mount point for snapshot."""
return self.cli.create_export_snapshot(context, snapshot, connector)
return self.adapter.create_export_snapshot(
context, snapshot, connector)
def remove_export_snapshot(self, context, snapshot):
"""Removes snapshot mount point for snapshot."""
return self.cli.remove_export_snapshot(context, snapshot)
return self.adapter.remove_export_snapshot(context, snapshot)
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Allows connection to snapshot."""
return self.cli.initialize_connection_snapshot(snapshot,
connector,
**kwargs)
return self.adapter.initialize_connection_snapshot(snapshot,
connector,
**kwargs)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Disallows connection to snapshot."""
return self.cli.terminate_connection_snapshot(snapshot,
connector,
**kwargs)
return self.adapter.terminate_connection_snapshot(snapshot,
connector,
**kwargs)
def backup_use_temp_snapshot(self):
return True
def failover_host(self, context, volumes, secondary_id=None):
"""Failovers volume from primary device to secondary."""
return self.cli.failover_host(context, volumes, secondary_id)
"""Fail-overs volumes from primary device to secondary."""
return self.adapter.failover_host(context, volumes, secondary_id)

View File

@ -0,0 +1,579 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
storops = importutils.try_import('storops')
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task
from taskflow.types import failure
from cinder import exception
from cinder.volume.drivers.emc.vnx import const
from cinder.volume.drivers.emc.vnx import utils
from cinder.i18n import _, _LI, _LW
LOG = logging.getLogger(__name__)
class MigrateLunTask(task.Task):
    """Starts a migration between two LUNs/SMPs.

    Reversion strategy: Cleanup the migration session
    """

    def __init__(self, name=None, provides=None, inject=None,
                 rebind=None, wait_for_completion=True):
        # wait_for_completion: when True, execute() blocks until the
        # migration session finishes and verifies the result.
        super(MigrateLunTask, self).__init__(name=name,
                                             provides=provides,
                                             inject=inject,
                                             rebind=rebind)
        self.wait_for_completion = wait_for_completion

    def execute(self, client, src_id, dst_id, *args, **kwargs):
        """Starts (and optionally verifies) the src -> dst migration.

        :raises exception.VolumeBackendAPIException: when verification
            reports the migration did not complete.
        """
        LOG.debug('%s.execute', self.__class__.__name__)
        # Capture the destination wwn before starting; verify_migration
        # needs it afterwards.
        dst_lun = client.get_lun(lun_id=dst_id)
        dst_wwn = dst_lun.wwn
        client.migrate_lun(src_id, dst_id)
        if self.wait_for_completion:
            migrated = client.verify_migration(src_id, dst_id, dst_wwn)
            if not migrated:
                msg = _("Failed to migrate volume between source vol %(src)s"
                        " and dest vol %(dst)s.") % {
                    'src': src_id, 'dst': dst_id}
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

    def revert(self, result, client, src_id, dst_id, *args, **kwargs):
        """Cleans up the partially-created migration session."""
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method)s: cleanup migration session: '
                        '%(src_id)s -> %(dst_id)s.'),
                    {'method': method_name,
                     'src_id': src_id,
                     'dst_id': dst_id})
        client.cleanup_migration(src_id, dst_id)
class CreateLunTask(task.Task):
    """Creates a new lun task.

    Reversion strategy: Delete the lun.

    Provides a 2-tuple ``(new_lun_id, new_lun_wwn)`` to the flow store.
    """
    def __init__(self, name=None, provides=('new_lun_id', 'new_lun_wwn'),
                 inject=None):
        super(CreateLunTask, self).__init__(name=name,
                                            provides=provides,
                                            inject=inject)
        # execute() returns two values, so `provides` must name both.
        if provides and not isinstance(provides, tuple):
            raise ValueError('Only tuple is allowed for [provides].')

    def execute(self, client, pool_name, lun_name, lun_size,
                provision, tier, ignore_thresholds=False,
                *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        lun = client.create_lun(pool=pool_name,
                                name=lun_name,
                                size=lun_size,
                                provision=provision,
                                tier=tier,
                                ignore_thresholds=ignore_thresholds)
        return lun.lun_id, lun.wwn

    def revert(self, result, client, lun_name, *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        # If execute() itself failed there is no LUN to clean up.
        if isinstance(result, failure.Failure):
            return
        else:
            LOG.warning(_LW('%(method_name)s: delete lun %(lun_name)s'),
                        {'method_name': method_name, 'lun_name': lun_name})
            client.delete_lun(lun_name)
class CopySnapshotTask(task.Task):
    """Task to copy a volume snapshot/consistency group snapshot.

    Reversion Strategy: Delete the copied snapshot/cgsnapshot
    """
    def execute(self, client, snap_name, new_snap_name,
                *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        client.copy_snapshot(snap_name,
                             new_snap_name)

    def revert(self, result, client, snap_name, new_snap_name,
               *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method_name)s: delete the '
                        'copied snapshot %(new_name)s of '
                        '%(source_name)s.'),
                    {'method_name': method_name,
                     'new_name': new_snap_name,
                     'source_name': snap_name})
        client.delete_snapshot(new_snap_name)
class CreateSMPTask(task.Task):
    """Creates a snap mount point (SMP) for the source snapshot.

    Reversion strategy: Delete the SMP.

    Provides the created SMP's LUN id as ``smp_id`` (by default).
    """
    def __init__(self, name=None, provides='smp_id', inject=None):
        super(CreateSMPTask, self).__init__(name=name,
                                            provides=provides,
                                            inject=inject)

    def execute(self, client, smp_name, base_lun_name,
                *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        client.create_mount_point(base_lun_name, smp_name)
        # Re-read the SMP to obtain the LUN id assigned by the array.
        lun = client.get_lun(name=smp_name)
        return lun.lun_id

    def revert(self, result, client, smp_name, *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method_name)s: delete mount point %(name)s'),
                    {'method_name': method_name,
                     'name': smp_name})
        client.delete_lun(smp_name)
class AttachSnapTask(task.Task):
    """Attaches the snapshot to the SMP created before.

    Reversion strategy: Detach the SMP.
    """
    def execute(self, client, smp_name, snap_name,
                *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        client.attach_snapshot(smp_name, snap_name)

    def revert(self, result, client, smp_name, *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method_name)s: detach mount point %(smp_name)s'),
                    {'method_name': method_name,
                     'smp_name': smp_name})
        client.detach_snapshot(smp_name)
class CreateSnapshotTask(task.Task):
    """Creates a snapshot of a volume.

    Reversion Strategy: Delete the created snapshot.
    """
    def execute(self, client, snap_name, lun_id, *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        LOG.info(_LI('Create snapshot: %(snapshot)s: lun: %(lun)s'),
                 {'snapshot': snap_name,
                  'lun': lun_id})
        client.create_snapshot(lun_id, snap_name)

    def revert(self, result, client, snap_name, *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method_name)s: '
                        'delete temp snapshot %(snap_name)s'),
                    {'method_name': method_name,
                     'snap_name': snap_name})
        client.delete_snapshot(snap_name)
class AllowReadWriteTask(task.Task):
    """Task to modify a Snapshot to allow ReadWrite on it.

    Reversion strategy: set the snapshot back to read-only.
    """
    def execute(self, client, snap_name, *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        client.modify_snapshot(snap_name, allow_rw=True)

    def revert(self, result, client, snap_name, *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method_name)s: '
                        'setting snapshot %(snap_name)s to read-only.'),
                    {'method_name': method_name,
                     'snap_name': snap_name})
        client.modify_snapshot(snap_name, allow_rw=False)
class WaitMigrationsTask(task.Task):
    """Task to wait migrations to be completed.

    The three ``*_template`` arguments are printf-style key templates
    (e.g. ``'new_src_id_%s'``); one (src, dst, wwn) triple is expanded
    per member, and every expanded key is declared as a taskflow
    requirement so the values flow in through ``**kwargs``.
    """
    def __init__(self, src_id_template, dst_id_template,
                 dst_wwn_template, num_of_members, *args, **kwargs):
        self.migrate_tuples = [
            (src_id_template % x, dst_id_template % x, dst_wwn_template % x)
            for x in range(num_of_members)]
        src_id_keys = sorted(set(
            [src_id_template % i for i in range(num_of_members)]))
        dst_id_keys = sorted(set(
            [dst_id_template % i for i in range(num_of_members)]))
        dst_wwn_keys = sorted(set(
            [dst_wwn_template % i for i in range(num_of_members)]))
        super(WaitMigrationsTask, self).__init__(
            requires=(src_id_keys + dst_id_keys + dst_wwn_keys),
            *args, **kwargs)

    def execute(self, client, *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        # Verify each pending session in order; fail fast on the first
        # migration that did not complete.
        for src_id_key, dst_id_key, dst_wwn_key in self.migrate_tuples:
            src_id = kwargs[src_id_key]
            dst_id = kwargs[dst_id_key]
            dst_wwn = kwargs[dst_wwn_key]
            migrated = client.verify_migration(src_id,
                                               dst_id,
                                               dst_wwn)
            if not migrated:
                msg = _("Failed to migrate volume %(src)s.") % {'src': src_id}
                raise exception.VolumeBackendAPIException(data=msg)
class CreateConsistencyGroupTask(task.Task):
    """Task to create a consistency group.

    ``lun_id_key_template`` is a printf-style key template expanded per
    member; the expanded keys are declared as taskflow requirements so
    the LUN ids arrive through ``**kwargs``.
    """
    def __init__(self, lun_id_key_template, num_of_members,
                 *args, **kwargs):
        self.lun_id_keys = sorted(set(
            [lun_id_key_template % i for i in range(num_of_members)]))
        super(CreateConsistencyGroupTask, self).__init__(
            requires=self.lun_id_keys, *args, **kwargs)

    def execute(self, client, new_cg_name, *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        lun_ids = [kwargs[key] for key in self.lun_id_keys]
        client.create_consistency_group(new_cg_name,
                                        lun_ids)
class CreateCGSnapshotTask(task.Task):
    """Task to create a CG snapshot.

    Reversion strategy: delete the created CG snapshot.
    """
    def __init__(self, provides='new_cg_snap_name', *args, **kwargs):
        super(CreateCGSnapshotTask, self).__init__(
            provides=provides, *args, **kwargs)

    def execute(self, client, cg_snap_name, cg_name, *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        return client.create_cg_snapshot(cg_snap_name, cg_name)

    # NOTE(review): unlike the sibling tasks, this revert does not take
    # an explicit `result` argument -- taskflow delivers it via
    # **kwargs. Confirm this is intentional.
    def revert(self, client, cg_snap_name, cg_name, *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method_name)s: '
                        'deleting CG snapshot %(snap_name)s.'),
                    {'method_name': method_name,
                     'snap_name': cg_snap_name})
        client.delete_cg_snapshot(cg_snap_name)
class CreateMirrorTask(task.Task):
    """Creates a MirrorView with primary lun for replication.

    Reversion strategy: Destroy the created MirrorView.
    """
    def execute(self, mirror, mirror_name, primary_lun_id,
                *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        mirror.create_mirror(mirror_name, primary_lun_id)

    def revert(self, result, mirror, mirror_name,
               *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method)s: removing mirror '
                        'view %(name)s.'),
                    {'method': method_name,
                     'name': mirror_name})
        mirror.delete_mirror(mirror_name)
class AddMirrorImageTask(task.Task):
    """Add the secondary image to MirrorView.

    Reversion strategy: Remove the secondary image.
    """
    def execute(self, mirror, mirror_name, secondary_lun_id,
                *args, **kwargs):
        LOG.debug('%s.execute', self.__class__.__name__)
        mirror.add_image(mirror_name, secondary_lun_id)

    def revert(self, result, mirror, mirror_name,
               *args, **kwargs):
        method_name = '%s.revert' % self.__class__.__name__
        LOG.warning(_LW('%(method)s: removing secondary image '
                        'from %(name)s.'),
                    {'method': method_name,
                     'name': mirror_name})
        mirror.remove_image(mirror_name)
def run_migration_taskflow(client,
                           lun_id,
                           lun_name,
                           lun_size,
                           pool_name,
                           provision,
                           tier,
                           rate=const.MIGRATION_RATE_HIGH):
    """Migrates LUN `lun_id` onto a newly created LUN in `pool_name`.

    # Step 1: create target LUN
    # Step 2: start and migrate migration session

    :param client: VNX client used for all array operations.
    :param lun_id: id of the source LUN to migrate.
    :param lun_name: name of the source LUN; the temporary target LUN
        name is derived from it.
    :param lun_size: size of the target LUN.
    :param provision: provisioning type for the target LUN.
    :param tier: tiering policy for the target LUN.
    :param rate: migration rate.
    """
    # TODO(review): `rate` is accepted but never placed in the store
    # nor forwarded to MigrateLunTask/client.migrate_lun -- confirm
    # whether it should be.
    tmp_lun_name = utils.construct_tmp_lun_name(lun_name)
    flow_name = 'migrate_lun'
    store_spec = {'client': client,
                  'pool_name': pool_name,
                  'lun_name': tmp_lun_name,
                  'lun_size': lun_size,
                  'provision': provision,
                  'tier': tier,
                  'ignore_thresholds': True,
                  'src_id': lun_id,
                  }
    work_flow = linear_flow.Flow(flow_name)
    # CreateLunTask provides 'new_lun_id', which is rebound to
    # MigrateLunTask's 'dst_id' input.
    work_flow.add(CreateLunTask(),
                  MigrateLunTask(rebind={'dst_id': 'new_lun_id'}))
    engine = taskflow.engines.load(
        work_flow, store=store_spec)
    engine.run()
def fast_create_volume_from_snapshot(client,
                                     snap_name,
                                     new_snap_name,
                                     lun_name,
                                     base_lun_name,
                                     pool_name):
    """Creates a snapcopy volume from a snapshot (no LUN migration).

    # Step 1: copy snapshot
    # Step 2: allow read/write for snapshot
    # Step 3: create smp LUN
    # Step 4: attach the snapshot

    :returns: the LUN id of the created SMP.
    """
    flow_name = 'create_snapcopy_volume_from_snapshot'
    store_spec = {'client': client,
                  'snap_name': snap_name,
                  'new_snap_name': new_snap_name,
                  'pool_name': pool_name,
                  'smp_name': lun_name,
                  'base_lun_name': base_lun_name,
                  'ignore_thresholds': True,
                  }
    work_flow = linear_flow.Flow(flow_name)
    # The copied snapshot (new_snap_name) is what gets modified and
    # attached, hence the rebinds of 'snap_name'.
    work_flow.add(CopySnapshotTask(),
                  AllowReadWriteTask(rebind={'snap_name': 'new_snap_name'}),
                  CreateSMPTask(),
                  AttachSnapTask(rebind={'snap_name': 'new_snap_name'}))
    engine = taskflow.engines.load(
        work_flow, store=store_spec)
    engine.run()
    lun_id = engine.storage.fetch('smp_id')
    return lun_id
def create_volume_from_snapshot(client, snap_name, lun_name,
                                lun_size, base_lun_name, pool_name,
                                provision, tier):
    """Creates an independent volume from a snapshot via LUN migration.

    # Step 1: create smp from base lun
    # Step 2: attach snapshot to smp
    # Step 3: Create new LUN
    # Step 4: migrate the smp to new LUN

    :returns: the LUN id fetched under 'smp_id' after the migration
        flow has run.
    """
    tmp_lun_name = '%s_dest' % lun_name
    flow_name = 'create_volume_from_snapshot'
    store_spec = {'client': client,
                  'snap_name': snap_name,
                  'smp_name': lun_name,
                  'lun_name': tmp_lun_name,
                  'lun_size': lun_size,
                  'base_lun_name': base_lun_name,
                  'pool_name': pool_name,
                  'provision': provision,
                  'tier': tier,
                  }
    work_flow = linear_flow.Flow(flow_name)
    work_flow.add(CreateSMPTask(),
                  AttachSnapTask(),
                  CreateLunTask(),
                  MigrateLunTask(
                      rebind={'src_id': 'smp_id',
                              'dst_id': 'new_lun_id'}))
    engine = taskflow.engines.load(
        work_flow, store=store_spec)
    engine.run()
    lun_id = engine.storage.fetch('smp_id')
    return lun_id
def fast_create_cloned_volume(client, snap_name, lun_id,
                              lun_name, base_lun_name):
    """Clones a volume as a snapcopy: snapshot + SMP + attach.

    :returns: the LUN id of the created SMP.
    """
    flow_name = 'create_cloned_snapcopy_volume'
    store_spec = {
        'client': client,
        'snap_name': snap_name,
        'lun_id': lun_id,
        'smp_name': lun_name,
        'base_lun_name': base_lun_name}
    work_flow = linear_flow.Flow(flow_name)
    work_flow.add(CreateSnapshotTask(),
                  CreateSMPTask(),
                  AttachSnapTask())
    engine = taskflow.engines.load(work_flow, store=store_spec)
    engine.run()
    lun_id = engine.storage.fetch('smp_id')
    return lun_id
def create_cloned_volume(client, snap_name, lun_id, lun_name,
                         lun_size, base_lun_name, pool_name,
                         provision, tier):
    """Clones a volume into an independent LUN via snapshot + migration.

    Steps: snapshot the source, create an SMP, attach the snapshot to
    it, create the target LUN, then migrate the SMP onto the target.

    :returns: the LUN id fetched under 'smp_id' after the migration
        flow has run.
    """
    tmp_lun_name = '%s_dest' % lun_name
    flow_name = 'create_cloned_volume'
    store_spec = {'client': client,
                  'snap_name': snap_name,
                  'lun_id': lun_id,
                  'smp_name': lun_name,
                  'lun_name': tmp_lun_name,
                  'lun_size': lun_size,
                  'base_lun_name': base_lun_name,
                  'pool_name': pool_name,
                  'provision': provision,
                  'tier': tier,
                  }
    work_flow = linear_flow.Flow(flow_name)
    work_flow.add(
        CreateSnapshotTask(),
        CreateSMPTask(),
        AttachSnapTask(),
        CreateLunTask(),
        MigrateLunTask(
            rebind={'src_id': 'smp_id', 'dst_id': 'new_lun_id'}))
    engine = taskflow.engines.load(
        work_flow, store=store_spec)
    engine.run()
    lun_id = engine.storage.fetch('smp_id')
    return lun_id
def create_cg_from_cg_snapshot(client, cg_name, src_cg_name,
                               cg_snap_name, src_cg_snap_name,
                               pool_name, lun_sizes, lun_names,
                               src_lun_names, specs_list, copy_snap=True):
    """Creates a consistency group from a CG snapshot (or another CG).

    When `copy_snap` is True the source CG snapshot is copied and made
    writable; otherwise a fresh CG snapshot of `src_cg_name` is taken.
    Each member volume is then built by: create SMP -> attach snap ->
    create target LUN -> start migration; all migrations are started
    without waiting and polled at the end by WaitMigrationsTask.

    :returns: list of LUN ids of the created member volumes.
    """
    prepare_tasks = []
    store_spec = {}
    if copy_snap:
        flow_name = 'create_cg_from_cg_snapshot'
        temp_cg_snap = utils.construct_tmp_cg_snap_name(cg_name)
        snap_name = temp_cg_snap
        store_spec.update({'snap_name': src_cg_snap_name,
                           'new_snap_name': snap_name})
        prepare_tasks.append(
            CopySnapshotTask())
        prepare_tasks.append(
            AllowReadWriteTask(rebind={'snap_name': 'new_snap_name'}))
    else:
        flow_name = 'create_cg_from_cg'
        snap_name = cg_snap_name
        store_spec.update({'cg_name': src_cg_name,
                           'cg_snap_name': snap_name})
        prepare_tasks.append(CreateCGSnapshotTask())
    work_flow = linear_flow.Flow(flow_name)
    work_flow.add(*prepare_tasks)
    # Per-member store-key templates; index i distinguishes members.
    new_src_id_template = 'new_src_id_%s'
    new_dst_id_template = 'new_dst_id_%s'
    new_dst_wwn_template = 'new_dst_wwn_%s'
    common_store_spec = {
        'client': client,
        'pool_name': pool_name,
        'ignore_thresholds': True,
        'new_cg_name': cg_name
    }
    store_spec.update(common_store_spec)
    # Create LUNs for CG
    for i, lun_name in enumerate(lun_names):
        sub_store_spec = {
            'lun_name': utils.construct_tmp_lun_name(lun_name),
            'lun_size': lun_sizes[i],
            'provision': specs_list[i].provision,
            'tier': specs_list[i].tier,
            'base_lun_name': src_lun_names[i],
            'smp_name': lun_name,
            'snap_name': snap_name,
        }
        # wait_for_completion=False: all member migrations run in
        # parallel and are checked afterwards by WaitMigrationsTask.
        work_flow.add(CreateSMPTask(name="CreateSMPTask_%s" % i,
                                    inject=sub_store_spec,
                                    provides=new_src_id_template % i),
                      AttachSnapTask(name="AttachSnapTask_%s" % i,
                                     inject=sub_store_spec),
                      CreateLunTask(name="CreateLunTask_%s" % i,
                                    inject=sub_store_spec,
                                    provides=(new_dst_id_template % i,
                                              new_dst_wwn_template % i)),
                      MigrateLunTask(
                          name="MigrateLunTask_%s" % i,
                          inject=sub_store_spec,
                          rebind={'src_id': new_src_id_template % i,
                                  'dst_id': new_dst_id_template % i},
                          wait_for_completion=False))
    # Wait all migration session finished
    work_flow.add(WaitMigrationsTask(new_src_id_template,
                                     new_dst_id_template,
                                     new_dst_wwn_template,
                                     len(lun_names)),
                  CreateConsistencyGroupTask(new_src_id_template,
                                             len(lun_names)))
    engine = taskflow.engines.load(work_flow, store=store_spec)
    engine.run()
    # Fetch all created LUNs and add them into CG
    lun_id_list = []
    for i, lun_name in enumerate(lun_names):
        lun_id = engine.storage.fetch(new_src_id_template % i)
        lun_id_list.append(lun_id)
    # The working snapshot is temporary in both branches; remove it.
    client.delete_cg_snapshot(snap_name)
    return lun_id_list
def create_cloned_cg(client, cg_name, src_cg_name,
                     pool_name, lun_sizes, lun_names,
                     src_lun_names, specs_list):
    """Clones a consistency group by snapshotting it and restoring.

    Thin wrapper around create_cg_from_cg_snapshot with
    copy_snap=False: a temporary CG snapshot of the source group is
    taken (and cleaned up) inside that flow.
    """
    cg_snap_name = utils.construct_tmp_cg_snap_name(cg_name)
    return create_cg_from_cg_snapshot(
        client, cg_name, src_cg_name,
        cg_snap_name, None,
        pool_name, lun_sizes, lun_names,
        src_lun_names, specs_list, copy_snap=False)
def create_mirror_view(mirror_view, mirror_name,
                       primary_lun_id, pool_name,
                       lun_name, lun_size, provision, tier):
    """Creates a MirrorView with a secondary LUN for replication.

    Steps: create the mirror on the primary, create the secondary LUN
    on the secondary array, then add it as the mirror image.
    """
    flow_name = 'create_mirror_view'
    store_specs = {
        'mirror': mirror_view,
        'mirror_name': mirror_name,
        'primary_lun_id': primary_lun_id,
        'pool_name': pool_name,
        'lun_name': lun_name,
        'lun_size': lun_size,
        'provision': provision,
        'tier': tier,
        'ignore_thresholds': True
    }
    # NOTE: should create LUN on secondary device/array
    work_flow = linear_flow.Flow(flow_name)
    # The injected secondary client overrides the store's 'client' for
    # CreateLunTask only, so the LUN lands on the secondary array.
    work_flow.add(CreateMirrorTask(),
                  CreateLunTask(
                      name='CreateSecondaryLunTask',
                      provides=('secondary_lun_id', 'secondary_lun_wwn'),
                      inject={'client': mirror_view.secondary_client}),
                  AddMirrorImageTask())
    engine = taskflow.engines.load(work_flow, store=store_specs)
    engine.run()

View File

@ -0,0 +1,339 @@
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import importutils
storops = importutils.try_import('storops')
from cinder import exception
from cinder.i18n import _, _LW
from cinder.volume.drivers.emc.vnx import common
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
def init_ops(configuration):
    """Registers VNX driver options and the generic SAN options."""
    configuration.append_config_values(common.EMC_VNX_OPTS)
    configuration.append_config_values(san_opts)
def get_metadata(volume):
    """Returns the metadata of a volume as a plain dict.

    Since versionedobjects is partially merged, metadata may come from
    either 'volume_metadata' (a list of key/value records) or
    'metadata' (already a dict); both shapes are handled here.
    """
    if 'volume_metadata' in volume:
        return {item['key']: item['value']
                for item in volume['volume_metadata']}
    if 'metadata' in volume:
        return volume['metadata']
    return {}
def dump_provider_location(location_dict):
    """Serializes a location dict into the 'key^value|key^value' form."""
    pairs = [key + '^' + value for key, value in location_dict.items()]
    return '|'.join(pairs)
def build_provider_location(system, lun_type, lun_id, base_lun_name, version):
    """Builds provider_location for volume or snapshot.

    :param system: VNX serial number
    :param lun_id: LUN ID in VNX
    :param lun_type: 'lun' or 'smp'
    :param base_lun_name: primary LUN name,
                          it will be used when creating snap lun
    :param version: driver version
    :return: the serialized 'key^value|...' provider_location string.
    """
    location_dict = {'system': system,
                     'type': lun_type,
                     'id': six.text_type(lun_id),
                     'base_lun_name': six.text_type(base_lun_name),
                     'version': version}
    return dump_provider_location(location_dict)
def extract_provider_location(provider_location, key):
    """Extracts value of the specified field from provider_location string.

    :param provider_location: provider_location string
    :param key: field name of the value that to be extracted
    :return: value of the specified field if it exists, otherwise,
             None is returned
    """
    if not provider_location:
        return None
    for pair in provider_location.split('|'):
        fields = pair.split('^')
        if len(fields) == 2 and fields[0] == key:
            return fields[1]
    return None
def update_provider_location(provider_location, items):
    """Updates provider_location with new dict items.

    :param provider_location: volume's provider_location.
    :param items: dict items for updating.
    """
    location_dict = {}
    for pair in provider_location.split('|'):
        fields = pair.split('^')
        location_dict[fields[0]] = fields[1]
    location_dict.update(items)
    return dump_provider_location(location_dict)
def get_pool_from_host(host):
    """Extracts the pool part of a volume host string via vol_utils."""
    return vol_utils.extract_host(host, 'pool')
def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,
               reraise_arbiter=lambda ex: True, *args, **kwargs):
    """Polls `condition` until it returns a truthy value or times out.

    :param condition: callable evaluated every `interval` seconds with
        `*args`/`**kwargs`; a truthy return value ends the wait.
    :param timeout: max seconds to wait; falls back to
        common.DEFAULT_TIMEOUT when None/0.
    :param interval: seconds between evaluations.
    :param reraise_arbiter: callable given the exception raised by
        `condition`; returning True propagates it, False treats the
        evaluation as falsy and keeps polling.
    :raises common.WaitUtilTimeoutException: when the timeout elapses.
    """
    start_time = time.time()
    if not timeout:
        timeout = common.DEFAULT_TIMEOUT

    def _inner():
        try:
            test_value = condition(*args, **kwargs)
        except Exception as ex:
            test_value = False
            with excutils.save_and_reraise_exception(
                    reraise=reraise_arbiter(ex)):
                # NOTE: six.text_type(ex), not ex.message --
                # BaseException has no `message` attribute on Python 3.
                LOG.debug('Exception raised when executing %(condition_name)s'
                          ' in wait_until. Message: %(msg)s',
                          {'condition_name': condition.__name__,
                           'msg': six.text_type(ex)})
        if test_value:
            raise loopingcall.LoopingCallDone()
        # Keep the elapsed-time comparison in float seconds; the old
        # int() truncation served no purpose and skewed the deadline.
        if time.time() - start_time > timeout:
            msg = (_('Timeout waiting for %(condition_name)s in wait_until.')
                   % {'condition_name': condition.__name__})
            LOG.error(msg)
            raise common.WaitUtilTimeoutException(msg)

    timer = loopingcall.FixedIntervalLoopingCall(_inner)
    timer.start(interval=interval).wait()
def validate_storage_migration(volume, target_host, src_serial, src_protocol):
    """Checks whether storage-assisted migration to `target_host` is viable.

    Returns True only when the target backend reports 'location_info',
    is on the same array (serial number matches `src_serial`) and --
    for in-use volumes -- uses the same storage protocol.
    """
    if 'location_info' not in target_host['capabilities']:
        LOG.warning(_LW("Failed to get pool name and "
                        "serial number. 'location_info' "
                        "from %s."), target_host['host'])
        return False
    info = target_host['capabilities']['location_info']
    LOG.debug("Host for migration is %s.", info)
    try:
        serial_number = info.split('|')[1]
    except (AttributeError, IndexError):
        # AttributeError: info is not a string (e.g. None);
        # IndexError: malformed value without a '|'-separated serial.
        LOG.warning(_LW('Error on getting serial number '
                        'from %s.'), target_host['host'])
        return False
    if serial_number != src_serial:
        LOG.debug('Skip storage-assisted migration because '
                  'target and source backend are not managing '
                  'the same array.')
        return False
    if (target_host['capabilities']['storage_protocol'] != src_protocol
            and get_original_status(volume) == 'in-use'):
        LOG.debug('Skip storage-assisted migration because '
                  'an in-use volume can not be migrated '
                  'between different protocols.')
        return False
    return True
def retype_need_migration(volume, old_provision, new_provision, host):
    """Decides whether a retype requires a LUN migration.

    Migration is needed when the volume moves to a different host, when
    it is a snap mount point ('smp'), or when the provisioning type
    changes in a way that cannot be satisfied by turning on compression
    in place.
    """
    if volume['host'] != host['host']:
        return True
    lun_type = extract_provider_location(volume['provider_location'], 'type')
    if lun_type == 'smp':
        return True
    if old_provision == new_provision:
        return False
    return not retype_need_turn_on_compression(old_provision, new_provision)
def retype_need_turn_on_compression(old_provision, new_provision):
    """True when the retype is a thin/thick -> compressed transition."""
    if new_provision != storops.VNXProvisionEnum.COMPRESSED:
        return False
    return old_provision in (storops.VNXProvisionEnum.THIN,
                             storops.VNXProvisionEnum.THICK)
def retype_need_change_tier(old_tier, new_tier):
    """True when a tier is requested and differs from the current one."""
    if new_tier is None:
        return False
    return old_tier != new_tier
def get_original_status(volume):
    """Returns 'in-use' when the volume has attachments, else 'available'."""
    return 'in-use' if volume['volume_attachment'] else 'available'
def construct_snap_name(volume):
    """Return snapshot name.

    Snapcopy volumes keep their backing snapshot ('snap-as-vol-');
    other volumes use a temporary snapshot name ('tmp-snap-').
    """
    prefix = 'snap-as-vol-' if snapcopy_enabled(volume) else 'tmp-snap-'
    return prefix + six.text_type(volume.name_id)
def construct_mirror_name(volume):
    """Constructs MirrorView name for volume."""
    return 'mirror_%s' % six.text_type(volume.id)
def construct_tmp_cg_snap_name(cg_name):
    """Return CG snapshot name."""
    return 'tmp-snap-%s' % six.text_type(cg_name)
def construct_tmp_lun_name(lun_name):
    """Constructs a time-based temporary LUN name."""
    timestamp = int(time.time())
    return '%(src)s-%(ts)s' % {'src': lun_name, 'ts': timestamp}
def construct_smp_name(snap_id):
    """Constructs the temporary snap mount point name for a snapshot."""
    return 'tmp-smp-%s' % six.text_type(snap_id)
def snapcopy_enabled(volume):
    """True when the volume metadata flags it as a snapcopy volume."""
    metadata = get_metadata(volume)
    if 'snapcopy' not in metadata:
        return False
    return metadata['snapcopy'].lower() == 'true'
def get_migration_rate(volume):
    """Returns the migration rate requested via volume metadata.

    Reads the 'migrate_rate' metadata key; unknown values fall back to
    storops' HIGH rate with a warning. Returns None when the key is
    absent or empty.
    """
    metadata = get_metadata(volume)
    rate = metadata.get('migrate_rate', None)
    if rate:
        if rate.lower() in storops.VNXMigrationRate.values():
            return storops.VNXMigrationRate.parse(rate.lower())
        else:
            LOG.warning(_LW('Unknown migration rate specified, '
                            'using [high] as migration rate.'))
            return storops.VNXMigrationRate.HIGH
def validate_cg_type(group):
    """Rejects consistency groups containing compressed volume types.

    :param group: consistency group with an optional comma-separated
        'volume_type_id'.
    :raises exception.InvalidInput: when any member volume type is
        provisioned as compressed (VNX CGs cannot contain such LUNs).
    """
    if group.get('volume_type_id') is None:
        return
    for type_id in group['volume_type_id'].split(","):
        if type_id:
            specs = volume_types.get_volume_type_extra_specs(type_id)
            extra_specs = common.ExtraSpecs(specs)
            if extra_specs.provision == storops.VNXProvisionEnum.COMPRESSED:
                msg = _("Failed to create consistency group %s "
                        "because VNX consistency group cannot "
                        "accept compressed LUNs as members."
                        ) % group['id']
                raise exception.InvalidInput(reason=msg)
def update_res_without_poll(res):
    """Refreshes `res` inside storops' `with_no_poll` context."""
    with res.with_no_poll():
        res.update()
def update_res_with_poll(res):
    """Refreshes `res` inside storops' `with_poll` context."""
    with res.with_poll():
        res.update()
def get_base_lun_name(volume):
    """Returns base LUN name for LUN/snapcopy LUN.

    Falls back to the volume name when provider_location carries no
    usable 'base_lun_name' field.
    """
    base_name = extract_provider_location(
        volume.provider_location, 'base_lun_name')
    if base_name in (None, 'None'):
        return volume.name
    return base_name
def sift_port_white_list(port_white_list, registered_io_ports):
    """Filters out the unregistered ports.

    Goes through the `port_white_list`, and filters out the ones not
    registered (that is not in `registered_io_ports`).

    :returns: the whitelisted ports that are also registered, in the
        whitelist's original order.
    """
    valid_port_list = []
    # Fixed: the message used to contain a stray '}' after %(white)s.
    LOG.debug('Filter ports in [%(white)s] but not in [%(reg_ports)s].',
              {'white': ','.join(
                  [port.display_name for port in port_white_list]),
               'reg_ports': ','.join(
                   [port.display_name for port in registered_io_ports])})
    for io_port in port_white_list:
        if io_port not in registered_io_ports:
            LOG.debug('Skipped SP port %(port)s due to it is not registered. '
                      'The registered IO ports: %(reg_ports)s.',
                      {'port': io_port, 'reg_ports': registered_io_ports})
        else:
            valid_port_list.append(io_port)
    return valid_port_list
def convert_to_tgt_list_and_itor_tgt_map(zone_mapping):
    """Function to process data from lookup service.

    :param zone_mapping: mapping is the data from the zone lookup service
        with below format
        {
             <San name>: {
                 'initiator_port_wwn_list':
                 ('200000051e55a100', '200000051e55a121'..)
                 'target_port_wwn_list':
                 ('100000051e55a100', '100000051e55a121'..)
             }
        }
    :return: a 2-tuple of (deduplicated target WWN list, mapping of
        each initiator WWN to its fabric's target WWN list).
    """
    target_wwns = []
    itor_tgt_map = {}
    for san_name in zone_mapping:
        one_map = zone_mapping[san_name]
        # List membership keeps the first-seen order of targets while
        # deduplicating across fabrics.
        for target in one_map['target_port_wwn_list']:
            if target not in target_wwns:
                target_wwns.append(target)
        for initiator in one_map['initiator_port_wwn_list']:
            itor_tgt_map[initiator] = one_map['target_port_wwn_list']
    LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s",
              {'tgt_wwns': target_wwns,
               'itor_tgt_map': itor_tgt_map})
    return target_wwns, itor_tgt_map
def truncate_fc_port_wwn(wwn):
    """Strips colons from a FC WWN and drops its first 16 hex digits."""
    stripped = wwn.replace(':', '')
    return stripped[16:]
def is_volume_smp(volume):
    """True when provider_location marks the volume as a snap mount point."""
    lun_type = extract_provider_location(volume.provider_location, 'type')
    return lun_type == 'smp'

View File

@ -0,0 +1,16 @@
---
features:
- Added the new VNX Cinder driver, which is based on storops,
a library released to PyPI.
upgrade:
- For EMC VNX backends, please upgrade to use
'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver'.
A new option 'storage_protocol' is introduced.
For the FC driver, add 'storage_protocol = fc' to the driver section.
For the iSCSI driver, add 'storage_protocol = iscsi' to the driver section.
deprecations:
- The old VNX FC (cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver) and
iSCSI (cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver) drivers
are deprecated. Please refer to the upgrade section for information about
the new driver.