VNX: Add async migration support
The VNX Cinder driver now supports asynchronous migration during volume cloning. By default, the cloned volume is available as soon as the migration starts on the VNX, instead of waiting for the migration to complete; this greatly accelerates the cloning process. To disable this behavior, pass '--metadata async_migrate=False' when creating a volume from a source volume or snapshot.

DocImpact
Closes-bug: #1657966
Change-Id: Idfe5d9d4043f84cad2ae2d4d31914d3ae573de50
This commit is contained in:
parent 152138b13b
commit 6ccfcafd45
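The opt-out is plain volume metadata, so it can be exercised from any Cinder client. Below is a minimal sketch using python-cinderclient; the endpoint, credentials, and source volume ID are placeholders, not part of this change:

    from cinderclient import client as cinder_client

    # Placeholder auth values; substitute your deployment's settings.
    cinder = cinder_client.Client('3', 'admin', 'secret', 'project',
                                  'http://controller:5000/v3')

    # Default after this change: the clone becomes usable as soon as the
    # VNX migration session starts.
    clone = cinder.volumes.create(size=1, source_volid='SRC_VOL_ID')

    # Opt out per volume: creation waits for the migration to complete.
    sync_clone = cinder.volumes.create(
        size=1,
        source_volid='SRC_VOL_ID',
        metadata={'async_migrate': 'False'})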
@@ -60,6 +60,10 @@ class VNXMigrationError(VNXException):
     pass
 
 
+class VNXLunNotMigratingError(VNXException):
+    pass
+
+
 class VNXTargetNotReadyError(VNXMigrationError):
     message = 'The destination LUN is not available for migration'
@@ -128,6 +132,10 @@ class VNXDeleteLunError(VNXLunError):
     pass
 
 
+class VNXLunUsedByFeatureError(VNXLunError):
+    pass
+
+
 class VNXCompressionError(VNXLunError):
     pass
@@ -0,0 +1,28 @@
+# Copyright (c) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class PQueue(object):
+
+    def __init__(self, path, interval=None):
+        self.path = path
+        self._interval = interval
+        self.started = False
+
+    def put(self, item):
+        return item
+
+    def start(self):
+        self.started = True
@@ -94,6 +94,11 @@ test_delete_volume_not_force: &test_delete_volume_not_force
 
 test_delete_volume_force: *test_delete_volume_not_force
 
+test_delete_async_volume:
+  volume: *volume_base
+
+test_delete_async_volume_migrating:
+  volume: *volume_base
 
 test_retype_need_migration_when_host_changed:
   volume: *volume_base
@@ -377,6 +377,17 @@ test_cleanup_migration:
       get_migration_session: *session_cancel
       get_lun: *lun_cancel_migrate
 
+test_cleanup_migration_not_migrating:
+  lun: &lun_cancel_migrate_not_migrating
+    _methods:
+      cancel_migrate:
+        _raise:
+          VNXLunNotMigratingError: The LUN is not migrating
+  vnx:
+    _methods:
+      get_migration_session: *session_cancel
+      get_lun: *lun_cancel_migrate_not_migrating
+
 test_get_lun_by_name:
   lun: &lun_test_get_lun_by_name
     _properties:
@@ -463,6 +474,26 @@ test_delete_lun_exception:
     _methods:
       get_lun: *lun_test_delete_lun_exception
 
+test_cleanup_async_lun:
+  lun: &lun_test_cleanup_async_lun
+    _properties:
+      <<: *lun_base_prop
+      name: lun_test_cleanup_async_lun
+      is_snap_mount_point: True
+    _methods:
+      delete:
+      cancel_migrate:
+  snap: &snap_test_cleanup_async_lun
+    _methods:
+      delete:
+  vnx:
+    _properties:
+      <<: *vnx_base_prop
+    _methods:
+      get_lun: *lun_test_cleanup_async_lun
+      get_snap: *snap_test_cleanup_async_lun
+      get_migration_session: *session_migrating
+
 test_create_cg: &test_create_cg
   cg: &cg_for_create
     _properties:
@@ -1211,6 +1242,29 @@ test_append_volume_stats:
 
 test_delete_volume_not_force: *test_delete_lun
 test_delete_volume_force: *test_delete_lun
 
+test_delete_async_volume:
+  snap: &snap_test_delete_async_volume
+    _methods:
+      delete:
+  vnx:
+    _methods:
+      get_lun: *lun_test_delete_lun
+      get_snap: *snap_test_delete_async_volume
+
+test_delete_async_volume_migrating:
+  lun: &lun_used_by_feature
+    _properties:
+      is_snap_mount_point: false
+    _methods:
+      cancel_migrate:
+      delete:
+        _raise:
+          VNXLunUsedByFeatureError:
+  vnx:
+    _methods:
+      get_lun: *lun_used_by_feature
+      get_snap: *snap_test_delete_async_volume
+
 test_enable_compression:
   lun:
     _properties:
@@ -363,7 +363,8 @@ def _build_client():
                          password='sysadmin',
                          scope='global',
                          naviseccli=None,
-                         sec_file=None)
+                         sec_file=None,
+                         queue_path='vnx-cinder')
 
 
 def patch_client(func):
@@ -120,6 +120,7 @@ class TestCommonAdapter(test.TestCase):
     def test_create_volume_from_snapshot(
             self, vnx_common, mocked, cinder_input):
         volume = cinder_input['volume']
+        volume['metadata'] = {'async_migrate': 'False'}
         snapshot = cinder_input['snapshot']
         snapshot.volume = volume
         update = vnx_common.create_volume_from_snapshot(volume, snapshot)
@@ -293,7 +294,9 @@ class TestCommonAdapter(test.TestCase):
     @res_mock.patch_common_adapter
     def test_delete_volume_not_force(self, vnx_common, mocked, mocked_input):
         vnx_common.force_delete_lun_in_sg = False
-        vnx_common.delete_volume(mocked_input['volume'])
+        volume = mocked_input['volume']
+        volume['metadata'] = {'async_migrate': 'False'}
+        vnx_common.delete_volume(volume)
         lun = vnx_common.client.vnx.get_lun()
         lun.delete.assert_called_with(force_detach=True, detach_from_sg=False)
 
@@ -301,7 +304,32 @@ class TestCommonAdapter(test.TestCase):
     @res_mock.patch_common_adapter
     def test_delete_volume_force(self, vnx_common, mocked, mocked_input):
         vnx_common.force_delete_lun_in_sg = True
-        vnx_common.delete_volume(mocked_input['volume'])
+        volume = mocked_input['volume']
+        volume['metadata'] = {'async_migrate': 'False'}
+        vnx_common.delete_volume(volume)
         lun = vnx_common.client.vnx.get_lun()
         lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
 
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_async_volume(self, vnx_common, mocked, mocked_input):
+        volume = mocked_input['volume']
+        volume.metadata = {'async_migrate': 'True'}
+        vnx_common.force_delete_lun_in_sg = True
+        vnx_common.delete_volume(volume)
+        lun = vnx_common.client.vnx.get_lun()
+        lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
+
+    @res_mock.mock_driver_input
+    @res_mock.patch_common_adapter
+    def test_delete_async_volume_migrating(self, vnx_common, mocked,
+                                           mocked_input):
+        volume = mocked_input['volume']
+        volume.metadata = {'async_migrate': 'True'}
+        vnx_common.force_delete_lun_in_sg = True
+        vnx_common.client.cleanup_async_lun = mock.Mock()
+        vnx_common.delete_volume(volume)
+        lun = vnx_common.client.vnx.get_lun()
+        lun.delete.assert_called_with(force_detach=True, detach_from_sg=True)
@@ -159,6 +159,10 @@ class TestClient(test.TestCase):
     def test_cleanup_migration(self, client, mocked):
         client.cleanup_migration(1, 2)
 
+    @res_mock.patch_client
+    def test_cleanup_migration_not_migrating(self, client, mocked):
+        client.cleanup_migration(1, 2)
+
     @res_mock.patch_client
     def test_get_lun_by_name(self, client, mocked):
         lun = client.get_lun(name='lun_name_test_get_lun_by_name')
@@ -182,6 +186,12 @@ class TestClient(test.TestCase):
                           'General lun delete error.',
                           client.delete_lun, mocked['lun'].name)
 
+    @res_mock.patch_client
+    def test_cleanup_async_lun(self, client, mocked):
+        client.cleanup_async_lun(
+            mocked['lun'].name,
+            force=True)
+
     @res_mock.patch_client
     def test_enable_compression(self, client, mocked):
         lun_obj = mocked['lun']
@@ -261,7 +271,8 @@ class TestClient(test.TestCase):
         lun = client.vnx.get_lun()
         lun.create_snap.assert_called_once_with('snap_test_create_snapshot',
                                                 allow_rw=True,
-                                                auto_delete=False)
+                                                auto_delete=False,
+                                                keep_for=None)
 
     @res_mock.patch_client
     def test_create_snapshot_snap_name_exist_error(self, client, _ignore):
@@ -137,7 +137,7 @@ class TestTaskflow(test.TestCase):
             'client': client,
             'snap_name': 'snap_name'
         }
-        self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
+        self.work_flow.add(vnx_taskflow.ModifySnapshotTask())
         engine = taskflow.engines.load(self.work_flow,
                                        store=store_spec)
         engine.run()
@@ -148,7 +148,7 @@ class TestTaskflow(test.TestCase):
             'client': client,
             'snap_name': 'snap_name'
         }
-        self.work_flow.add(vnx_taskflow.AllowReadWriteTask())
+        self.work_flow.add(vnx_taskflow.ModifySnapshotTask())
         engine = taskflow.engines.load(self.work_flow,
                                        store=store_spec)
         self.assertRaises(vnx_ex.VNXSnapError,
@@ -59,6 +59,7 @@ class CommonAdapter(object):
         self.reserved_percentage = None
         self.destroy_empty_sg = None
         self.itor_auto_dereg = None
+        self.queue_path = None
 
     def do_setup(self):
         self._normalize_config()
@@ -68,7 +69,8 @@ class CommonAdapter(object):
             self.config.san_password,
             self.config.storage_vnx_authentication_type,
             self.config.naviseccli_path,
-            self.config.storage_vnx_security_file_dir)
+            self.config.storage_vnx_security_file_dir,
+            self.queue_path)
         # Replication related
         self.mirror_view = self.build_mirror_view(self.config, True)
         self.serial_number = self.client.get_serial()
@@ -86,6 +88,9 @@ class CommonAdapter(object):
         self.set_extra_spec_defaults()
 
     def _normalize_config(self):
+        self.queue_path = (
+            self.config.config_group if self.config.config_group
+            else 'DEFAULT')
         # Check option `naviseccli_path`.
         # Set to None (then pass to storops) if it is not set or set to an
         # empty string.
@@ -301,7 +306,7 @@ class CommonAdapter(object):
         tier = specs.tier
         base_lun_name = utils.get_base_lun_name(snapshot.volume)
         rep_update = dict()
-        if utils.snapcopy_enabled(volume):
+        if utils.is_snapcopy_enabled(volume):
             new_lun_id = emc_taskflow.fast_create_volume_from_snapshot(
                 client=self.client,
                 snap_name=snapshot.name,
@@ -309,26 +314,34 @@ class CommonAdapter(object):
                 lun_name=volume.name,
                 base_lun_name=base_lun_name,
                 pool_name=pool)
 
             location = self._build_provider_location(
                 lun_type='smp',
                 lun_id=new_lun_id,
                 base_lun_name=base_lun_name)
             volume_metadata['snapcopy'] = 'True'
+            volume_metadata['async_migrate'] = 'False'
         else:
+            async_migrate = utils.is_async_migrate_enabled(volume)
+            new_snap_name = (
+                utils.construct_snap_name(volume) if async_migrate else None)
             new_lun_id = emc_taskflow.create_volume_from_snapshot(
                 client=self.client,
-                snap_name=snapshot.name,
+                src_snap_name=snapshot.name,
                 lun_name=volume.name,
                 lun_size=volume.size,
                 base_lun_name=base_lun_name,
                 pool_name=pool,
                 provision=provision,
-                tier=tier)
+                tier=tier,
+                new_snap_name=new_snap_name)
 
             location = self._build_provider_location(
                 lun_type='lun',
                 lun_id=new_lun_id,
                 base_lun_name=volume.name)
             volume_metadata['snapcopy'] = 'False'
+            volume_metadata['async_migrate'] = six.text_type(async_migrate)
             rep_update = self.setup_lun_replication(volume, new_lun_id)
 
         model_update = {'provider_location': location,
@@ -349,7 +362,7 @@ class CommonAdapter(object):
         source_lun_id = self.client.get_lun_id(src_vref)
         snap_name = utils.construct_snap_name(volume)
         rep_update = dict()
-        if utils.snapcopy_enabled(volume):
+        if utils.is_snapcopy_enabled(volume):
             # snapcopy feature enabled
             new_lun_id = emc_taskflow.fast_create_cloned_volume(
                 client=self.client,
@@ -362,7 +375,10 @@ class CommonAdapter(object):
                 lun_type='smp',
                 lun_id=new_lun_id,
                 base_lun_name=base_lun_name)
             volume_metadata['snapcopy'] = 'True'
+            volume_metadata['async_migrate'] = 'False'
         else:
+            async_migrate = utils.is_async_migrate_enabled(volume)
             new_lun_id = emc_taskflow.create_cloned_volume(
                 client=self.client,
                 snap_name=snap_name,
@@ -372,14 +388,15 @@ class CommonAdapter(object):
                 base_lun_name=base_lun_name,
                 pool_name=pool,
                 provision=provision,
-                tier=tier)
-            self.client.delete_snapshot(snap_name)
+                tier=tier,
+                async_migrate=async_migrate)
             # After migration, volume's base lun is itself
             location = self._build_provider_location(
                 lun_type='lun',
                 lun_id=new_lun_id,
                 base_lun_name=volume.name)
             volume_metadata['snapcopy'] = 'False'
+            volume_metadata['async_migrate'] = six.text_type(async_migrate)
             rep_update = self.setup_lun_replication(volume, new_lun_id)
 
         model_update = {'provider_location': location,
@@ -720,8 +737,27 @@ class CommonAdapter(object):
 
     def delete_volume(self, volume):
         """Deletes an EMC volume."""
+        async_migrate = utils.is_async_migrate_enabled(volume)
         self.cleanup_lun_replication(volume)
-        self.client.delete_lun(volume.name, force=self.force_delete_lun_in_sg)
+        try:
+            self.client.delete_lun(volume.name,
+                                   force=self.force_delete_lun_in_sg)
+        except storops_ex.VNXLunUsedByFeatureError:
+            # Case 1. Migration not finished, cleanup related stuff.
+            if async_migrate:
+                self.client.cleanup_async_lun(
+                    name=volume.name,
+                    force=self.force_delete_lun_in_sg)
+            else:
+                raise
+        except (storops_ex.VNXLunHasSnapError,
+                storops_ex.VNXLunHasSnapMountPointError):
+            # Here, we assume no Cinder managed snaps, and add it to queue
+            # for later deletion
+            self.client.delay_delete_lun(volume.name)
+        # Case 2. Migration already finished, delete temp snap if exists.
+        if async_migrate:
+            self.client.delete_snapshot(utils.construct_snap_name(volume))
 
     def extend_volume(self, volume, new_size):
         """Extends an EMC volume."""
@@ -19,9 +19,10 @@ from oslo_utils import importutils
 storops = importutils.try_import('storops')
 if storops:
     from storops import exception as storops_ex
+    from storops.lib import tasks as storops_tasks
 
 from cinder import exception
-from cinder.i18n import _, _LW, _LE
+from cinder.i18n import _, _LW, _LE, _LI
 from cinder import utils as cinder_utils
 from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.dell_emc.vnx import const
@@ -79,7 +80,7 @@ class Condition(object):
 
 class Client(object):
     def __init__(self, ip, username, password, scope,
-                 naviseccli, sec_file):
+                 naviseccli, sec_file, queue_path=None):
         self.naviseccli = naviseccli
         if not storops:
             msg = _('storops Python library is not installed.')
@@ -91,6 +92,10 @@ class Client(object):
             naviseccli=naviseccli,
             sec_file=sec_file)
         self.sg_cache = {}
+        if queue_path:
+            self.queue = storops_tasks.PQueue(path=queue_path)
+            self.queue.start()
+            LOG.info(_LI('PQueue[%s] starts now.'), queue_path)
 
     def create_lun(self, pool, name, size, provision,
                    tier, cg_id=None, ignore_thresholds=False):
@@ -138,10 +143,25 @@ class Client(object):
                 if smp_attached_snap:
                     smp_attached_snap.delete()
         except storops_ex.VNXLunNotFoundError as ex:
-            LOG.warning(_LW("LUN %(name)s is already deleted. "
-                            "Message: %(msg)s"),
-                        {'name': name, 'msg': ex.message})
-            pass  # Ignore the failure that due to retry.
+            LOG.info(_LI("LUN %(name)s is already deleted. This message can "
+                         "be safely ignored. Message: %(msg)s"),
+                     {'name': name, 'msg': ex.message})
+
+    def cleanup_async_lun(self, name, force=False):
+        """Helper method to cleanup stuff for async migration.
+
+        .. note::
+            Only call it when VNXLunUsedByFeatureError occurs
+        """
+        lun = self.get_lun(name=name)
+        self.cleanup_migration(src_id=lun.lun_id)
+        lun.delete(force_detach=True, detach_from_sg=force)
+
+    def delay_delete_lun(self, name):
+        """Delay the deletion by putting it in a storops queue."""
+        self.queue.put(self.vnx.delete_lun, name=name)
+        LOG.info(_LI("VNX object has been added to queue for later"
+                     " deletion: %s"), name)
 
     @cinder_utils.retry(const.VNXLunPreparingError, retries=1,
                         backoff_rate=1)
@@ -212,7 +232,7 @@ class Client(object):
         else:
             return False
 
-    def cleanup_migration(self, src_id, dst_id):
+    def cleanup_migration(self, src_id, dst_id=None):
         """Invoke when migration meets error.
 
         :param src_id: source LUN id
@@ -227,14 +247,20 @@ class Client(object):
                          '%(src_id)s -> %(dst_id)s.'),
                     {'src_id': src_id,
                      'dst_id': dst_id})
-            src_lun.cancel_migrate()
+            try:
+                src_lun.cancel_migrate()
+            except storops_ex.VNXLunNotMigratingError:
+                LOG.info(_LI('The LUN is not migrating, this message can be'
+                             ' safely ignored'))
 
-    def create_snapshot(self, lun_id, snap_name):
+    def create_snapshot(self, lun_id, snap_name, keep_for=None):
        """Creates a snapshot."""
 
         lun = self.get_lun(lun_id=lun_id)
         try:
-            lun.create_snap(snap_name, allow_rw=True, auto_delete=False)
+            lun.create_snap(
+                snap_name, allow_rw=True, auto_delete=False,
+                keep_for=keep_for)
         except storops_ex.VNXSnapNameInUseError as ex:
             LOG.warning(_LW('Snapshot %(name)s already exists. '
                             'Message: %(msg)s'),
@@ -292,9 +318,11 @@ class Client(object):
                          "currently attached. Message: %(msg)s"),
                      {'smp_name': smp_name, 'msg': ex.message})
 
-    def modify_snapshot(self, snap_name, allow_rw=None, auto_delete=None):
+    def modify_snapshot(self, snap_name, allow_rw=None,
+                        auto_delete=None, keep_for=None):
         snap = self.vnx.get_snap(name=snap_name)
-        snap.modify(allow_rw=allow_rw, auto_delete=auto_delete)
+        snap.modify(allow_rw=allow_rw, auto_delete=auto_delete,
+                    keep_for=keep_for)
 
     def create_consistency_group(self, cg_name, lun_id_list=None):
         try:
@@ -38,6 +38,9 @@ INTERVAL_20_SEC = 20
 INTERVAL_30_SEC = 30
 INTERVAL_60_SEC = 60
 
+SNAP_EXPIRATION_HOUR = '1h'
+
+
 VNX_OPTS = [
     cfg.StrOpt('storage_vnx_authentication_type',
                default='global',
@@ -160,7 +163,7 @@ class ExtraSpecs(object):
             value = enum_class.parse(value)
         except ValueError:
             reason = (_("The value %(value)s for key %(key)s in extra "
-                        "specs is invalid."),
+                        "specs is invalid.") %
                       {'key': key, 'value': value})
             raise exception.InvalidVolumeType(reason=reason)
         return value
@@ -73,9 +73,10 @@ class VNXDriver(driver.TransferVD,
         Replication v2 support(managed)
         Configurable migration rate support
         8.0.0 - New VNX Cinder driver
+        9.0.0 - Use asynchronous migration for cloning
     """
 
-    VERSION = '08.00.00'
+    VERSION = '09.00.00'
     VENDOR = 'Dell EMC'
     # ThirdPartySystems wiki page
     CI_WIKI_NAME = "EMC_VNX_CI"
@@ -24,6 +24,7 @@ from taskflow import task
 from taskflow.types import failure
 
 from cinder import exception
+from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.dell_emc.vnx import const
 from cinder.volume.drivers.dell_emc.vnx import utils
 from cinder.i18n import _, _LI, _LW
@@ -37,19 +38,18 @@ class MigrateLunTask(task.Task):
     Reversion strategy: Cleanup the migration session
     """
     def __init__(self, name=None, provides=None, inject=None,
-                 rebind=None, wait_for_completion=True):
+                 rebind=None):
         super(MigrateLunTask, self).__init__(name=name,
                                              provides=provides,
                                              inject=inject,
                                              rebind=rebind)
-        self.wait_for_completion = wait_for_completion
 
-    def execute(self, client, src_id, dst_id, *args, **kwargs):
+    def execute(self, client, src_id, dst_id, async_migrate, *args, **kwargs):
         LOG.debug('%s.execute', self.__class__.__name__)
         dst_lun = client.get_lun(lun_id=dst_id)
         dst_wwn = dst_lun.wwn
         client.migrate_lun(src_id, dst_id)
-        if self.wait_for_completion:
+        if not async_migrate:
             migrated = client.verify_migration(src_id, dst_id, dst_wwn)
             if not migrated:
                 msg = _("Failed to migrate volume between source vol %(src)s"
@@ -175,12 +175,13 @@ class CreateSnapshotTask(task.Task):
 
     Reversion Strategy: Delete the created snapshot.
     """
-    def execute(self, client, snap_name, lun_id, *args, **kwargs):
+    def execute(self, client, snap_name, lun_id, keep_for=None,
+                *args, **kwargs):
         LOG.debug('%s.execute', self.__class__.__name__)
         LOG.info(_LI('Create snapshot: %(snapshot)s: lun: %(lun)s'),
                  {'snapshot': snap_name,
                   'lun': lun_id})
-        client.create_snapshot(lun_id, snap_name)
+        client.create_snapshot(lun_id, snap_name, keep_for=keep_for)
 
     def revert(self, result, client, snap_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
@@ -191,11 +192,12 @@ class CreateSnapshotTask(task.Task):
         client.delete_snapshot(snap_name)
 
 
-class AllowReadWriteTask(task.Task):
+class ModifySnapshotTask(task.Task):
     """Task to modify a Snapshot to allow ReadWrite on it."""
-    def execute(self, client, snap_name, *args, **kwargs):
+    def execute(self, client, snap_name, keep_for=None,
+                *args, **kwargs):
         LOG.debug('%s.execute', self.__class__.__name__)
-        client.modify_snapshot(snap_name, allow_rw=True)
+        client.modify_snapshot(snap_name, allow_rw=True, keep_for=keep_for)
 
     def revert(self, result, client, snap_name, *args, **kwargs):
         method_name = '%s.revert' % self.__class__.__name__
@@ -333,6 +335,7 @@ def run_migration_taskflow(client,
                   'tier': tier,
                   'ignore_thresholds': True,
                   'src_id': lun_id,
+                  'async_migrate': False,
                   }
     work_flow = linear_flow.Flow(flow_name)
     work_flow.add(CreateLunTask(),
@@ -364,7 +367,7 @@ def fast_create_volume_from_snapshot(client,
     }
     work_flow = linear_flow.Flow(flow_name)
     work_flow.add(CopySnapshotTask(),
-                  AllowReadWriteTask(rebind={'snap_name': 'new_snap_name'}),
+                  ModifySnapshotTask(rebind={'snap_name': 'new_snap_name'}),
                   CreateSMPTask(),
                   AttachSnapTask(rebind={'snap_name': 'new_snap_name'}))
     engine = taskflow.engines.load(
@@ -374,17 +377,19 @@ def fast_create_volume_from_snapshot(client,
     return lun_id
 
 
-def create_volume_from_snapshot(client, snap_name, lun_name,
+def create_volume_from_snapshot(client, src_snap_name, lun_name,
                                 lun_size, base_lun_name, pool_name,
-                                provision, tier):
-    # Step 1: create smp from base lun
-    # Step 2: attach snapshot to smp
-    # Step 3: Create new LUN
-    # Step 4: migrate the smp to new LUN
+                                provision, tier, new_snap_name=None):
+    # Step 1: Copy and modify snap (only for async migrate)
+    # Step 2: Create smp from base lun
+    # Step 3: Attach snapshot to smp
+    # Step 4: Create new LUN
+    # Step 5: Migrate the smp to new LUN
     tmp_lun_name = '%s_dest' % lun_name
     flow_name = 'create_volume_from_snapshot'
     store_spec = {'client': client,
-                  'snap_name': snap_name,
+                  'snap_name': src_snap_name,
+                  'new_snap_name': new_snap_name,
                   'smp_name': lun_name,
                   'lun_name': tmp_lun_name,
                   'lun_size': lun_size,
@@ -392,10 +397,19 @@ def create_volume_from_snapshot(client, snap_name, lun_name,
                   'pool_name': pool_name,
                   'provision': provision,
                   'tier': tier,
+                  'keep_for': (common.SNAP_EXPIRATION_HOUR
+                               if new_snap_name else None),
+                  'async_migrate': True if new_snap_name else False,
                   }
     work_flow = linear_flow.Flow(flow_name)
+    if new_snap_name:
+        work_flow.add(CopySnapshotTask(),
+                      ModifySnapshotTask(
+                          rebind={'snap_name': 'new_snap_name'}))
+
     work_flow.add(CreateSMPTask(),
-                  AttachSnapTask(),
+                  AttachSnapTask(rebind={'snap_name': 'new_snap_name'})
+                  if new_snap_name else AttachSnapTask(),
                   CreateLunTask(),
                   MigrateLunTask(
                       rebind={'src_id': 'smp_id',
@@ -428,7 +442,7 @@ def fast_create_cloned_volume(client, snap_name, lun_id,
 
 def create_cloned_volume(client, snap_name, lun_id, lun_name,
                          lun_size, base_lun_name, pool_name,
-                         provision, tier):
+                         provision, tier, async_migrate=False):
     tmp_lun_name = '%s_dest' % lun_name
     flow_name = 'create_cloned_volume'
     store_spec = {'client': client,
@@ -441,6 +455,9 @@ def create_cloned_volume(client, snap_name, lun_id, lun_name,
                   'pool_name': pool_name,
                   'provision': provision,
                   'tier': tier,
+                  'keep_for': (common.SNAP_EXPIRATION_HOUR if
+                               async_migrate else None),
+                  'async_migrate': async_migrate,
                   }
     work_flow = linear_flow.Flow(flow_name)
     work_flow.add(
@@ -453,6 +470,8 @@ def create_cloned_volume(client, snap_name, lun_id, lun_name,
     engine = taskflow.engines.load(
         work_flow, store=store_spec)
     engine.run()
+    if not async_migrate:
+        client.delete_snapshot(snap_name)
     lun_id = engine.storage.fetch('smp_id')
     return lun_id
 
@@ -473,7 +492,7 @@ def create_cg_from_cg_snapshot(client, cg_name, src_cg_name,
         prepare_tasks.append(
             CopySnapshotTask())
         prepare_tasks.append(
-            AllowReadWriteTask(rebind={'snap_name': 'new_snap_name'}))
+            ModifySnapshotTask(rebind={'snap_name': 'new_snap_name'}))
     else:
         flow_name = 'create_cg_from_cg'
         snap_name = cg_snap_name
@@ -505,6 +524,7 @@ def create_cg_from_cg_snapshot(client, cg_name, src_cg_name,
                       'base_lun_name': src_lun_names[i],
                       'smp_name': lun_name,
                       'snap_name': snap_name,
+                      'async_migrate': True,
                       }
         work_flow.add(CreateSMPTask(name="CreateSMPTask_%s" % i,
                                     inject=sub_store_spec,
@@ -519,8 +539,7 @@ def create_cg_from_cg_snapshot(client, cg_name, src_cg_name,
                           name="MigrateLunTask_%s" % i,
                           inject=sub_store_spec,
                           rebind={'src_id': new_src_id_template % i,
-                                  'dst_id': new_dst_id_template % i},
-                          wait_for_completion=False))
+                                  'dst_id': new_dst_id_template % i}))
 
         # Wait all migration session finished
         work_flow.add(WaitMigrationsTask(new_src_id_template,
@@ -201,7 +201,7 @@ def get_original_status(volume):
 
 def construct_snap_name(volume):
     """Return snapshot name."""
-    if snapcopy_enabled(volume):
+    if is_snapcopy_enabled(volume):
         return 'snap-as-vol-' + six.text_type(volume.name_id)
     else:
         return 'tmp-snap-' + six.text_type(volume.name_id)
@@ -227,11 +227,25 @@ def construct_smp_name(snap_id):
     return 'tmp-smp-' + six.text_type(snap_id)
 
 
-def snapcopy_enabled(volume):
+def is_snapcopy_enabled(volume):
     meta = get_metadata(volume)
     return 'snapcopy' in meta and meta['snapcopy'].lower() == 'true'
 
 
+def is_async_migrate_enabled(volume):
+    extra_specs = common.ExtraSpecs.from_volume(volume)
+    if extra_specs.is_replication_enabled:
+        # For replication-enabled volume, we should not use the async-cloned
+        # volume, or setup replication would fail with
+        # VNXMirrorLunNotAvailableError
+        return False
+    meta = get_metadata(volume)
+    if 'async_migrate' not in meta:
+        # Asynchronous migration is the default behavior now
+        return True
+    return meta['async_migrate'].lower() == 'true'
+
+
 def get_migration_rate(volume):
     metadata = get_metadata(volume)
     rate = metadata.get('migrate_rate', None)
@@ -0,0 +1,10 @@
+---
+features:
+  - VNX Cinder driver now supports asynchronous migration during volume
+    cloning. By default, the cloned volume is available after the
+    migration starts in the VNX, instead of waiting for the migration
+    to complete; this greatly accelerates the cloning process.
+    To disable this behavior, add ``--metadata async_migrate=False``
+    when creating a volume from a source volume/snapshot.