Clean up repl v1 volume creation from replica

There is still some cruft left over from the repl v1 implementation
that should be removed.

Change-Id: Ia98fff46505a8474dbb28d3ffdd67e66f556eddc
Sean McGinnis 2017-07-11 19:13:18 -05:00 committed by Sean McGinnis
parent 0ae42ac801
commit f7187d7fd5
18 changed files with 38 additions and 283 deletions
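
For context, the user-visible effect of the new quick-fail checks can be sketched as follows. This is an illustrative Python snippet, not part of the change: the endpoint URL, project id, token, and the use of the requests library are assumptions for the example only.

import json
import requests  # assumed available; any HTTP client would do

body = {
    "volume": {
        "size": 1,
        # Legacy replication v1 field; after this change the API controller
        # rejects it up front with InvalidInput instead of looking up the
        # replica and trying to clone it.
        "source_replica": "2f49aa3a-6aae-488d-8b99-a43271605af6",
    }
}

resp = requests.post(
    "http://cinder-api:8776/v2/<project_id>/volumes",  # placeholder endpoint
    headers={"Content-Type": "application/json",
             "X-Auth-Token": "<token>"},  # placeholder token
    data=json.dumps(body),
)

# Expected: a 400-class error carrying the "replication v1 ... no longer
# available" message from the new check, rather than a cloned volume.
print(resp.status_code)
print(resp.text)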

View File

@ -556,7 +556,8 @@ snapshot_id_2:
type: string
source_replica:
description: |
The UUID of the primary volume to clone.
The UUID of the replication volume to clone. This was for legacy
replication functionality and is no longer supported.
in: body
required: false
type: string

View File

@ -1523,7 +1523,8 @@ source_cgid:
type: string
source_replica:
description: |
The UUID of the primary volume to clone.
The UUID of the replication volume to clone. This was for legacy
replication functionality and is no longer supported.
in: body
required: false
type: string

View File

@ -10,7 +10,6 @@
"imageRef": null,
"volume_type": null,
"metadata": {},
"source_replica": null,
"consistencygroup_id": null
},
"OS-SCH-HNT:scheduler_hints": {

View File

@ -2033,7 +2033,8 @@ source_reference:
type: object
source_replica:
description: |
The UUID of the primary volume to clone.
The UUID of the replication volume to clone. This was for legacy
replication functionality and is no longer supported.
in: body
required: false
type: string

View File

@ -10,7 +10,6 @@
"imageRef": null,
"volume_type": null,
"metadata": {},
"source_replica": null,
"consistencygroup_id": null
},
"OS-SCH-HNT:scheduler_hints": {

View File

@ -179,6 +179,14 @@ class VolumeController(wsgi.Controller):
context = req.environ['cinder.context']
volume = body['volume']
# Check up front for legacy replication parameters to quick fail
source_replica = volume.get('source_replica')
if source_replica:
msg = _("Creating a volume from a replica source was part of the "
"replication v1 implementation which is no longer "
"available.")
raise exception.InvalidInput(reason=msg)
kwargs = {}
self.validate_name_and_description(volume)
@ -226,23 +234,6 @@ class VolumeController(wsgi.Controller):
else:
kwargs['source_volume'] = None
source_replica = volume.get('source_replica')
if source_replica is not None:
if not uuidutils.is_uuid_like(source_replica):
msg = _("Source replica ID '%s' must be a "
"valid UUID") % source_replica
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
src_vol = self.volume_api.get_volume(context,
source_replica)
if src_vol['replication_status'] == 'disabled':
explanation = _('source volume id:%s is not'
' replicated') % source_replica
raise exc.HTTPBadRequest(explanation=explanation)
kwargs['source_replica'] = src_vol
else:
kwargs['source_replica'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
@ -259,8 +250,6 @@ class VolumeController(wsgi.Controller):
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
elif size is None and kwargs['source_replica'] is not None:
size = kwargs['source_replica']['size']
LOG.info("Create volume of %s GB", size)

View File

@ -228,6 +228,14 @@ class VolumeController(volumes_v2.VolumeController):
kwargs = {}
self.validate_name_and_description(volume)
# Check up front for legacy replication parameters to quick fail
source_replica = volume.get('source_replica')
if source_replica:
msg = _("Creating a volume from a replica source was part of the "
"replication v1 implementation which is no longer "
"available.")
raise exception.InvalidInput(reason=msg)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in volume:
volume['display_name'] = volume.pop('name')
@ -272,23 +280,6 @@ class VolumeController(volumes_v2.VolumeController):
else:
kwargs['source_volume'] = None
source_replica = volume.get('source_replica')
if source_replica is not None:
if not uuidutils.is_uuid_like(source_replica):
msg = _("Source replica ID '%s' must be a "
"valid UUID") % source_replica
raise exc.HTTPBadRequest(explanation=msg)
# Not found exception will be handled at the wsgi level
src_vol = self.volume_api.get_volume(context,
source_replica)
if src_vol['replication_status'] == 'disabled':
explanation = _('source volume id:%s is not'
' replicated') % source_replica
raise exc.HTTPBadRequest(explanation=explanation)
kwargs['source_replica'] = src_vol
else:
kwargs['source_replica'] = None
kwargs['group'] = None
kwargs['consistencygroup'] = None
consistencygroup_id = volume.get('consistencygroup_id')
@ -311,8 +302,6 @@ class VolumeController(volumes_v2.VolumeController):
size = kwargs['snapshot']['volume_size']
elif size is None and kwargs['source_volume'] is not None:
size = kwargs['source_volume']['size']
elif size is None and kwargs['source_replica'] is not None:
size = kwargs['source_replica']['size']
LOG.info("Create volume of %s GB", size)

View File

@ -139,7 +139,6 @@ class VolumeApiTest(test.TestCase):
availability_zone=DEFAULT_AZ,
snapshot_id=None,
source_volid=None,
source_replica=None,
consistencygroup_id=None,
volume_type=None,
image_ref=None,
@ -151,7 +150,6 @@ class VolumeApiTest(test.TestCase):
"availability_zone": availability_zone,
"snapshot_id": snapshot_id,
"source_volid": source_volid,
"source_replica": source_replica,
"consistencygroup_id": consistencygroup_id,
"volume_type": volume_type,
"multiattach": multiattach,
@ -222,7 +220,6 @@ class VolumeApiTest(test.TestCase):
return {'metadata': None,
'snapshot': snapshot,
'source_volume': source_volume,
'source_replica': None,
'group': None,
'consistencygroup': None,
'availability_zone': availability_zone,
@ -340,8 +337,6 @@ class VolumeApiTest(test.TestCase):
@ddt.data({'source_volid': 1},
{'source_volid': []},
{'source_replica': 1},
{'source_replica': []},
{'consistencygroup_id': 1},
{'consistencygroup_id': []})
def test_volume_creation_fails_with_invalid_uuids(self, updated_uuids):
@ -353,42 +348,6 @@ class VolumeApiTest(test.TestCase):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
@mock.patch.object(volume_api.API, 'get_volume', autospec=True)
def test_volume_creation_fails_with_invalid_source_replica(self,
get_volume):
get_volume.side_effect = v2_fakes.fake_volume_get_notfound
source_replica = fake.VOLUME_ID
vol = self._vol_in_request_body(source_replica=source_replica)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 404 when source replica cannot be found.
self.assertRaises(exception.VolumeNotFound, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_volume.assert_called_once_with(self.controller.volume_api,
context, source_replica)
@mock.patch.object(volume_api.API, 'get_volume', autospec=True)
def test_volume_creation_fails_with_invalid_source_replication_status(
self, get_volume):
get_volume.side_effect = v2_fakes.fake_volume_get
source_replica = '2f49aa3a-6aae-488d-8b99-a43271605af6'
vol = self._vol_in_request_body(source_replica=source_replica)
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
# Raise 400 when replication status is disabled.
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, body)
context = req.environ['cinder.context']
get_volume.assert_called_once_with(self.controller.volume_api,
context, source_replica)
@mock.patch.object(groupAPI.API, 'get', autospec=True)
def test_volume_creation_fails_with_invalid_consistency_group(self,
get_cg):
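
The two source_replica tests removed from this test class covered the old UUID lookup and replication-status validation. A minimal replacement for the new quick-fail path might look like the sketch below; this is a hypothetical test reusing the helpers already present in this file, not part of the actual change.

def test_volume_creation_fails_with_source_replica(self):
    # The legacy field is rejected before any volume lookup happens,
    # so no get_volume mock is required.
    vol = self._vol_in_request_body()
    vol['source_replica'] = fake.VOLUME_ID
    body = {"volume": vol}
    req = fakes.HTTPRequest.blank('/v2/volumes')
    self.assertRaises(exception.InvalidInput,
                      self.controller.create, req, body)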

View File

@ -257,7 +257,6 @@ class VolumeApiTest(test.TestCase):
availability_zone=DEFAULT_AZ,
snapshot_id=None,
source_volid=None,
source_replica=None,
consistencygroup_id=None,
volume_type=None,
image_ref=None,
@ -269,7 +268,6 @@ class VolumeApiTest(test.TestCase):
"availability_zone": availability_zone,
"snapshot_id": snapshot_id,
"source_volid": source_volid,
"source_replica": source_replica,
"consistencygroup_id": consistencygroup_id,
"volume_type": volume_type,
"group_id": group_id,
@ -349,7 +347,6 @@ class VolumeApiTest(test.TestCase):
'metadata': None,
'snapshot': snapshot,
'source_volume': source_volume,
'source_replica': None,
'consistencygroup': None,
'availability_zone': availability_zone,
'scheduler_hints': None,
@ -440,8 +437,6 @@ class VolumeApiTest(test.TestCase):
@ddt.data({'source_volid': 1},
{'source_volid': []},
{'source_replica': 1},
{'source_replica': []},
{'consistencygroup_id': 1},
{'consistencygroup_id': []})
def test_volume_creation_fails_with_invalid_uuids(self, updated_uuids):

View File

@ -37,9 +37,6 @@ class ScaleIODriver(driver.ScaleIODriver):
def promote_replica(self, context, volume):
pass
def create_replica_test_volume(self, volume, src_vref):
pass
def unmanage(self, volume):
pass

View File

@ -27,8 +27,6 @@ class FakeVolumeAPI(object):
self.test_inst.assertEqual(request_spec['source_volid'], source_volid)
self.test_inst.assertEqual(request_spec['snapshot_id'], snapshot_id)
self.test_inst.assertEqual(request_spec['image_id'], image_id)
self.test_inst.assertEqual(request_spec['source_replicaid'],
source_replicaid)
class FakeSchedulerRpcAPI(object):

View File

@ -75,7 +75,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'source_volid': None,
'snapshot_id': None,
'image_id': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None, }
@ -93,7 +92,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'source_volid': 2,
'snapshot_id': 3,
'image_id': 4,
'source_replicaid': 5,
'consistencygroup_id': 5,
'cgsnapshot_id': None,
'group_id': None, }
@ -137,7 +135,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'size': 1},
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -183,7 +180,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=None,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -228,7 +224,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=volume_type,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -240,7 +235,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'volume_type_id': 1,
'encryption_key_id': None,
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None,
@ -286,7 +280,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=volume_type,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -330,7 +323,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=volume_type,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -342,7 +334,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'volume_type_id': 1,
'encryption_key_id': None,
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None,
@ -395,7 +386,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=volume_type,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -439,7 +429,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=volume_type,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -452,7 +441,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'encryption_key_id': None,
'qos_specs': None,
'replication_status': 'disabled',
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'refresh_az': False,
@ -497,7 +485,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=volume_type,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -509,7 +496,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'volume_type_id': 1,
'encryption_key_id': None,
'qos_specs': {'fake_key': 'fake'},
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None,
@ -562,7 +548,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=None,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -574,7 +559,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'volume_type_id': 1,
'encryption_key_id': None,
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None,
@ -628,7 +612,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=None,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -640,7 +623,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'volume_type_id': 1,
'encryption_key_id': None,
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None,
@ -693,7 +675,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=None,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)
@ -705,7 +686,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
'volume_type_id': 1,
'encryption_key_id': None,
'qos_specs': None,
'source_replicaid': None,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'group_id': None,
@ -756,7 +736,6 @@ class CreateVolumeFlowTestCase(test.TestCase):
volume_type=None,
metadata=None,
key_manager=fake_key_manager,
source_replica=None,
consistencygroup=None,
cgsnapshot=None,
group=None)

View File

@ -1366,32 +1366,6 @@ class VolumeTestCase(base.BaseVolumeTestCase):
snapshot_obj.destroy()
db.volume_destroy(self.context, src_vol_id)
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
@mock.patch('cinder.utils.execute')
def test_create_volume_from_srcreplica_raise_metadata_copy_failure(
self, mock_execute, _create_replica_test):
mock_execute.return_value = None
_create_replica_test.return_value = None
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
self.volume.create_volume(self.context, src_vol)
# set bootable flag of volume to True
db.volume_update(self.context, src_vol['id'], {'bootable': True})
# create volume from source volume
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
self._raise_metadata_copy_failure(
'volume_glance_metadata_copy_from_volume_to_volume',
dst_vol)
# cleanup resource
db.volume_destroy(self.context, src_vol_id)
@mock.patch('cinder.utils.execute')
def test_create_volume_from_snapshot_with_glance_volume_metadata_none(
self, mock_execute):
@ -1433,38 +1407,6 @@ class VolumeTestCase(base.BaseVolumeTestCase):
db.volume_destroy(self.context, src_vol_id)
db.volume_destroy(self.context, dst_vol['id'])
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
def test_create_volume_from_srcreplica_with_glance_volume_metadata_none(
self, _create_replica_test):
"""Test volume can be created from a volume replica."""
_create_replica_test.return_value = None
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src)
db.volume_update(self.context, volume_src['id'], {'bootable': True})
volume = db.volume_get(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst,
{'source_replicaid': volume.id})
self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_from_volume_to_volume,
self.context, volume_src['id'], volume_dst['id'])
self.assertEqual('available',
db.volume_get(self.context,
volume_dst['id']).status)
self.assertTrue(_create_replica_test.called)
# cleanup resource
db.volume_destroy(self.context, volume_dst['id'])
db.volume_destroy(self.context, volume_src['id'])
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of an encrypted volume"""
@ -2350,27 +2292,6 @@ class VolumeTestCase(base.BaseVolumeTestCase):
self.assertEqual(100, volumes_reserved)
@mock.patch(
'cinder.volume.driver.VolumeDriver.create_replica_test_volume')
def test_create_volume_from_sourcereplica(self, _create_replica_test):
"""Test volume can be created from a volume replica."""
_create_replica_test.return_value = None
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src)
volume_dst = tests_utils.create_volume(
self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst,
{'source_replicaid': volume_src.id})
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
self.assertTrue(_create_replica_test.called)
self.volume.delete_volume(self.context, volume_dst)
self.volume.delete_volume(self.context, volume_src)
def test_create_volume_from_sourcevol(self):
"""Test volume can be created from a source volume."""
def fake_create_cloned_volume(volume, src_vref):

View File

@ -225,6 +225,13 @@ class API(base.Base):
check_policy(context, 'create_from_image' if image_id else 'create')
# Check up front for legacy replication parameters to quick fail
if source_replica:
msg = _("Creating a volume from a replica source was part of the "
"replication v1 implementation which is no longer "
"available.")
raise exception.InvalidInput(reason=msg)
# NOTE(jdg): we can have a create without size if we're
# doing a create from snap or volume. Currently
# the taskflow api will handle this and pull in the
@ -276,12 +283,6 @@ class API(base.Base):
"or omit type argument).") % volume_type.id
raise exception.InvalidInput(reason=msg)
# When cloning replica (for testing), volume type must be omitted
if source_replica and volume_type:
msg = _("No volume_type should be provided when creating test "
"replica.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type.id != snapshot.volume_type_id:
if not self._retype_is_possible(context,
@ -315,7 +316,6 @@ class API(base.Base):
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'source_replica': source_replica,
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,

View File

@ -2122,9 +2122,6 @@ class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD,
raise NotImplementedError()
def create_replica_test_volume(self, volume, src_vref):
raise NotImplementedError()
def delete_volume(self, volume):
raise NotImplementedError()

View File

@ -68,9 +68,9 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
# reconstructed elsewhere and continued).
default_provides = set(['availability_zone', 'size', 'snapshot_id',
'source_volid', 'volume_type', 'volume_type_id',
'encryption_key_id', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id',
'qos_specs', 'group_id', 'refresh_az'])
'encryption_key_id', 'consistencygroup_id',
'cgsnapshot_id', 'qos_specs', 'group_id',
'refresh_az'])
def __init__(self, image_service, availability_zones, **kwargs):
super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
@ -136,13 +136,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,),
exception.InvalidVolume, 'source volume')
def _extract_source_replica(self, source_replica):
return self._extract_resource(source_replica, (SRC_VOL_PROCEED_STATUS,
REPLICA_PROCEED_STATUS),
exception.InvalidVolume,
'replica', ('status',
'replication_status'))
@staticmethod
def _extract_size(size, source_volume, snapshot):
"""Extracts and validates the volume size.
@ -422,7 +415,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
def execute(self, context, size, snapshot, image_id, source_volume,
availability_zone, volume_type, metadata, key_manager,
source_replica, consistencygroup, cgsnapshot, group):
consistencygroup, cgsnapshot, group):
utils.check_exclusive_options(snapshot=snapshot,
imageRef=image_id,
@ -433,7 +426,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
# volume will remain available after we do this initial verification??
snapshot_id = self._extract_snapshot(snapshot)
source_volid = self._extract_source_volume(source_volume)
source_replicaid = self._extract_source_replica(source_replica)
size = self._extract_size(size, source_volume, snapshot)
consistencygroup_id = self._extract_consistencygroup(consistencygroup)
cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)
@ -457,12 +449,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
volume_type = (image_volume_type if image_volume_type else
def_vol_type)
# When creating a clone of a replica (replication test), we can't
# use the volume type of the replica, therefore, we use the default.
# NOTE(ronenkat): this assumes the default type is not replicated.
if source_replicaid:
volume_type = def_vol_type
volume_type_id = self._get_volume_type_id(volume_type,
source_volume, snapshot)
@ -502,7 +488,6 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
'volume_type_id': volume_type_id,
'encryption_key_id': encryption_key_id,
'qos_specs': specs,
'source_replicaid': source_replicaid,
'consistencygroup_id': consistencygroup_id,
'cgsnapshot_id': cgsnapshot_id,
'group_id': group_id,
@ -523,9 +508,8 @@ class EntryCreateTask(flow_utils.CinderTask):
requires = ['availability_zone', 'description', 'metadata',
'name', 'reservations', 'size', 'snapshot_id',
'source_volid', 'volume_type_id', 'encryption_key_id',
'source_replicaid', 'consistencygroup_id',
'cgsnapshot_id', 'multiattach', 'qos_specs',
'group_id', ]
'consistencygroup_id', 'cgsnapshot_id', 'multiattach',
'qos_specs', 'group_id', ]
super(EntryCreateTask, self).__init__(addons=[ACTION],
requires=requires)
@ -735,8 +719,8 @@ class VolumeCastTask(flow_utils.CinderTask):
def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
requires = ['image_id', 'scheduler_hints', 'snapshot_id',
'source_volid', 'volume_id', 'volume', 'volume_type',
'volume_properties', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id', 'group_id', ]
'volume_properties', 'consistencygroup_id',
'cgsnapshot_id', 'group_id', ]
super(VolumeCastTask, self).__init__(addons=[ACTION],
requires=requires)
self.volume_rpcapi = volume_rpcapi
@ -745,8 +729,7 @@ class VolumeCastTask(flow_utils.CinderTask):
def _cast_create_volume(self, context, request_spec, filter_properties):
source_volume_ref = None
source_volid = (request_spec['source_volid'] or
request_spec['source_replicaid'])
source_volid = request_spec['source_volid']
volume = request_spec['volume']
snapshot_id = request_spec['snapshot_id']
image_id = request_spec['image_id']

View File

@ -295,19 +295,6 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
'source_volstatus': source_volume_ref.status,
'type': 'source_vol',
})
elif request_spec.get('source_replicaid'):
# We are making a clone based on the replica.
#
# NOTE(harlowja): This will likely fail if the replica
# disappeared by the time this call occurred.
source_volid = request_spec['source_replicaid']
source_volume_ref = objects.Volume.get_by_id(context,
source_volid)
specs.update({
'source_replicaid': source_volid,
'source_replicastatus': source_volume_ref.status,
'type': 'source_replica',
})
elif request_spec.get('image_id'):
# We are making an image based volume instead of a raw volume.
image_href = request_spec['image_id']
@ -416,17 +403,6 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
context,
source_volid,
volume.id)
elif kwargs.get('source_replicaid'):
src_type = 'source replica'
src_id = kwargs['source_replicaid']
source_replicaid = src_id
LOG.debug(log_template, {'src_type': src_type,
'src_id': src_id,
'vol_id': volume.id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_replicaid,
volume.id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
@ -503,28 +479,6 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
context, volume, source_volid=srcvol_ref.id)
return model_update
def _create_from_source_replica(self, context, volume, source_replicaid,
**kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = objects.Volume.get_by_id(context, source_replicaid)
model_update = self.driver.create_replica_test_volume(volume,
srcvol_ref)
self._cleanup_cg_in_volume(volume)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(
context,
volume,
source_replicaid=source_replicaid)
return model_update
def _copy_image_to_volume(self, context, volume,
image_meta, image_location, image_service):
@ -942,9 +896,6 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
elif create_type == 'source_vol':
model_update = self._create_from_source_volume(
context, volume, **volume_spec)
elif create_type == 'source_replica':
model_update = self._create_from_source_replica(
context, volume, **volume_spec)
elif create_type == 'image':
model_update = self._create_from_image(context,
volume,

View File

@ -620,7 +620,6 @@ class VolumeManager(manager.CleanableManager,
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
@ -628,9 +627,6 @@ class VolumeManager(manager.CleanableManager,
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None