Merge "Support create volume from backup"
commit 2d836d9fd2
@@ -555,6 +555,13 @@ backup_gigabytes_usage:
   in: body
   required: true
   type: object
+backup_id_1:
+  description: |
+    The UUID of the backup.
+  in: body
+  required: false
+  type: string
+  min_version: 3.47
 backup_service:
   description: |
     The service used to perform the backup.
@@ -6,6 +6,7 @@
         "description": null,
         "multiattach": false,
         "snapshot_id": null,
+        "backup_id": null,
         "name": null,
         "imageRef": null,
         "volume_type": null,
@@ -188,6 +188,7 @@ Request
    - description: description
    - multiattach: multiattach
    - snapshot_id: snapshot_id
+   - backup_id: backup_id_1
    - name: name_13
    - imageRef: imageRef
    - volume_type: volume_type
@@ -131,6 +131,8 @@ SUPPORT_COUNT_INFO = '3.45'
 
 SUPPORT_NOVA_IMAGE = '3.46'
 
+VOLUME_CREATE_FROM_BACKUP = '3.47'
+
 
 def get_mv_header(version):
     """Gets a formatted HTTP microversion header.
@@ -110,6 +110,7 @@ REST_API_VERSION_HISTORY = """
    * 3.45 - Add ``count`` field to volume, backup and snapshot list and
             detail APIs.
    * 3.46 - Support create volume by Nova specific image (0 size image).
+   * 3.47 - Support create volume from backup.
 """
 
 # The minimum and maximum versions of the API supported
@@ -117,7 +118,7 @@ REST_API_VERSION_HISTORY = """
 # minimum version of the API supported.
 # Explicitly using /v2 endpoints will still work
 _MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.46"
+_MAX_API_VERSION = "3.47"
 _LEGACY_API_VERSION2 = "2.0"
 UPDATED = "2017-09-19T20:18:14Z"
 
@@ -381,3 +381,7 @@ user documentation.
 3.46
 ----
 Support create volume by Nova specific image (0 size image).
+
+3.47
+----
+Support create volume from backup.
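For illustration only (not part of this change), a client request against the new microversion might look like the following sketch; the endpoint, project ID, token and backup UUID are placeholders.

# Sketch: create a volume from an existing backup via the 3.47 API.
# Endpoint, project ID, token and backup UUID below are placeholders.
import requests

base_url = "http://controller:8776/v3/<project_id>"
headers = {
    "X-Auth-Token": "<keystone-token>",
    # Ask for at least microversion 3.47; on older versions the
    # backup_id field is simply not read by the API.
    "OpenStack-API-Version": "volume 3.47",
}
body = {
    "volume": {
        "backup_id": "<backup-uuid>",
        # "size" may be omitted; the API then defaults it to the backup size.
    }
}
resp = requests.post(base_url + "/volumes", json=body, headers=headers)
print(resp.status_code, resp.json())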
@@ -25,6 +25,7 @@ from cinder.api import microversions as mv
 from cinder.api.openstack import wsgi
 from cinder.api.v2 import volumes as volumes_v2
 from cinder.api.v3.views import volumes as volume_views_v3
+from cinder.backup import api as backup_api
 from cinder import exception
 from cinder import group as group_api
 from cinder.i18n import _
@@ -43,6 +44,7 @@ class VolumeController(volumes_v2.VolumeController):
 
     def __init__(self, ext_mgr):
         self.group_api = group_api.API()
+        self.backup_api = backup_api.API()
         super(VolumeController, self).__init__(ext_mgr)
 
     def delete(self, req, id):
@@ -335,11 +337,26 @@ class VolumeController(volumes_v2.VolumeController):
             else:
                 kwargs['image_id'] = image_uuid
 
+        # Add backup if min version is greater than or equal
+        # to VOLUME_CREATE_FROM_BACKUP.
+        if req_version.matches(mv.VOLUME_CREATE_FROM_BACKUP, None):
+            backup_id = volume.get('backup_id')
+            if backup_id:
+                if not uuidutils.is_uuid_like(backup_id):
+                    msg = _("Backup ID must be in UUID form.")
+                    raise exc.HTTPBadRequest(explanation=msg)
+                kwargs['backup'] = self.backup_api.get(context,
+                                                       backup_id=backup_id)
+            else:
+                kwargs['backup'] = None
+
         size = volume.get('size', None)
         if size is None and kwargs['snapshot'] is not None:
             size = kwargs['snapshot']['volume_size']
         elif size is None and kwargs['source_volume'] is not None:
             size = kwargs['source_volume']['size']
+        elif size is None and kwargs.get('backup') is not None:
+            size = kwargs['backup']['size']
 
         LOG.info("Create volume of %s GB", size)
 
@@ -165,6 +165,9 @@ class API(base.Base):
             idx = idx + 1
         return None
 
+    def get_available_backup_service_host(self, host, az):
+        return self._get_available_backup_service_host(host, az)
+
     def _get_available_backup_service_host(self, host, az):
         """Return an appropriate backup service host."""
         backup_host = None
@@ -139,6 +139,7 @@ OBJ_VERSIONS.add('1.28', {'Service': '1.5'})
 OBJ_VERSIONS.add('1.29', {'Service': '1.6'})
 OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'})
 OBJ_VERSIONS.add('1.31', {'Volume': '1.7'})
+OBJ_VERSIONS.add('1.32', {'RequestSpec': '1.3'})
 
 
 class CinderObjectRegistry(base.VersionedObjectRegistry):
@@ -25,7 +25,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
     # Version 1.0: Initial version
     # Version 1.1: Added group_id and group_backend
     # Version 1.2: Added ``resource_backend``
-    VERSION = '1.2'
+    # Version 1.3: Added backup_id
+    VERSION = '1.3'
 
     fields = {
         'consistencygroup_id': fields.UUIDField(nullable=True),
@@ -42,7 +43,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
                                                 nullable=True),
         'CG_backend': fields.StringField(nullable=True),
         'group_backend': fields.StringField(nullable=True),
-        'resource_backend': fields.StringField(nullable=True)
+        'resource_backend': fields.StringField(nullable=True),
+        'backup_id': fields.UUIDField(nullable=True),
     }
 
     obj_extra_fields = ['resource_properties']
@@ -40,7 +40,7 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
         super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
                                                        **kwargs)
 
-    def _populate_request_spec(self, volume, snapshot_id, image_id):
+    def _populate_request_spec(self, volume, snapshot_id, image_id, backup_id):
         # Create the full request spec using the volume object.
         #
         # NOTE(dulek): At this point, a volume can be deleted before it gets
@@ -53,6 +53,7 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
             'volume_id': volume.id,
             'snapshot_id': snapshot_id,
             'image_id': image_id,
+            'backup_id': backup_id,
             'volume_properties': {
                 'size': utils.as_int(volume.size, quiet=False),
                 'availability_zone': volume.availability_zone,
@@ -62,11 +63,12 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
         }
 
     def execute(self, context, request_spec, volume, snapshot_id,
-                image_id):
+                image_id, backup_id):
         # For RPC version < 1.2 backward compatibility
         if request_spec is None:
             request_spec = self._populate_request_spec(volume,
-                                                       snapshot_id, image_id)
+                                                       snapshot_id, image_id,
+                                                       backup_id)
         return {
             'request_spec': request_spec,
         }
@@ -140,7 +142,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
 
 def get_flow(context, driver_api, request_spec=None,
              filter_properties=None,
-             volume=None, snapshot_id=None, image_id=None):
+             volume=None, snapshot_id=None, image_id=None, backup_id=None):
 
     """Constructs and returns the scheduler entrypoint flow.
 
@@ -158,6 +160,7 @@ def get_flow(context, driver_api, request_spec=None,
         'volume': volume,
         'snapshot_id': snapshot_id,
         'image_id': image_id,
+        'backup_id': backup_id,
     }
 
     flow_name = ACTION.replace(":", "_") + "_scheduler"
@@ -173,7 +173,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
 
     @objects.Volume.set_workers
     def create_volume(self, context, volume, snapshot_id=None, image_id=None,
-                      request_spec=None, filter_properties=None):
+                      request_spec=None, filter_properties=None,
+                      backup_id=None):
         self._wait_for_scheduler()
 
         try:
@@ -183,7 +184,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
                                                  filter_properties,
                                                  volume,
                                                  snapshot_id,
-                                                 image_id)
+                                                 image_id,
+                                                 backup_id)
         except Exception:
             msg = _("Failed to create scheduler manager volume flow")
             LOG.exception(msg)
@@ -70,9 +70,10 @@ class SchedulerAPI(rpc.RPCAPI):
         3.7 - Adds set_log_levels and get_log_levels
         3.8 - Adds ``valid_host_capacity`` method
         3.9 - Adds create_snapshot method
+        3.10 - Adds backup_id to create_volume method.
     """
 
-    RPC_API_VERSION = '3.9'
+    RPC_API_VERSION = '3.10'
     RPC_DEFAULT_VERSION = '3.0'
     TOPIC = constants.SCHEDULER_TOPIC
     BINARY = 'cinder-scheduler'
@@ -94,12 +95,16 @@ class SchedulerAPI(rpc.RPCAPI):
         cctxt.cast(ctxt, 'create_group', **msg_args)
 
     def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None,
-                      request_spec=None, filter_properties=None):
+                      request_spec=None, filter_properties=None,
+                      backup_id=None):
         volume.create_worker()
         cctxt = self._get_cctxt()
         msg_args = {'snapshot_id': snapshot_id, 'image_id': image_id,
                     'request_spec': request_spec,
-                    'filter_properties': filter_properties, 'volume': volume}
+                    'filter_properties': filter_properties,
+                    'volume': volume, 'backup_id': backup_id}
+        if not self.client.can_send_version('3.10'):
+            msg_args.pop('backup_id')
         return cctxt.cast(ctxt, 'create_volume', **msg_args)
 
     @rpc.assert_min_rpc_version('3.8')
@@ -205,6 +205,26 @@ def fake_snapshot(id, **kwargs):
     return snapshot
 
 
+def fake_backup(id, **kwargs):
+    backup = {'id': fake.BACKUP_ID,
+              'volume_id': fake.VOLUME_ID,
+              'status': fields.BackupStatus.CREATING,
+              'size': 1,
+              'display_name': 'fake_name',
+              'display_description': 'fake_description',
+              'user_id': fake.USER_ID,
+              'project_id': fake.PROJECT_ID,
+              'temp_volume_id': None,
+              'temp_snapshot_id': None,
+              'snapshot_id': None,
+              'data_timestamp': None,
+              'restore_volume_id': None,
+              'backup_metadata': {}}
+
+    backup.update(kwargs)
+    return backup
+
+
 def fake_snapshot_get_all(context, filters=None, marker=None, limit=None,
                           sort_keys=None, sort_dirs=None, offset=None):
     return [fake_snapshot(fake.VOLUME_ID, project_id=fake.PROJECT_ID),
@@ -239,6 +259,13 @@ def fake_snapshot_get(self, context, snapshot_id):
     return fake_snapshot(snapshot_id)
 
 
+def fake_backup_get(self, context, backup_id):
+    if backup_id == fake.WILL_NOT_BE_FOUND_ID:
+        raise exc.BackupNotFound(backup_id=backup_id)
+
+    return fake_backup(backup_id)
+
+
 def fake_consistencygroup_get_notfound(self, context, cg_id):
     raise exc.GroupNotFound(group_id=cg_id)
 
@@ -21,8 +21,11 @@ from cinder.api import extensions
 from cinder.api import microversions as mv
 from cinder.api.v3 import volume_metadata
 from cinder.api.v3 import volumes
+from cinder.backup import rpcapi as backup_rpcapi
 from cinder import db
 from cinder import exception
+from cinder.objects import base as obj_base
+from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import test
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit.api.v2 import fakes as v2_fakes
@@ -135,6 +138,19 @@ class VolumeMetaDataTest(test.TestCase):
 
         self.ext_mgr = extensions.ExtensionManager()
         self.ext_mgr.extensions = {}
+        self.patch(
+            'cinder.objects.Service.get_minimum_obj_version',
+            return_value=obj_base.OBJ_VERSIONS.get_current())
+
+        def _get_minimum_rpc_version_mock(ctxt, binary):
+            binary_map = {
+                'cinder-backup': backup_rpcapi.BackupAPI,
+                'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
+            }
+            return binary_map[binary].RPC_API_VERSION
+
+        self.patch('cinder.objects.Service.get_minimum_rpc_version',
+                   side_effect=_get_minimum_rpc_version_mock)
         self.volume_controller = volumes.VolumeController(self.ext_mgr)
         self.controller = volume_metadata.Controller()
         self.req_id = str(uuid.uuid4())
@@ -261,6 +277,19 @@ class VolumeMetaDataTestNoMicroversion(v2_test.VolumeMetaDataTest):
 
     def setUp(self):
         super(VolumeMetaDataTestNoMicroversion, self).setUp()
+        self.patch(
+            'cinder.objects.Service.get_minimum_obj_version',
+            return_value=obj_base.OBJ_VERSIONS.get_current())
+
+        def _get_minimum_rpc_version_mock(ctxt, binary):
+            binary_map = {
+                'cinder-backup': backup_rpcapi.BackupAPI,
+                'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
+            }
+            return binary_map[binary].RPC_API_VERSION
+
+        self.patch('cinder.objects.Service.get_minimum_rpc_version',
+                   side_effect=_get_minimum_rpc_version_mock)
         self.volume_controller = volumes.VolumeController(self.ext_mgr)
         self.controller = volume_metadata.Controller()
         self.url = '/v3/%s/volumes/%s/metadata' % (
@@ -23,6 +23,7 @@ from cinder.api import extensions
 from cinder.api import microversions as mv
 from cinder.api.v2.views.volumes import ViewBuilder
 from cinder.api.v3 import volumes
+from cinder.backup import api as backup_api
 from cinder import context
 from cinder import db
 from cinder import exception
@@ -393,7 +394,8 @@ class VolumeApiTest(test.TestCase):
                              volume_type=None,
                              image_ref=None,
                              image_id=None,
-                             group_id=None):
+                             group_id=None,
+                             backup_id=None):
         vol = {"size": size,
                "name": name,
                "description": description,
@@ -409,6 +411,8 @@ class VolumeApiTest(test.TestCase):
             vol['image_id'] = image_id
         elif image_ref is not None:
             vol['imageRef'] = image_ref
+        elif backup_id is not None:
+            vol['backup_id'] = backup_id
 
         return vol
 
@@ -557,6 +561,42 @@ class VolumeApiTest(test.TestCase):
                                        v2_fakes.DEFAULT_VOL_DESCRIPTION,
                                        **kwargs)
 
+    @ddt.data(mv.VOLUME_CREATE_FROM_BACKUP,
+              mv.get_prior_version(mv.VOLUME_CREATE_FROM_BACKUP))
+    @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
+                       autospec=True)
+    @mock.patch.object(backup_api.API, 'get', autospec=True)
+    @mock.patch.object(volume_api.API, 'create', autospec=True)
+    def test_volume_creation_from_backup(self, max_ver, create, get_backup,
+                                         volume_type_get):
+        create.side_effect = v2_fakes.fake_volume_api_create
+        get_backup.side_effect = v2_fakes.fake_backup_get
+        volume_type_get.side_effect = v2_fakes.fake_volume_type_get
+
+        backup_id = fake.BACKUP_ID
+        vol = self._vol_in_request_body(backup_id=backup_id)
+        body = {"volume": vol}
+        req = fakes.HTTPRequest.blank('/v3/volumes')
+        req.api_version_request = mv.get_api_version(max_ver)
+        res_dict = self.controller.create(req, body)
+        ex = self._expected_vol_from_controller(
+            req_version=req.api_version_request)
+        self.assertEqual(ex, res_dict)
+
+        context = req.environ['cinder.context']
+        kwargs = self._expected_volume_api_create_kwargs(
+            req_version=req.api_version_request)
+        if max_ver >= mv.VOLUME_CREATE_FROM_BACKUP:
+            get_backup.assert_called_once_with(self.controller.backup_api,
+                                               context, backup_id)
+            kwargs.update({'backup': v2_fakes.fake_backup_get(None, context,
+                                                              backup_id)})
+        create.assert_called_once_with(self.controller.volume_api, context,
+                                       vol['size'],
+                                       v2_fakes.DEFAULT_VOL_NAME,
+                                       v2_fakes.DEFAULT_VOL_DESCRIPTION,
+                                       **kwargs)
+
     @ddt.data({'s': 'ea895e29-8485-4930-bbb8-c5616a309c0e'},
               ['ea895e29-8485-4930-bbb8-c5616a309c0e'],
               42)
@@ -42,7 +42,7 @@ object_data = {
     'ManageableVolumeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
     'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'RequestSpec': '1.2-207502df46a50575a818076e1ea119db',
+    'RequestSpec': '1.3-9510bf37e30fd4c282599a4b2a26675e',
     'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
@@ -62,7 +62,10 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
                            timestamp='123')
         can_send_version.assert_called_once_with('3.3')
 
-    def test_create_volume(self):
+    @ddt.data('3.0', '3.10')
+    @mock.patch('oslo_messaging.RPCClient.can_send_version')
+    def test_create_volume(self, version, can_send_version):
+        can_send_version.side_effect = lambda x: x == version
         create_worker_mock = self.mock_object(self.fake_volume,
                                               'create_worker')
         self._test_rpc_api('create_volume',
@@ -70,9 +73,11 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
                            volume=self.fake_volume,
                            snapshot_id=fake_constants.SNAPSHOT_ID,
                            image_id=fake_constants.IMAGE_ID,
+                           backup_id=fake_constants.BACKUP_ID,
                            request_spec=self.fake_rs_obj,
                            filter_properties=self.fake_fp_dict)
         create_worker_mock.assert_called_once()
+        can_send_version.assert_called_once_with('3.10')
 
     @mock.patch('oslo_messaging.RPCClient.can_send_version',
                 return_value=True)
@@ -35,7 +35,8 @@ class FakeSchedulerRpcAPI(object):
         self.test_inst = test_inst
 
     def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None,
-                      request_spec=None, filter_properties=None):
+                      request_spec=None, filter_properties=None,
+                      backup_id=None):
 
         self.test_inst.assertEqual(self.expected_spec, request_spec)
 
@@ -27,6 +27,7 @@ from cinder import context
 from cinder import exception
 from cinder.message import message_field
 from cinder import test
+from cinder.tests.unit.backup import fake_backup
 from cinder.tests.unit.consistencygroup import fake_consistencygroup
 from cinder.tests.unit import fake_constants as fakes
 from cinder.tests.unit import fake_snapshot
@@ -75,7 +76,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'image_id': 4,
                 'consistencygroup_id': None,
                 'cgsnapshot_id': None,
-                'group_id': None, }
+                'group_id': None,
+                'backup_id': None, }
 
         # Fake objects assert specs
         task = create_volume.VolumeCastTask(
@@ -97,7 +99,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'image_id': 4,
                 'consistencygroup_id': None,
                 'cgsnapshot_id': None,
-                'group_id': None, }
+                'group_id': None,
+                'backup_id': None, }
 
         # Fake objects assert specs
         task = create_volume.VolumeCastTask(
@@ -131,7 +134,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'image_id': None,
                 'consistencygroup_id': None,
                 'cgsnapshot_id': None,
-                'group_id': None, }
+                'group_id': None,
+                'backup_id': None, }
 
         # Fake objects assert specs
         task = create_volume.VolumeCastTask(
@@ -148,7 +152,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                 'image_id': 4,
                 'consistencygroup_id': 5,
                 'cgsnapshot_id': None,
-                'group_id': None, }
+                'group_id': None,
+                'backup_id': None, }
 
         # Fake objects assert specs
         task = create_volume.VolumeCastTask(
@@ -192,7 +197,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         self.assertEqual(replication_status, result['replication_status'],
                          extra_specs)
 
@@ -238,7 +244,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         fake_get_encryption_key.assert_called_once_with(
             fake_key_manager, self.ctxt, fakes.VOLUME_TYPE_ID,
             None, None, image_meta)
@@ -283,7 +290,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -296,7 +304,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'cgsnapshot_id': None,
                            'group_id': None,
                            'refresh_az': False,
-                           'replication_status': 'disabled'}
+                           'replication_status': 'disabled',
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.volume.volume_types.is_encrypted')
@@ -340,7 +349,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
 
     @mock.patch('cinder.volume.volume_types.is_encrypted')
     @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
@@ -384,7 +394,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -397,7 +408,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'cgsnapshot_id': None,
                            'group_id': None,
                            'refresh_az': True,
-                           'replication_status': 'disabled'}
+                           'replication_status': 'disabled',
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.volume.volume_types.is_encrypted',
@@ -448,7 +460,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
 
         mock_is_encrypted.assert_called_once_with(self.ctxt, 1)
         mock_get_volume_type_encryption.assert_called_once_with(self.ctxt, 1)
@@ -492,7 +505,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': (sys.maxsize + 1),
                            'snapshot_id': None,
                            'source_volid': None,
@@ -505,7 +519,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'consistencygroup_id': None,
                            'cgsnapshot_id': None,
                            'refresh_az': False,
-                           'group_id': None, }
+                           'group_id': None,
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.volume.volume_types.is_encrypted')
@@ -549,7 +564,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -562,7 +578,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'cgsnapshot_id': None,
                            'group_id': None,
                            'refresh_az': False,
-                           'replication_status': 'disabled'}
+                           'replication_status': 'disabled',
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.volume.volume_types.is_encrypted')
@@ -613,7 +630,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -626,7 +644,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'cgsnapshot_id': None,
                            'group_id': None,
                            'refresh_az': False,
-                           'replication_status': 'disabled'}
+                           'replication_status': 'disabled',
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.db.volume_type_get_by_name')
@@ -678,7 +697,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -691,7 +711,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'cgsnapshot_id': None,
                            'group_id': None,
                            'refresh_az': False,
-                           'replication_status': 'disabled'}
+                           'replication_status': 'disabled',
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.db.volume_type_get_by_name')
@@ -742,7 +763,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
         expected_result = {'size': 1,
                            'snapshot_id': None,
                            'source_volid': None,
@@ -755,7 +777,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                            'cgsnapshot_id': None,
                            'group_id': None,
                            'refresh_az': False,
-                           'replication_status': 'disabled'}
+                           'replication_status': 'disabled',
+                           'backup_id': None}
         self.assertEqual(expected_result, result)
 
     @mock.patch('cinder.db.volume_type_get_by_name')
@@ -804,7 +827,8 @@ class CreateVolumeFlowTestCase(test.TestCase):
                               consistencygroup=None,
                               cgsnapshot=None,
                               group=None,
-                              group_snapshot=None)
+                              group_snapshot=None,
+                              backup=None)
 
 
 @ddt.ddt
@@ -986,6 +1010,67 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
         fake_driver.copy_image_to_volume.assert_called_once_with(
             self.ctxt, volume, fake_image_service, image_id)
 
+    @ddt.data({'driver_error': True},
+              {'driver_error': False})
+    @mock.patch('cinder.backup.api.API.get_available_backup_service_host')
+    @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
+    @mock.patch('oslo_service.loopingcall.'
+                'FixedIntervalWithTimeoutLoopingCall')
+    @mock.patch('cinder.volume.flows.manager.create_volume.'
+                'CreateVolumeFromSpecTask.'
+                '_create_raw_volume')
+    @mock.patch('cinder.db.volume_update')
+    @mock.patch('cinder.db.backup_update')
+    @mock.patch('cinder.objects.Volume.get_by_id')
+    @mock.patch('cinder.objects.Backup.get_by_id')
+    @ddt.unpack
+    def test_create_from_backup(self,
+                                backup_get_by_id,
+                                volume_get_by_id,
+                                mock_backup_update,
+                                mock_volume_update,
+                                mock_create_volume,
+                                mock_fixed_looping_call,
+                                mock_restore_backup,
+                                mock_get_backup_host,
+                                driver_error):
+        fake_db = mock.MagicMock()
+        fake_driver = mock.MagicMock()
+        fake_volume_manager = mock.MagicMock()
+        backup_host = 'host@backend#pool'
+        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+            fake_volume_manager, fake_db, fake_driver)
+
+        volume_obj = fake_volume.fake_volume_obj(self.ctxt)
+        backup_obj = fake_backup.fake_backup_obj(self.ctxt,
+                                                 **{'status': 'available',
+                                                    'host': backup_host})
+        backup_get_by_id.return_value = backup_obj
+        volume_get_by_id.return_value = volume_obj
+
+        mock_create_volume.return_value = {}
+        mock_get_backup_host.return_value = backup_host
+        mock_fixed_looping_call.return_value = mock.MagicMock()
+
+        if driver_error:
+            fake_driver.create_volume_from_backup.side_effect = [
+                NotImplementedError]
+        fake_manager._create_from_backup(self.ctxt, volume_obj,
+                                         backup_obj.id)
+        fake_driver.create_volume_from_backup.assert_called_once_with(
+            volume_obj, backup_obj)
+        if driver_error:
+            mock_create_volume.assert_called_once_with(volume_obj)
+            mock_get_backup_host.assert_called_once_with(
+                backup_obj.host, backup_obj.availability_zone)
+            mock_restore_backup.assert_called_once_with(self.ctxt,
+                                                        backup_host,
+                                                        backup_obj,
+                                                        volume_obj['id'])
+        else:
+            fake_driver.create_volume_from_backup.assert_called_once_with(
+                volume_obj, backup_obj)
+
 
 class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
 
@@ -197,7 +197,8 @@ class API(base.Base):
                scheduler_hints=None,
                source_replica=None, consistencygroup=None,
                cgsnapshot=None, multiattach=False, source_cg=None,
-               group=None, group_snapshot=None, source_group=None):
+               group=None, group_snapshot=None, source_group=None,
+               backup=None):
 
         context.authorize(vol_policy.CREATE_FROM_IMAGE_POLICY)
 
@@ -299,6 +300,7 @@ class API(base.Base):
             'group': group,
             'group_snapshot': group_snapshot,
             'source_group': source_group,
+            'backup': backup,
         }
         try:
             sched_rpcapi = (self.scheduler_rpcapi if (
@@ -1852,6 +1852,19 @@ class BaseVD(object):
     def accept_transfer(self, context, volume, new_user, new_project):
         pass
 
+    def create_volume_from_backup(self, volume, backup):
+        """Creates a volume from a backup.
+
+        Can optionally return a Dictionary of changes to the volume object to
+        be persisted.
+
+        :param volume: the volume object to be created.
+        :param backup: the backup object as source.
+        :returns: volume_model_update
+        """
+
+        raise NotImplementedError()
+
 
 @six.add_metaclass(abc.ABCMeta)
 class CloneableImageVD(object):
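To illustrate the new driver hook, a hypothetical backend driver might implement it roughly as in the sketch below; the class name and the _clone_from_backup_store() call are invented for the example, and the other required BaseVD methods are omitted.

# Sketch only: a hypothetical driver implementing the new BaseVD hook.
# "_clone_from_backup_store" is an invented backend call, not a Cinder API.
from cinder.volume import driver


class FakeBackupCapableDriver(driver.BaseVD):
    # (other required BaseVD methods omitted from this sketch)

    def create_volume_from_backup(self, volume, backup):
        # Build the new volume directly from the backend's copy of the
        # backup. Raising NotImplementedError instead (the BaseVD default)
        # makes the create flow fall back to creating a raw volume and
        # asking cinder-backup to restore into it.
        location = self._clone_from_backup_store(backup.id, volume.size)
        # Optionally return model updates to persist on the volume.
        return {'provider_location': location}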
@@ -49,6 +49,7 @@ REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
 CG_PROCEED_STATUS = ('available', 'creating',)
 CGSNAPSHOT_PROCEED_STATUS = ('available',)
 GROUP_PROCEED_STATUS = ('available', 'creating',)
+BACKUP_PROCEED_STATUS = (fields.BackupStatus.AVAILABLE,)
 
 
 class ExtractVolumeRequestTask(flow_utils.CinderTask):
@@ -69,7 +70,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                             'source_volid', 'volume_type', 'volume_type_id',
                             'encryption_key_id', 'consistencygroup_id',
                             'cgsnapshot_id', 'qos_specs', 'group_id',
-                            'refresh_az'])
+                            'refresh_az', 'backup_id'])
 
     def __init__(self, image_service, availability_zones, **kwargs):
         super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
@@ -135,8 +136,13 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,),
                                       exception.InvalidVolume, 'source volume')
 
+    def _extract_backup(self, backup):
+        return self._extract_resource(backup, (BACKUP_PROCEED_STATUS,),
+                                      exception.InvalidBackup,
+                                      'backup')
+
     @staticmethod
-    def _extract_size(size, source_volume, snapshot):
+    def _extract_size(size, source_volume, snapshot, backup):
         """Extracts and validates the volume size.
 
         This function will validate or when not provided fill in the provided
@@ -162,6 +168,15 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                        'source_size': source_volume['size']}
                 raise exception.InvalidInput(reason=msg)
 
+        def validate_backup_size(size):
+            if backup and size < backup['size']:
+                msg = _("Volume size %(size)sGB cannot be smaller than "
+                        "the backup size %(backup_size)sGB. "
+                        "It must be >= backup size.")
+                msg = msg % {'size': size,
+                             'backup_size': backup['size']}
+                raise exception.InvalidInput(reason=msg)
+
         def validate_int(size):
             if not isinstance(size, six.integer_types) or size <= 0:
                 msg = _("Volume size '%(size)s' must be an integer and"
@@ -175,12 +190,16 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             validator_functors.append(validate_source_size)
         elif snapshot:
             validator_functors.append(validate_snap_size)
+        elif backup:
+            validator_functors.append(validate_backup_size)
 
         # If the size is not provided then try to provide it.
         if not size and source_volume:
             size = source_volume['size']
         elif not size and snapshot:
             size = snapshot.volume_size
+        elif not size and backup:
+            size = backup['size']
 
         size = utils.as_int(size)
         LOG.debug("Validating volume size '%(size)s' using %(functors)s",
@@ -414,18 +433,20 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
 
     def execute(self, context, size, snapshot, image_id, source_volume,
                 availability_zone, volume_type, metadata, key_manager,
-                consistencygroup, cgsnapshot, group, group_snapshot):
+                consistencygroup, cgsnapshot, group, group_snapshot, backup):
 
         utils.check_exclusive_options(snapshot=snapshot,
                                       imageRef=image_id,
-                                      source_volume=source_volume)
+                                      source_volume=source_volume,
+                                      backup=backup)
         context.authorize(policy.CREATE_POLICY)
 
         # TODO(harlowja): what guarantee is there that the snapshot or source
         # volume will remain available after we do this initial verification??
         snapshot_id = self._extract_snapshot(snapshot)
         source_volid = self._extract_source_volume(source_volume)
-        size = self._extract_size(size, source_volume, snapshot)
+        backup_id = self._extract_backup(backup)
+        size = self._extract_size(size, source_volume, snapshot, backup)
         consistencygroup_id = self._extract_consistencygroup(consistencygroup)
         cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)
         group_id = self._extract_group(group)
@@ -491,7 +512,8 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             'cgsnapshot_id': cgsnapshot_id,
             'group_id': group_id,
             'replication_status': replication_status,
-            'refresh_az': refresh_az
+            'refresh_az': refresh_az,
+            'backup_id': backup_id,
         }
 
 
@@ -726,7 +748,7 @@ class VolumeCastTask(flow_utils.CinderTask):
         requires = ['image_id', 'scheduler_hints', 'snapshot_id',
                     'source_volid', 'volume_id', 'volume', 'volume_type',
                     'volume_properties', 'consistencygroup_id',
-                    'cgsnapshot_id', 'group_id', ]
+                    'cgsnapshot_id', 'group_id', 'backup_id', ]
         super(VolumeCastTask, self).__init__(addons=[ACTION],
                                              requires=requires)
         self.volume_rpcapi = volume_rpcapi
@@ -740,6 +762,7 @@ class VolumeCastTask(flow_utils.CinderTask):
         image_id = request_spec['image_id']
         cgroup_id = request_spec['consistencygroup_id']
         group_id = request_spec['group_id']
+        backup_id = request_spec['backup_id']
         if cgroup_id:
             # If cgroup_id existed, we should cast volume to the scheduler
             # to choose a proper pool whose backend is same as CG's backend.
@@ -776,7 +799,8 @@ class VolumeCastTask(flow_utils.CinderTask):
             snapshot_id=snapshot_id,
             image_id=image_id,
             request_spec=request_spec,
-            filter_properties=filter_properties)
+            filter_properties=filter_properties,
+            backup_id=backup_id)
 
     def execute(self, context, **kwargs):
         scheduler_hints = kwargs.pop('scheduler_hints', None)
@@ -22,6 +22,8 @@ import taskflow.engines
 from taskflow.patterns import linear_flow
 from taskflow.types import failure as ft
 
+from cinder import backup as backup_api
+from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context as cinder_context
 from cinder import coordination
 from cinder import exception
@@ -33,6 +35,7 @@ from cinder.message import api as message_api
 from cinder.message import message_field
 from cinder import objects
 from cinder.objects import consistencygroup
+from cinder.objects import fields
 from cinder import utils
 from cinder.volume.flows import common
 from cinder.volume import utils as volume_utils
@@ -267,7 +270,7 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
             'status': volume.status,
             'type': 'raw',  # This will have the type of the volume to be
                             # created, which should be one of [raw, snap,
-                            # source_vol, image]
+                            # source_vol, image, backup]
             'volume_id': volume.id,
             'volume_name': volume_name,
             'volume_size': volume_size,
@@ -314,7 +317,17 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
                 # demand in the future.
                 'image_service': image_service,
             })
+
+        elif request_spec.get('backup_id'):
+            # We are making a backup based volume instead of a raw volume.
+            specs.update({
+                'type': 'backup',
+                'backup_id': request_spec['backup_id'],
+                # NOTE(luqitao): if the driver does not implement the method
+                # `create_volume_from_backup`, cinder-backup will update the
+                # volume's status, otherwise we need to update it in the
+                # method `CreateVolumeOnFinishTask`.
+                'need_update_volume': True,
+            })
         return specs
 
     def revert(self, context, result, **kwargs):
@@ -357,6 +370,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
     Reversion strategy: N/A
     """
 
+    default_provides = 'volume_spec'
+
     def __init__(self, manager, db, driver, image_volume_cache=None):
         super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
         self.manager = manager
@@ -364,6 +379,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
         self.driver = driver
         self.image_volume_cache = image_volume_cache
         self.message = message_api.API()
+        self.backup_api = backup_api.API()
+        self.backup_rpcapi = backup_rpcapi.BackupAPI()
 
     def _handle_bootable_volume_glance_meta(self, context, volume,
                                             **kwargs):
@@ -864,6 +881,45 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                                                  image_meta=image_meta)
         return model_update
 
+    def _create_from_backup(self, context, volume, backup_id, **kwargs):
+        LOG.info("Creating volume %(volume_id)s from backup %(backup_id)s.",
+                 {'volume_id': volume.id,
+                  'backup_id': backup_id})
+        ret = {}
+        backup = objects.Backup.get_by_id(context, backup_id)
+        try:
+            ret = self.driver.create_volume_from_backup(volume, backup)
+            need_update_volume = True
+
+        except NotImplementedError:
+            LOG.info("Backend does not support creating volume from "
+                     "backup %(id)s. It will directly create the raw volume "
+                     "at the backend and then schedule the request to the "
+                     "backup service to restore the volume with backup.",
+                     {'id': backup_id})
+            model_update = self._create_raw_volume(volume, **kwargs) or {}
+            model_update.update({'status': 'restoring-backup'})
+            volume.update(model_update)
+            volume.save()
+
+            backup_host = self.backup_api.get_available_backup_service_host(
+                backup.host, backup.availability_zone)
+            updates = {'status': fields.BackupStatus.RESTORING,
+                       'restore_volume_id': volume.id,
+                       'host': backup_host}
+            backup.update(updates)
+            backup.save()
+
+            self.backup_rpcapi.restore_backup(context, backup.host, backup,
+                                              volume.id)
+            need_update_volume = False
+
+        LOG.info("Created volume %(volume_id)s from backup %(backup_id)s "
+                 "successfully.",
+                 {'volume_id': volume.id,
+                  'backup_id': backup_id})
+        return ret, need_update_volume
+
     def _create_raw_volume(self, volume, **kwargs):
         try:
             ret = self.driver.create_volume(volume)
@@ -910,6 +966,10 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
             model_update = self._create_from_image(context,
                                                    volume,
                                                    **volume_spec)
+        elif create_type == 'backup':
+            model_update, need_update_volume = self._create_from_backup(
+                context, volume, **volume_spec)
+            volume_spec.update({'need_update_volume': need_update_volume})
         else:
             raise exception.VolumeTypeNotFound(volume_type_id=create_type)
 
@@ -927,6 +987,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                           "with creation provided model %(model)s",
                           {'volume_id': volume_id, 'model': model_update})
             raise
+        return volume_spec
 
     def _cleanup_cg_in_volume(self, volume):
         # NOTE(xyang): Cannot have both group_id and consistencygroup_id.
@@ -959,6 +1020,11 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
         }
 
     def execute(self, context, volume, volume_spec):
+        need_update_volume = volume_spec.pop('need_update_volume', True)
+        if not need_update_volume:
+            super(CreateVolumeOnFinishTask, self).execute(context, volume)
+            return
+
         new_status = self.status_translation.get(volume_spec.get('status'),
                                                  'available')
         update = {
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Starting with API microversion 3.47, Cinder now supports the ability to
+    create a volume directly from a backup. For instance, you can use the
+    command: ``cinder create <size> --backup-id <backup_id>`` in cinderclient.
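A rough Python equivalent of the CLI example above, assuming a python-cinderclient release new enough to negotiate microversion 3.47 and to accept a backup_id argument; all credentials and UUIDs are placeholders.

# Sketch: the CLI example expressed with python-cinderclient, assuming the
# client supports microversion 3.47 and the backup_id argument.
from cinderclient import client
from keystoneauth1 import identity
from keystoneauth1 import session

auth = identity.Password(auth_url='http://controller:5000/v3',
                         username='demo', password='<password>',
                         project_name='demo',
                         user_domain_id='default',
                         project_domain_id='default')
sess = session.Session(auth=auth)
cinder = client.Client('3.47', session=sess)

# The API rejects a size smaller than the backup size.
vol = cinder.volumes.create(size=10, backup_id='<backup-uuid>')
print(vol.id, vol.status)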