Backup object

This patch adds a VersionedObjects abstraction layer to volume backups. The object derives from CinderObjectDictCompat, so it supports both object (obj.prop) and dict (obj['prop']) syntax for accessing properties. The complete move to object notation will be made in a follow-up cleanup patch.

Co-Authored-By: Grzegorz Grasza <grzegorz.grasza@intel.com>
Change-Id: Icff37261b367463b71a1268be16f9c97f595bf0c
Partial-Implements: blueprint cinder-objects
This commit is contained in: parent 703ce63c1a, commit 0e76126136
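The dict-compat behaviour the commit message describes can be pictured with a minimal standalone sketch. This is plain Python for illustration only, not Cinder's actual CinderObjectDictCompat implementation:

# Toy mixin: expose attributes through both obj.prop and obj['prop'].
class DictCompat(object):
    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def get(self, key, default=None):
        return getattr(self, key, default)


class Backup(DictCompat):
    def __init__(self, status):
        self.status = status


backup = Backup(status='creating')
assert backup.status == backup['status']  # both notations read the same field
backup['status'] = 'available'            # dict-style writes work too
print(backup.status)                      # -> available

This is why the patch can switch call sites to object notation incrementally: code still using backup['prop'] keeps working until the follow-up cleanup lands.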
@@ -219,7 +219,7 @@ class BackupsController(wsgi.Controller):
backups = self.backup_api.get_all(context, search_opts=filters)
backup_count = len(backups)
limited_list = common.limited(backups, req)
limited_list = common.limited(backups.objects, req)
req.cache_db_backups(limited_list)
if is_detail:
@@ -30,6 +30,7 @@ from cinder import context
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import objects
import cinder.policy
from cinder import quota
from cinder import utils

@@ -60,8 +61,7 @@ class API(base.Base):
def get(self, context, backup_id):
check_policy(context, 'get')
rv = self.db.backup_get(context, backup_id)
return dict(rv)
return objects.Backup.get_by_id(context, backup_id)
def delete(self, context, backup_id):
"""Make the RPC call to delete a volume backup."""

@@ -78,21 +78,23 @@ class API(base.Base):
msg = _('Incremental backups exist for this backup.')
raise exception.InvalidBackup(reason=msg)
self.db.backup_update(context, backup_id, {'status': 'deleting'})
self.backup_rpcapi.delete_backup(context,
backup['host'],
backup['id'])
backup.status = 'deleting'
backup.save()
self.backup_rpcapi.delete_backup(context, backup)
def get_all(self, context, search_opts=None):
if search_opts is None:
search_opts = {}
check_policy(context, 'get_all')
if context.is_admin:
backups = self.db.backup_get_all(context, filters=search_opts)
backups = objects.BackupList.get_all(context, filters=search_opts)
else:
backups = self.db.backup_get_all_by_project(context,
context.project_id,
filters=search_opts)
backups = objects.BackupList.get_all_by_project(
context,
context.project_id,
filters=search_opts
)
return backups

@@ -177,50 +179,51 @@ class API(base.Base):
# backup to do an incremental backup.
latest_backup = None
if incremental:
backups = self.db.backup_get_all_by_volume(context.elevated(),
volume_id)
if backups:
latest_backup = max(backups, key=lambda x: x['created_at'])
backups = objects.BackupList.get_all_by_volume(context.elevated(),
volume_id)
if backups.objects:
latest_backup = max(backups.objects,
key=lambda x: x['created_at'])
else:
msg = _('No backups available to do an incremental backup.')
raise exception.InvalidBackup(reason=msg)
parent_id = None
if latest_backup:
parent_id = latest_backup['id']
parent_id = latest_backup.id
if latest_backup['status'] != "available":
msg = _('The parent backup must be available for '
'incremental backup.')
raise exception.InvalidBackup(reason=msg)
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
options = {'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
'display_description': description,
'volume_id': volume_id,
'status': 'creating',
'container': container,
'parent_id': parent_id,
'size': volume['size'],
'host': volume_host, }
try:
backup = self.db.backup_create(context, options)
kwargs = {
'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
'display_description': description,
'volume_id': volume_id,
'status': 'creating',
'container': container,
'parent_id': parent_id,
'size': volume['size'],
'host': volume_host,
}
backup = objects.Backup(context=context, **kwargs)
backup.create()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.backup_destroy(context, backup['id'])
backup.destroy()
finally:
QUOTAS.rollback(context, reservations)
# TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
# better load balancing and isolation of services
self.backup_rpcapi.create_backup(context,
backup['host'],
backup['id'],
volume_id)
self.backup_rpcapi.create_backup(context, backup)
return backup

@@ -277,14 +280,13 @@ class API(base.Base):
# Setting the status here rather than setting at start and unrolling
# for each error condition, it should be a very small window
self.db.backup_update(context, backup_id, {'status': 'restoring'})
backup.status = 'restoring'
backup.save()
volume_host = volume_utils.extract_host(volume['host'], 'host')
self.db.volume_update(context, volume_id, {'status':
'restoring-backup'})
volume_host = volume_utils.extract_host(volume['host'], 'host')
self.backup_rpcapi.restore_backup(context,
volume_host,
backup['id'],
self.backup_rpcapi.restore_backup(context, volume_host, backup,
volume_id)
d = {'backup_id': backup_id,

@@ -304,8 +306,8 @@ class API(base.Base):
# get backup info
backup = self.get(context, backup_id)
# send to manager to do reset operation
self.backup_rpcapi.reset_status(ctxt=context, host=backup['host'],
backup_id=backup_id, status=status)
self.backup_rpcapi.reset_status(ctxt=context, backup=backup,
status=status)
def export_record(self, context, backup_id):
"""Make the RPC call to export a volume backup.

@@ -330,9 +332,7 @@ class API(base.Base):
{'ctx': context,
'host': backup['host'],
'id': backup['id']})
export_data = self.backup_rpcapi.export_record(context,
backup['host'],
backup['id'])
export_data = self.backup_rpcapi.export_record(context, backup)
return export_data

@@ -357,15 +357,18 @@ class API(base.Base):
if len(hosts) == 0:
raise exception.ServiceNotFound(service_id=backup_service)
options = {'user_id': context.user_id,
'project_id': context.project_id,
'volume_id': '0000-0000-0000-0000',
'status': 'creating', }
backup = self.db.backup_create(context, options)
kwargs = {
'user_id': context.user_id,
'project_id': context.project_id,
'volume_id': '0000-0000-0000-0000',
'status': 'creating',
}
backup = objects.Backup(context=context, **kwargs)
backup.create()
first_host = hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup['id'],
backup,
backup_service,
backup_url,
hosts)
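The recurring pattern in the hunks above is load-mutate-save: fetch the object once, assign attributes, and call save(), instead of issuing db.backup_update() with a values dict. A rough standalone sketch of that pattern, with simplified stand-ins rather than Cinder's real classes:

# Illustrative only; FakeDB and this Backup are hypothetical stand-ins.
class FakeDB(object):
    rows = {'b1': {'id': 'b1', 'status': 'available'}}

    @classmethod
    def backup_get(cls, backup_id):
        return dict(cls.rows[backup_id])

    @classmethod
    def backup_update(cls, backup_id, values):
        cls.rows[backup_id].update(values)


class Backup(object):
    def __init__(self, row):
        self.__dict__.update(row)

    @classmethod
    def get_by_id(cls, backup_id):
        # replaces dict(self.db.backup_get(...))
        return cls(FakeDB.backup_get(backup_id))

    def save(self):
        # the real object writes only the changed fields; we write one
        # field here for brevity
        FakeDB.backup_update(self.id, {'status': self.status})


backup = Backup.get_by_id('b1')
backup.status = 'deleting'        # replaces db.backup_update(...)
backup.save()
print(FakeDB.rows['b1']['status'])  # -> deleting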
@@ -36,6 +36,7 @@ import six
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import objects
from cinder.openstack.common import loopingcall
from cinder.volume import utils as volume_utils

@@ -152,18 +153,15 @@ class ChunkedBackupDriver(driver.BackupDriver):
return
def _create_container(self, context, backup):
backup_id = backup['id']
backup['container'] = self.update_container_name(backup,
backup['container'])
container = backup['container']
backup.container = self.update_container_name(backup, backup.container)
LOG.debug('_create_container started, container: %(container)s,'
'backup: %(backup_id)s.',
{'container': container, 'backup_id': backup_id})
if container is None:
container = self.backup_default_container
self.db.backup_update(context, backup_id, {'container': container})
self.put_container(container)
return container
{'container': backup.container, 'backup_id': backup.id})
if backup.container is None:
backup.container = self.backup_default_container
backup.save()
self.put_container(backup.container)
return backup.container
def _generate_object_names(self, backup):
prefix = backup['service_metadata']

@@ -249,9 +247,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
def _prepare_backup(self, backup):
"""Prepare the backup process and return the backup metadata."""
backup_id = backup['id']
volume_id = backup['volume_id']
volume = self.db.volume_get(self.context, volume_id)
volume = self.db.volume_get(self.context, backup.volume_id)
if volume['size'] <= 0:
err = _('volume size %d is invalid.') % volume['size']

@@ -260,9 +256,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
container = self._create_container(self.context, backup)
object_prefix = self._generate_object_name_prefix(backup)
backup['service_metadata'] = object_prefix
self.db.backup_update(self.context, backup_id, {'service_metadata':
object_prefix})
backup.service_metadata = object_prefix
backup.save()
volume_size_bytes = volume['size'] * units.Gi
availability_zone = self.az
LOG.debug('starting backup of volume: %(volume_id)s,'

@@ -270,7 +266,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
' prefix %(object_prefix)s, availability zone:'
' %(availability_zone)s',
{
'volume_id': volume_id,
'volume_id': backup.volume_id,
'volume_size_bytes': volume_size_bytes,
'object_prefix': object_prefix,
'availability_zone': availability_zone,

@@ -349,17 +345,17 @@ class ChunkedBackupDriver(driver.BackupDriver):
sha256_list = object_sha256['sha256s']
extra_metadata = object_meta.get('extra_metadata')
self._write_sha256file(backup,
backup['volume_id'],
backup.volume_id,
container,
sha256_list)
self._write_metadata(backup,
backup['volume_id'],
backup.volume_id,
container,
object_list,
volume_meta,
extra_metadata)
self.db.backup_update(self.context, backup['id'],
{'object_count': object_id})
backup.object_count = object_id
backup.save()
LOG.debug('backup %s finished.', backup['id'])
def _backup_metadata(self, backup, object_meta):

@@ -410,9 +406,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
# is given.
parent_backup_shafile = None
parent_backup = None
if backup['parent_id']:
parent_backup = self.db.backup_get(self.context,
backup['parent_id'])
if backup.parent_id:
parent_backup = objects.Backup.get_by_id(self.context,
backup.parent_id)
parent_backup_shafile = self._read_sha256file(parent_backup)
parent_backup_shalist = parent_backup_shafile['sha256s']
if (parent_backup_shafile['chunk_size'] !=

@@ -425,7 +421,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
raise exception.InvalidBackup(reason=err)
# If the volume size increased since the last backup, fail
# the incremental backup and ask user to do a full backup.
if backup['size'] > parent_backup['size']:
if backup.size > parent_backup.size:
err = _('Volume size increased since the last '
'backup. Do a full backup.')
raise exception.InvalidBackup(reason=err)

@@ -637,9 +633,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
backup_list = []
backup_list.append(backup)
current_backup = backup
while current_backup['parent_id']:
prev_backup = (self.db.backup_get(
self.context, current_backup['parent_id']))
while current_backup.parent_id:
prev_backup = objects.Backup.get_by_id(self.context,
current_backup.parent_id)
backup_list.append(prev_backup)
current_backup = prev_backup
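The last hunk above walks the incremental-backup chain by following parent_id until it is None. A runnable sketch of that walk, with a toy Backup stand-in and an in-memory store in place of Backup.get_by_id:

# Illustrative stand-ins, not the driver's real code.
class Backup(object):
    def __init__(self, id, parent_id=None):
        self.id = id
        self.parent_id = parent_id


store = {
    'full': Backup('full'),
    'inc1': Backup('inc1', parent_id='full'),
    'inc2': Backup('inc2', parent_id='inc1'),
}

backup_list = []
current_backup = store['inc2']
backup_list.append(current_backup)
while current_backup.parent_id:                     # mirrors the hunk's loop
    prev_backup = store[current_backup.parent_id]   # mirrors Backup.get_by_id
    backup_list.append(prev_backup)
    current_backup = prev_backup

print([b.id for b in backup_list])  # -> ['inc2', 'inc1', 'full']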
@@ -874,8 +874,8 @@ class CephBackupDriver(driver.BackupDriver):
self._full_backup(backup_id, volume_id, volume_file,
volume_name, length)
self.db.backup_update(self.context, backup_id,
{'container': self._ceph_backup_pool})
backup.container = self._ceph_backup_pool
backup.save()
if backup_metadata:
try:
@@ -61,9 +61,8 @@ VALID_BACKUP_MODES = ['image', 'file']
def _get_backup_metadata(backup, operation):
"""Return metadata persisted with backup object."""
svc_metadata = backup['service_metadata']
try:
svc_dict = json.loads(svc_metadata)
svc_dict = json.loads(backup.service_metadata)
backup_path = svc_dict.get('backup_path')
backup_mode = svc_dict.get('backup_mode')
except TypeError:

@@ -364,35 +363,31 @@ class TSMBackupDriver(driver.BackupDriver):
"not yet support this feature.")
raise exception.InvalidBackup(reason=msg)
backup_id = backup['id']
volume_id = backup['volume_id']
volume_path, backup_mode = _get_volume_realpath(volume_file,
volume_id)
backup.volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.',
{'volume_id': volume_id,
{'volume_id': backup.volume_id,
'volume_path': volume_path,
'mode': backup_mode})
backup_path = _create_unique_device_link(backup_id,
backup_path = _create_unique_device_link(backup.id,
volume_path,
volume_id,
backup.volume_id,
backup_mode)
service_metadata = {'backup_mode': backup_mode,
'backup_path': backup_path}
self.db.backup_update(self.context,
backup_id,
{'service_metadata':
json.dumps(service_metadata)})
backup.service_metadata = json.dumps(service_metadata)
backup.save()
try:
self._do_backup(backup_path, volume_id, backup_mode)
self._do_backup(backup_path, backup.volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
% {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})

@@ -403,7 +398,7 @@ class TSMBackupDriver(driver.BackupDriver):
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
% {'vol_id': backup.volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})

@@ -411,9 +406,10 @@ class TSMBackupDriver(driver.BackupDriver):
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(backup_path, volume_path, volume_id)
_cleanup_device_hardlink(backup_path, volume_path,
backup.volume_id)
LOG.debug('Backup %s finished.', backup_id)
LOG.debug('Backup %s finished.', backup.id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.

@@ -424,8 +420,6 @@ class TSMBackupDriver(driver.BackupDriver):
:raises InvalidBackup
"""
backup_id = backup['id']
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')

@@ -434,7 +428,7 @@ class TSMBackupDriver(driver.BackupDriver):
'backup: %(backup_id)s, '
'mode: %(mode)s.',
{'volume_id': volume_id,
'backup_id': backup_id,
'backup_id': backup.id,
'mode': backup_mode})
# volume_path is the path to restore into. This may

@@ -442,7 +436,7 @@ class TSMBackupDriver(driver.BackupDriver):
volume_path, unused = _get_volume_realpath(volume_file,
volume_id)
restore_path = _create_unique_device_link(backup_id,
restore_path = _create_unique_device_link(backup.id,
volume_path,
volume_id,
backup_mode)

@@ -475,7 +469,7 @@ class TSMBackupDriver(driver.BackupDriver):
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
{'backup_id': backup_id,
{'backup_id': backup.id,
'volume_id': volume_id})
def delete(self, backup):

@@ -487,10 +481,9 @@ class TSMBackupDriver(driver.BackupDriver):
delete_attrs = {'Total number of objects deleted': '1'}
delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
volume_id = backup['volume_id']
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
{'backup': backup['id'],
{'backup': backup.id,
'mode': backup_mode})
try:

@@ -508,7 +501,7 @@ class TSMBackupDriver(driver.BackupDriver):
except processutils.ProcessExecutionError as exc:
err = (_('delete: %(vol_id)s failed to run dsmc with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
% {'vol_id': backup.volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)

@@ -517,7 +510,7 @@ class TSMBackupDriver(driver.BackupDriver):
err = (_('delete: %(vol_id)s failed to run dsmc '
'due to invalid arguments with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
% {'vol_id': backup.volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)

@@ -530,7 +523,7 @@ class TSMBackupDriver(driver.BackupDriver):
# object can be removed.
LOG.error(_LE('delete: %(vol_id)s failed with '
'stdout: %(out)s\n stderr: %(err)s'),
{'vol_id': volume_id,
{'vol_id': backup.volume_id,
'out': out,
'err': err})
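The TSM driver keeps its bookkeeping (backup_mode, backup_path) as a JSON string in backup.service_metadata, written during backup and parsed back by _get_backup_metadata on restore and delete. A small sketch of that round-trip; the field names match the hunks, and this Backup class is a stand-in:

import json

class Backup(object):
    service_metadata = None

backup = Backup()
service_metadata = {'backup_mode': 'file', 'backup_path': '/dev/backup-b1'}
backup.service_metadata = json.dumps(service_metadata)   # on backup

svc_dict = json.loads(backup.service_metadata)            # on restore/delete
print(svc_dict.get('backup_mode'), svc_dict.get('backup_path'))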
@@ -46,6 +46,7 @@ from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import manager
from cinder import objects
from cinder import quota
from cinder import rpc
from cinder import utils

@@ -183,6 +184,11 @@ class BackupManager(manager.SchedulerDependentManager):
driver.set_initialized()
def _update_backup_error(self, backup, context, err):
backup.status = 'error'
backup.fail_reason = err
backup.save()
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.

@@ -222,40 +228,38 @@ class BackupManager(manager.SchedulerDependentManager):
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)
backups = self.db.backup_get_all_by_host(ctxt, self.host)
backups = objects.BackupList.get_all_by_host(ctxt, self.host)
for backup in backups:
if backup['status'] == 'creating':
LOG.info(_LI('Resetting backup %s to error (was creating).'),
backup['id'])
err = 'incomplete backup reset on manager restart'
self.db.backup_update(ctxt, backup['id'], {'status': 'error',
'fail_reason': err})
self._update_backup_error(backup, ctxt, err)
if backup['status'] == 'restoring':
LOG.info(_LI('Resetting backup %s to '
'available (was restoring).'),
backup['id'])
self.db.backup_update(ctxt, backup['id'],
{'status': 'available'})
backup.status = 'available'
backup.save()
if backup['status'] == 'deleting':
LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
self.delete_backup(ctxt, backup['id'])
self.delete_backup(ctxt, backup)
def create_backup(self, context, backup_id):
def create_backup(self, context, backup):
"""Create volume backups using configured backup service."""
backup = self.db.backup_get(context, backup_id)
volume_id = backup['volume_id']
volume_id = backup.volume_id
volume = self.db.volume_get(context, volume_id)
LOG.info(_LI('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup_id, 'volume_id': volume_id})
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "create.start")
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
self.db.backup_update(context, backup_id, {'host': self.host,
'service':
self.driver_name})
backup.host = self.host
backup.service = self.driver_name
backup.save()
expected_status = 'backing-up'
actual_status = volume['status']

@@ -265,21 +269,19 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
self._update_backup_error(backup, context, err)
raise exception.InvalidVolume(reason=err)
expected_status = 'creating'
actual_status = backup['status']
actual_status = backup.status
if actual_status != expected_status:
err = _('Create backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') % {
'expected_status': expected_status,
'actual_status': actual_status,
}
self.db.volume_update(context, volume_id, {'status': 'available'})
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
self._update_backup_error(backup, context, err)
backup.save()
raise exception.InvalidBackup(reason=err)
try:

@@ -296,31 +298,29 @@ class BackupManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'available'})
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason': six.text_type(err)})
self._update_backup_error(backup, context, six.text_type(err))
self.db.volume_update(context, volume_id, {'status': 'available'})
backup = self.db.backup_update(context, backup_id,
{'status': 'available',
'size': volume['size'],
'availability_zone': self.az})
LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)
backup.status = 'available'
backup.size = volume['size']
backup.availability_zone = self.az
backup.save()
LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
self._notify_about_backup_usage(context, backup, "create.end")
def restore_backup(self, context, backup_id, volume_id):
def restore_backup(self, context, backup, volume_id):
"""Restore volume backups from configured backup service."""
LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.'),
{'backup_id': backup_id, 'volume_id': volume_id})
{'backup_id': backup.id, 'volume_id': volume_id})
backup = self.db.backup_get(context, backup_id)
volume = self.db.volume_get(context, volume_id)
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
self._notify_about_backup_usage(context, backup, "restore.start")
self.db.backup_update(context, backup_id, {'host': self.host})
backup.host = self.host
backup.save()
expected_status = 'restoring-backup'
actual_status = volume['status']

@@ -329,7 +329,8 @@ class BackupManager(manager.SchedulerDependentManager):
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self.db.backup_update(context, backup_id, {'status': 'available'})
backup.status = 'available'
backup.save()
raise exception.InvalidVolume(reason=err)
expected_status = 'restoring'

@@ -339,8 +340,7 @@ class BackupManager(manager.SchedulerDependentManager):
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
self._update_backup_error(backup, context, err)
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)

@@ -363,7 +363,8 @@ class BackupManager(manager.SchedulerDependentManager):
'configured_service': configured_service,
'backup_service': backup_service,
}
self.db.backup_update(context, backup_id, {'status': 'available'})
backup.status = 'available'
backup.save()
self.db.volume_update(context, volume_id, {'status': 'error'})
raise exception.InvalidBackup(reason=err)

@@ -382,19 +383,21 @@ class BackupManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_restoring'})
self.db.backup_update(context, backup_id,
{'status': 'available'})
backup.status = 'available'
backup.save()
self.db.volume_update(context, volume_id, {'status': 'available'})
backup = self.db.backup_update(context, backup_id,
{'status': 'available'})
backup.status = 'available'
backup.save()
LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
' to volume %(volume_id)s.'),
{'backup_id': backup_id, 'volume_id': volume_id})
{'backup_id': backup.id, 'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
def delete_backup(self, context, backup_id):
def delete_backup(self, context, backup):
"""Delete volume backup from configured backup service."""
LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught

@@ -403,25 +406,20 @@ class BackupManager(manager.SchedulerDependentManager):
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason':
six.text_type(err)})
self._update_backup_error(backup, context, six.text_type(err))
LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
self._notify_about_backup_usage(context, backup, "delete.start")
self.db.backup_update(context, backup_id, {'host': self.host})
backup.host = self.host
backup.save()
expected_status = 'deleting'
actual_status = backup['status']
actual_status = backup.status
if actual_status != expected_status:
err = _('Delete_backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self.db.backup_update(context, backup_id,
{'status': 'error', 'fail_reason': err})
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])

@@ -434,8 +432,7 @@ class BackupManager(manager.SchedulerDependentManager):
' backup [%(backup_service)s].')\
% {'configured_service': configured_service,
'backup_service': backup_service}
self.db.backup_update(context, backup_id,
{'status': 'error'})
self._update_backup_error(backup, context, err)
raise exception.InvalidBackup(reason=err)
try:

@@ -443,33 +440,31 @@ class BackupManager(manager.SchedulerDependentManager):
backup_service.delete(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason':
six.text_type(err)})
self._update_backup_error(backup, context,
six.text_type(err))
# Get reservations
try:
reserve_opts = {
'backups': -1,
'backup_gigabytes': -backup['size'],
'backup_gigabytes': -backup.size,
}
reservations = QUOTAS.reserve(context,
project_id=backup['project_id'],
project_id=backup.project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting backup"))
context = context.elevated()
self.db.backup_destroy(context, backup_id)
backup.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations,
project_id=backup['project_id'])
project_id=backup.project_id)
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
self._notify_about_backup_usage(context, backup, "delete.end")
def _notify_about_backup_usage(self,

@@ -482,25 +477,23 @@ class BackupManager(manager.SchedulerDependentManager):
extra_usage_info=extra_usage_info,
host=self.host)
def export_record(self, context, backup_id):
def export_record(self, context, backup):
"""Export all volume backup metadata details to allow clean import.
Export backup metadata so it could be re-imported into the database
without any prerequisite in the backup database.
:param context: running context
:param backup_id: backup id to export
:param backup: backup object to export
:returns: backup_record - a description of how to import the backup
:returns: contains 'backup_url' - how to import the backup, and
:returns: 'backup_service' describing the needed driver.
:raises: InvalidBackup
"""
LOG.info(_LI('Export record started, backup: %s.'), backup_id)
backup = self.db.backup_get(context, backup_id)
LOG.info(_LI('Export record started, backup: %s.'), backup.id)
expected_status = 'available'
actual_status = backup['status']
actual_status = backup.status
if actual_status != expected_status:
err = (_('Export backup aborted, expected backup status '
'%(expected_status)s but got %(actual_status)s.') %

@@ -509,8 +502,8 @@ class BackupManager(manager.SchedulerDependentManager):
raise exception.InvalidBackup(reason=err)
backup_record = {}
backup_record['backup_service'] = backup['service']
backup_service = self._map_service_to_driver(backup['service'])
backup_record['backup_service'] = backup.service
backup_service = self._map_service_to_driver(backup.service)
configured_service = self.driver_name
if backup_service != configured_service:
err = (_('Export record aborted, the backup service currently'

@@ -531,19 +524,19 @@ class BackupManager(manager.SchedulerDependentManager):
msg = six.text_type(err)
raise exception.InvalidBackup(reason=msg)
LOG.info(_LI('Export record finished, backup %s exported.'), backup_id)
LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
return backup_record
def import_record(self,
context,
backup_id,
backup,
backup_service,
backup_url,
backup_hosts):
"""Import all volume backup metadata details to the backup db.
:param context: running context
:param backup_id: The new backup id for the import
:param backup: The new backup object for the import
:param backup_service: The needed backup driver for import
:param backup_url: An identifier string to locate the backup
:param backup_hosts: Potential hosts to execute the import

@@ -560,7 +553,7 @@ class BackupManager(manager.SchedulerDependentManager):
first_host = backup_hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup_id,
backup.id,
backup_service,
backup_url,
backup_hosts)

@@ -569,8 +562,7 @@ class BackupManager(manager.SchedulerDependentManager):
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s') % {'service': backup_service}
self.db.backup_update(context, backup_id, {'status': 'error',
'fail_reason': err})
self._update_backup_error(backup, context, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...

@@ -580,10 +572,7 @@ class BackupManager(manager.SchedulerDependentManager):
backup_options = backup_service.import_record(backup_url)
except Exception as err:
msg = six.text_type(err)
self.db.backup_update(context,
backup_id,
{'status': 'error',
'fail_reason': msg})
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = ['display_name',

@@ -603,40 +592,36 @@ class BackupManager(manager.SchedulerDependentManager):
if entry not in backup_options:
msg = (_('Backup metadata received from driver for '
'import is missing %s.'), entry)
self.db.backup_update(context,
backup_id,
{'status': 'error',
'fail_reason': msg})
self._update_backup_error(backup, context, msg)
raise exception.InvalidBackup(reason=msg)
backup_update[entry] = backup_options[entry]
# Update the database
self.db.backup_update(context, backup_id, backup_update)
backup.update(backup_update)
backup.save()
# Verify backup
try:
if isinstance(backup_service, driver.BackupDriverWithVerify):
backup_service.verify(backup_id)
backup_service.verify(backup.id)
else:
LOG.warning(_LW('Backup service %(service)s does not '
'support verify. Backup id %(id)s is '
'not verified. Skipping verify.'),
{'service': self.driver_name,
'id': backup_id})
'id': backup.id})
except exception.InvalidBackup as err:
with excutils.save_and_reraise_exception():
self.db.backup_update(context, backup_id,
{'status': 'error',
'fail_reason':
six.text_type(err)})
self._update_backup_error(backup, context,
six.text_type(err))
LOG.info(_LI('Import record id %s metadata from driver '
'finished.'), backup_id)
'finished.'), backup.id)
def reset_status(self, context, backup_id, status):
def reset_status(self, context, backup, status):
"""Reset volume backup status.
:param context: running context
:param backup_id: The backup id for reset status operation
:param backup: The backup object for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver

@@ -644,7 +629,7 @@ class BackupManager(manager.SchedulerDependentManager):
"""
LOG.info(_LI('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup_id,
{'backup_id': backup.id,
'status': status})
try:
# NOTE(flaper87): Verify the driver is enabled

@@ -656,8 +641,7 @@ class BackupManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Backup driver has not been initialized"))
backup = self.db.backup_get(context, backup_id)
backup_service = self._map_service_to_driver(backup['service'])
backup_service = self._map_service_to_driver(backup.service)
LOG.info(_LI('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name

@@ -676,9 +660,9 @@ class BackupManager(manager.SchedulerDependentManager):
# check whether we could verify the backup is ok or not
if isinstance(backup_service,
driver.BackupDriverWithVerify):
backup_service.verify(backup_id)
self.db.backup_update(context, backup_id,
{'status': status})
backup_service.verify(backup.id)
backup.status = status
backup.save()
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '

@@ -686,20 +670,20 @@ class BackupManager(manager.SchedulerDependentManager):
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup_id})
'id': backup.id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == 'error' or
(status == 'available' and
backup['status'] == 'restoring')):
self.db.backup_update(context, backup_id,
{'status': status})
backup.status == 'restoring')):
backup.status = status
backup.save()
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Backup id %s is not invalid. "
"Skipping reset."), backup_id)
"Skipping reset."), backup.id)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Backup service %(configured_service)s '

@@ -707,19 +691,19 @@ class BackupManager(manager.SchedulerDependentManager):
'%(id)s is not verified. '
'Skipping verify.'),
{'configured_service': self.driver_name,
'id': backup_id})
'id': backup.id})
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
'id': backup_id})
'id': backup.id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# send notification to ceilometer
notifier_info = {'id': backup_id, 'update': {'status': status}}
notifier_info = {'id': backup.id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
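The new _update_backup_error() helper replaces the scattered db.backup_update(..., {'status': 'error', 'fail_reason': ...}) calls above, and is typically invoked inside excutils.save_and_reraise_exception() so the failure is recorded without swallowing the exception. A standalone sketch of that combination, assuming oslo.utils is installed; Backup here is a stand-in with a no-op save():

from oslo_utils import excutils

class Backup(object):
    status = fail_reason = None
    def save(self):
        pass  # the real object persists only the changed fields

def _update_backup_error(backup, err):
    backup.status = 'error'
    backup.fail_reason = err
    backup.save()

backup = Backup()
try:
    raise RuntimeError('driver not initialized')
except RuntimeError as err:
    try:
        # save_and_reraise_exception re-raises when the block exits, so the
        # error status is recorded and the caller still sees the failure.
        with excutils.save_and_reraise_exception():
            _update_backup_error(backup, str(err))
    except RuntimeError:
        pass  # re-raised as expected

print(backup.status, '-', backup.fail_reason)  # -> error - driver not initialized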
@@ -22,6 +22,7 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from cinder.objects import base as objects_base
from cinder import rpc

@@ -36,6 +37,7 @@ class BackupAPI(object):
API version history:
1.0 - Initial version.
1.1 - Changed methods to accept backup objects instead of IDs.
"""
BASE_RPC_API_VERSION = '1.0'

@@ -44,56 +46,57 @@ class BackupAPI(object):
super(BackupAPI, self).__init__()
target = messaging.Target(topic=CONF.backup_topic,
version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, '1.0')
serializer = objects_base.CinderObjectSerializer()
self.client = rpc.get_client(target, '1.1', serializer=serializer)
def create_backup(self, ctxt, host, backup_id, volume_id):
LOG.debug("create_backup in rpcapi backup_id %s", backup_id)
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'create_backup', backup_id=backup_id)
def create_backup(self, ctxt, backup):
LOG.debug("create_backup in rpcapi backup_id %s", backup.id)
cctxt = self.client.prepare(server=backup.host)
cctxt.cast(ctxt, 'create_backup', backup=backup)
def restore_backup(self, ctxt, host, backup_id, volume_id):
LOG.debug("restore_backup in rpcapi backup_id %s", backup_id)
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'restore_backup', backup_id=backup_id,
def restore_backup(self, ctxt, volume_host, backup, volume_id):
LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
cctxt = self.client.prepare(server=volume_host)
cctxt.cast(ctxt, 'restore_backup', backup=backup,
volume_id=volume_id)
def delete_backup(self, ctxt, host, backup_id):
LOG.debug("delete_backup rpcapi backup_id %s", backup_id)
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'delete_backup', backup_id=backup_id)
def delete_backup(self, ctxt, backup):
LOG.debug("delete_backup rpcapi backup_id %s", backup.id)
cctxt = self.client.prepare(server=backup.host)
cctxt.cast(ctxt, 'delete_backup', backup=backup)
def export_record(self, ctxt, host, backup_id):
def export_record(self, ctxt, backup):
LOG.debug("export_record in rpcapi backup_id %(id)s "
"on host %(host)s.",
{'id': backup_id,
'host': host})
cctxt = self.client.prepare(server=host)
return cctxt.call(ctxt, 'export_record', backup_id=backup_id)
{'id': backup.id,
'host': backup.host})
cctxt = self.client.prepare(server=backup.host)
return cctxt.call(ctxt, 'export_record', backup_id=backup.id)
def import_record(self,
ctxt,
host,
backup_id,
backup,
backup_service,
backup_url,
backup_hosts):
LOG.debug("import_record rpcapi backup id %(id)s "
"on host %(host)s for backup_url %(url)s.",
{'id': backup_id,
{'id': backup.id,
'host': host,
'url': backup_url})
cctxt = self.client.prepare(server=host)
cctxt.cast(ctxt, 'import_record',
backup_id=backup_id,
backup_id=backup.id,
backup_service=backup_service,
backup_url=backup_url,
backup_hosts=backup_hosts)
def reset_status(self, ctxt, host, backup_id, status):
def reset_status(self, ctxt, backup, status):
LOG.debug("reset_status in rpcapi backup_id %(id)s "
"on host %(host)s.",
{'id': backup_id,
'host': host})
cctxt = self.client.prepare(server=host)
return cctxt.cast(ctxt, 'reset_status', backup_id=backup_id,
{'id': backup.id,
'host': backup.host})
cctxt = self.client.prepare(server=backup.host)
return cctxt.cast(ctxt, 'reset_status', backup_id=backup.id,
status=status)
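The reason the client is now built with version '1.1' and a serializer: versioned objects have to be flattened to primitives before they cross RPC and rebuilt on the other side. A rough standalone sketch using plain oslo.versionedobjects; Cinder's CinderObjectSerializer wraps this same mechanism:

from oslo_versionedobjects import base, fields

@base.VersionedObjectRegistry.register
class Backup(base.VersionedObject):
    VERSION = '1.0'
    fields = {'id': fields.StringField()}

serializer = base.VersionedObjectSerializer()
backup = Backup(id='b1')
primitive = serializer.serialize_entity(None, backup)    # dict on the wire
restored = serializer.deserialize_entity(None, primitive)
print(restored.id)  # -> b1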
@@ -33,6 +33,7 @@ i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import objects
from cinder.openstack.common.report import guru_meditation_report as gmr
from cinder import service
from cinder import utils

@@ -43,6 +44,7 @@ CONF = cfg.CONF
def main():
objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
@@ -77,7 +77,7 @@ from cinder import db
from cinder.db import migration as db_migration
from cinder.db.sqlalchemy import api as db_api
from cinder.i18n import _
from cinder.objects import base as objects_base
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version

@@ -272,7 +272,7 @@ class VolumeCommands(object):
if not rpc.initialized():
rpc.init(CONF)
target = messaging.Target(topic=CONF.volume_topic)
serializer = objects_base.CinderObjectSerializer()
serializer = objects.base.CinderObjectSerializer()
self._client = rpc.get_client(target, serializer=serializer)
return self._client

@@ -402,7 +402,7 @@ class BackupCommands(object):
on which the backup operation is running.
"""
ctxt = context.get_admin_context()
backups = db.backup_get_all(ctxt)
backups = objects.BackupList.get_all(ctxt)
hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
print(hdr % (_('ID'),

@@ -531,6 +531,7 @@ def fetch_func_args(func):
def main():
objects.register_all()
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opt(category_opt)
script_name = sys.argv[0]
@@ -26,3 +26,4 @@ def register_all():
# need to receive it via RPC.
__import__('cinder.objects.volume')
__import__('cinder.objects.snapshot')
__import__('cinder.objects.backup')
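register_all() uses __import__ rather than normal imports because importing each module runs its @CinderObjectRegistry.register decorators as a side effect, without binding module names in this namespace. Sketched with the module names from the hunk:

def register_all():
    # Objects must be registered before any of them can arrive over RPC;
    # importing the module is what triggers the register decorators.
    __import__('cinder.objects.volume')
    __import__('cinder.objects.snapshot')
    __import__('cinder.objects.backup')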
cinder/objects/backup.py (new file, 144 lines)
@@ -0,0 +1,144 @@
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import fields

from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import base
from cinder import utils

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


@base.CinderObjectRegistry.register
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),

        'user_id': fields.UUIDField(),
        'project_id': fields.UUIDField(),

        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        # NOTE(dulek): Metadata field is used to store any strings by backup
        # drivers, that's why it can't be DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),

        'object_count': fields.IntegerField(),
    }

    obj_extra_fields = ['name']

    @property
    def name(self):
        return CONF.backup_name_template % self.id

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        target_version = utils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, backup, db_backup):
        for name, field in backup.fields.items():
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        backup._context = context
        backup.obj_reset_changes()
        return backup

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_backup = db.backup_get(context, id)
        return cls._from_db_object(context, cls(context), db_backup)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()

        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    @base.remotable
    def save(self):
        updates = self.obj_get_changes()
        if updates:
            db.backup_update(self._context, self.id, updates)

        self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        db.backup_destroy(self._context, self.id)


@base.CinderObjectRegistry.register
class BackupList(base.ObjectListBase, base.CinderObject):
    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('Backup'),
    }
    child_versions = {
        '1.0': '1.0'
    }

    @base.remotable_classmethod
    def get_all(cls, context, filters=None):
        backups = db.backup_get_all(context, filters)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)

    @base.remotable_classmethod
    def get_all_by_host(cls, context, host):
        backups = db.backup_get_all_by_host(context, host)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)

    @base.remotable_classmethod
    def get_all_by_project(cls, context, project_id, filters=None):
        backups = db.backup_get_all_by_project(context, project_id, filters)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)

    @base.remotable_classmethod
    def get_all_by_volume(cls, context, volume_id, filters=None):
        backups = db.backup_get_all_by_volume(context, volume_id, filters)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)
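A compressed, standalone analogue of the Backup object above, showing the moving parts: declared fields, a _from_db_object translator with the same NULL-to-0 guard for integer fields, and change tracking around save(). It uses plain oslo.versionedobjects, whereas the real class builds on Cinder's base classes and remotable decorators:

from oslo_versionedobjects import base, fields

@base.VersionedObjectRegistry.register
class MiniBackup(base.VersionedObject):
    VERSION = '1.0'
    fields = {
        'id': fields.StringField(),
        'status': fields.StringField(nullable=True),
        'object_count': fields.IntegerField(),
    }

    @staticmethod
    def _from_db_object(backup, db_row):
        for name in backup.fields:
            value = db_row.get(name)
            if isinstance(backup.fields[name], fields.IntegerField):
                value = value if value is not None else 0  # same NULL guard
            setattr(backup, name, value)
        backup.obj_reset_changes()  # freshly loaded, so nothing is dirty
        return backup


row = {'id': 'b1', 'status': 'available', 'object_count': None}
backup = MiniBackup._from_db_object(MiniBackup(), row)
backup.status = 'deleting'
print(backup.obj_get_changes())  # -> {'status': 'deleting'}; save() would persist these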
@@ -198,10 +198,14 @@ class AdminActionsTest(test.TestCase):
def test_backup_reset_status_as_admin(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available'})
volume = db.volume_create(ctx, {'status': 'available',
'user_id': 'user',
'project_id': 'project'})
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
'volume_id': volume['id']})
'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
resp = self._issue_backup_reset(ctx,
backup,

@@ -225,7 +229,9 @@ class AdminActionsTest(test.TestCase):
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id']})
'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
resp = self._issue_backup_reset(ctx,
backup,
@@ -1195,10 +1195,10 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id)
mock_restore_backup.assert_called_once_with(mock.ANY,
'HostB',
backup_id,
volume_id)
mock_restore_backup.assert_called_once_with(mock.ANY, u'HostB',
mock.ANY, volume_id)
# Manually check if restore_backup was called with appropriate backup.
self.assertEqual(backup_id, mock_restore_backup.call_args[0][2].id)
db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id)
@ -36,6 +36,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder import utils

@ -266,7 +267,10 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                  'size': 1,
                  'container': container,
                  'volume_id': '1234-5678-1234-8888',
                  'parent_id': parent_id}
                  'parent_id': parent_id,
                  'user_id': 'user-id',
                  'project_id': 'project-id',
                  }
        return db.backup_create(self.ctxt, backup)['id']

    def setUp(self):

@ -297,7 +301,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_bz2(self):

@ -305,7 +309,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='bz2')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_zlib(self):

@ -313,7 +317,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='zlib')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_default_container(self):

@ -321,9 +325,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                                     backup_id=FAKE_BACKUP_ID)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, FAKE_BACKUP_ID)
        backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, FAKE_BACKUP_ID)
        backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
        self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME)

    @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'

@ -339,7 +343,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        CONF.set_override("backup_enable_progress_timer", False)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

@ -351,7 +355,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        CONF.set_override("backup_object_number_per_notification", 10)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertFalse(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

@ -364,7 +368,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        CONF.set_override("backup_enable_progress_timer", True)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

@ -374,9 +378,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

    def test_backup_shafile(self):

@ -399,9 +403,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Verify sha contents

@ -430,9 +434,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                                     backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Create incremental backup with no change to contents

@ -440,9 +444,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                                     parent_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(deltabackup['container'], container_name)

        # Compare shas from both files

@ -475,9 +479,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name, backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Create incremental backup with no change to contents

@ -490,9 +494,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                                     parent_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(deltabackup['container'], container_name)

        content1 = service._read_sha256file(backup)

@ -525,9 +529,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name, backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Create incremental backup with no change to contents

@ -540,9 +544,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
                                     parent_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(deltabackup['container'], container_name)

        # Verify that two shas are changed at index 16 and 20

@ -562,7 +566,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

@ -587,7 +591,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

@ -613,11 +617,12 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_sha_block_size_bytes=32)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        service.backup(backup, self.volume_file)

        with tempfile.NamedTemporaryFile() as restored_file:
            backup = db.backup_get(self.ctxt, 123)
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, '1234-5678-1234-8888', restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                                        restored_file.name))

@ -629,11 +634,11 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_sha_block_size_bytes=1024)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

        with tempfile.NamedTemporaryFile() as restored_file:
            backup = db.backup_get(self.ctxt, 123)
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, '1234-5678-1234-8888', restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                                        restored_file.name))

@ -645,11 +650,11 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self.flags(backup_sha_block_size_bytes = 1024)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

        with tempfile.NamedTemporaryFile() as restored_file:
            backup = db.backup_get(self.ctxt, 123)
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, '1234-5678-1234-8888', restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                                        restored_file.name))

@ -677,7 +682,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name, backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

        # Create incremental backup with no change to contents

@ -689,12 +694,12 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name, backup_id=124,
                                     parent_id=123)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file, True)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)

        with tempfile.NamedTemporaryFile() as restored_file:
            backup = db.backup_get(self.ctxt, 124)
            backup = objects.Backup.get_by_id(self.ctxt, 124)
            service.restore(backup, '1234-5678-1234-8888',
                            restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,

@ -703,7 +708,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
    def test_delete(self):
        self._create_backup_db_entry()
        service = nfs.NFSBackupDriver(self.ctxt)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.delete(backup)

    def test_get_compressor(self):
91
cinder/tests/unit/objects/test_backup.py
Normal file
@ -0,0 +1,91 @@
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_versionedobjects.tests import test_objects

from cinder import objects
from cinder.tests.unit import fake_volume


fake_backup = {
    'id': '1',
    'volume_id': 'fake_id',
    'status': "creating",
    'size': 1,
    'display_name': 'fake_name',
    'display_description': 'fake_description',
    'user_id': 'fake_user',
    'project_id': 'fake_project',
}


class TestBackup(test_objects._LocalTest):
    @staticmethod
    def _compare(test, db, obj):
        for field, value in db.items():
            test.assertEqual(db[field], obj[field])

    @mock.patch('cinder.db.backup_get', return_value=fake_backup)
    def test_get_by_id(self, backup_get):
        backup = objects.Backup.get_by_id(self.context, 1)
        self._compare(self, fake_backup, backup)

    @mock.patch('cinder.db.backup_create', return_value=fake_backup)
    def test_create(self, backup_create):
        backup = objects.Backup(context=self.context)
        backup.create()
        self.assertEqual(fake_backup['id'], backup.id)
        self.assertEqual(fake_backup['volume_id'], backup.volume_id)

    @mock.patch('cinder.db.backup_update')
    def test_save(self, backup_update):
        backup = objects.Backup._from_db_object(
            self.context, objects.Backup(), fake_backup)
        backup.display_name = 'foobar'
        backup.save()
        backup_update.assert_called_once_with(self.context, backup.id,
                                              {'display_name': 'foobar'})

    @mock.patch('cinder.db.backup_destroy')
    def test_destroy(self, backup_destroy):
        backup = objects.Backup(context=self.context, id=1)
        backup.destroy()
        backup_destroy.assert_called_once_with(self.context, '1')


class TestBackupList(test_objects._LocalTest):
    @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
    def test_get_all(self, backup_get_all):
        backups = objects.BackupList.get_all(self.context)
        self.assertEqual(1, len(backups))
        TestBackup._compare(self, fake_backup, backups[0])

    @mock.patch('cinder.db.backup_get_all_by_project',
                return_value=[fake_backup])
    def test_get_all_by_project(self, get_all_by_project):
        backups = objects.BackupList.get_all_by_project(
            self.context, self.project_id)
        self.assertEqual(1, len(backups))
        TestBackup._compare(self, fake_backup, backups[0])

    @mock.patch('cinder.db.backup_get_all_by_host',
                return_value=[fake_backup])
    def test_get_all_for_volume(self, get_all_by_host):
        fake_volume_obj = fake_volume.fake_volume_obj(self.context)

        backups = objects.BackupList.get_all_by_host(self.context,
                                                     fake_volume_obj.id)
        self.assertEqual(1, len(backups))
        TestBackup._compare(self, fake_backup, backups[0])
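
The new unit tests above fix the Backup object's contract: get_by_id() wraps
cinder.db.backup_get, create() and destroy() delegate to backup_create and
backup_destroy, and save() hands only the fields changed since the object was
loaded to backup_update. A minimal usage sketch of that lifecycle, assuming a
request context ctxt and an existing backup id (the variable names here are
illustrative, not part of the change):

    from cinder import objects

    backup = objects.Backup.get_by_id(ctxt, backup_id)

    # Attribute and dict access stay interchangeable while the object
    # keeps its dict-compat base class.
    assert backup.status == backup['status']

    # Only the dirty field travels to cinder.db.backup_update.
    backup.display_name = 'nightly'
    backup.save()

    # destroy() removes the DB row behind the object.
    backup.destroy()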
@ -29,6 +29,7 @@ from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service

@ -63,28 +64,31 @@ class BaseBackupTest(test.TestCase):
                                 status='creating',
                                 size=1,
                                 object_count=0,
                                 project_id='fake'):
                                 project_id='fake',
                                 service=None):
        """Create a backup entry in the DB.

        Return the entry ID
        """
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = 'fake'
        backup['project_id'] = project_id
        backup['host'] = 'testhost'
        backup['availability_zone'] = '1'
        backup['display_name'] = display_name
        backup['display_description'] = display_description
        backup['container'] = container
        backup['status'] = status
        backup['fail_reason'] = ''
        backup['service'] = CONF.backup_driver
        backup['snapshot'] = False
        backup['parent_id'] = None
        backup['size'] = size
        backup['object_count'] = object_count
        return db.backup_create(self.ctxt, backup)['id']
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['user_id'] = 'fake'
        kwargs['project_id'] = project_id
        kwargs['host'] = 'testhost'
        kwargs['availability_zone'] = '1'
        kwargs['display_name'] = display_name
        kwargs['display_description'] = display_description
        kwargs['container'] = container
        kwargs['status'] = status
        kwargs['fail_reason'] = ''
        kwargs['service'] = service or CONF.backup_driver
        kwargs['snapshot'] = False
        kwargs['parent_id'] = None
        kwargs['size'] = size
        kwargs['object_count'] = object_count
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup

    def _create_volume_db_entry(self, display_name='test_volume',
                                display_description='this is a test volume',

@ -116,10 +120,10 @@ class BaseBackupTest(test.TestCase):
        """Create backup metadata export entry."""
        vol_id = self._create_volume_db_entry(status='available',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='available',
                                              volume_id=vol_id)

        export = self.backup_mgr.export_record(self.ctxt, backup_id)
        export = self.backup_mgr.export_record(self.ctxt, backup)
        return export

    def _create_export_record_db_entry(self,

@ -130,12 +134,14 @@ class BaseBackupTest(test.TestCase):

        Return the entry ID
        """
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = 'fake'
        backup['project_id'] = project_id
        backup['status'] = status
        return db.backup_create(self.ctxt, backup)['id']
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['user_id'] = 'fake'
        kwargs['project_id'] = project_id
        kwargs['status'] = status
        backup = objects.Backup(context=self.ctxt, **kwargs)
        backup.create()
        return backup


class BackupTestCase(BaseBackupTest):

@ -151,9 +157,9 @@ class BackupTestCase(BaseBackupTest):
        vol2_id = self._create_volume_db_entry()
        self._create_volume_attach(vol2_id)
        db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
        backup1_id = self._create_backup_db_entry(status='creating')
        backup2_id = self._create_backup_db_entry(status='restoring')
        backup3_id = self._create_backup_db_entry(status='deleting')
        backup1 = self._create_backup_db_entry(status='creating')
        backup2 = self._create_backup_db_entry(status='restoring')
        backup3 = self._create_backup_db_entry(status='deleting')

        self.backup_mgr.init_host()
        vol1 = db.volume_get(self.ctxt, vol1_id)

@ -161,52 +167,52 @@ class BackupTestCase(BaseBackupTest):
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual(vol2['status'], 'error_restoring')

        backup1 = db.backup_get(self.ctxt, backup1_id)
        backup1 = db.backup_get(self.ctxt, backup1.id)
        self.assertEqual(backup1['status'], 'error')
        backup2 = db.backup_get(self.ctxt, backup2_id)
        backup2 = db.backup_get(self.ctxt, backup2.id)
        self.assertEqual(backup2['status'], 'available')
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup3_id)
                          backup3.id)

    def test_create_backup_with_bad_volume_status(self):
        """Test error handling when creating a backup from a volume
        with a bad status
        """
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
                          backup)

    def test_create_backup_with_bad_backup_status(self):
        """Test error handling when creating a backup with a backup
        with a bad status
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='available',
                                              volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
                          backup)

    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_error(self, _mock_volume_backup):
        """Test error handling when error occurs during backup creation."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        _mock_volume_backup.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup_id)
                          backup)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'error')
        self.assertTrue(_mock_volume_backup.called)

@ -215,12 +221,12 @@ class BackupTestCase(BaseBackupTest):
        """Test normal backup creation."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        self.backup_mgr.create_backup(self.ctxt, backup_id)
        self.backup_mgr.create_backup(self.ctxt, backup)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')
        self.assertEqual(backup['size'], vol_size)
        self.assertTrue(_mock_volume_backup.called)

@ -231,9 +237,9 @@ class BackupTestCase(BaseBackupTest):
        """Test normal backup creation with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        self.backup_mgr.create_backup(self.ctxt, backup_id)
        self.backup_mgr.create_backup(self.ctxt, backup)
        self.assertEqual(2, notify.call_count)

    def test_restore_backup_with_bad_volume_status(self):

@ -241,13 +247,13 @@ class BackupTestCase(BaseBackupTest):
        with a bad status.
        """
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup_id = self._create_backup_db_entry(volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          backup,
                          vol_id)
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')

    def test_restore_backup_with_bad_backup_status(self):

@ -256,16 +262,16 @@ class BackupTestCase(BaseBackupTest):
        """
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='available',
                                              volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          backup,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'error')

    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))

@ -273,18 +279,18 @@ class BackupTestCase(BaseBackupTest):
        """Test error handling when an error occurs during backup restore."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='restoring',
                                              volume_id=vol_id)

        _mock_volume_restore.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          backup,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error_restoring')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')
        self.assertTrue(_mock_volume_restore.called)

@ -294,19 +300,19 @@ class BackupTestCase(BaseBackupTest):
        """
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(status='restoring',
                                              volume_id=vol_id,
                                              service=service)

        service = 'cinder.tests.unit.backup.bad_service'
        db.backup_update(self.ctxt, backup_id, {'service': service})
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup_id,
                          backup,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'error')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')

    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))

@ -315,13 +321,13 @@ class BackupTestCase(BaseBackupTest):
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='restoring',
                                              volume_id=vol_id)

        self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(vol['status'], 'available')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')
        self.assertTrue(_mock_volume_restore.called)

@ -332,10 +338,10 @@ class BackupTestCase(BaseBackupTest):
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='restoring',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='restoring',
                                              volume_id=vol_id)

        self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id)
        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        self.assertEqual(2, notify.call_count)

    def test_delete_backup_with_bad_backup_status(self):

@ -343,26 +349,26 @@ class BackupTestCase(BaseBackupTest):
        with a bad status.
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='available',
                                              volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup_id)
        backup = db.backup_get(self.ctxt, backup_id)
                          backup)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'error')

    def test_delete_backup_with_error(self):
        """Test error handling when an error occurs during backup deletion."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 display_name='fail_on_delete',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='deleting',
                                              display_name='fail_on_delete',
                                              volume_id=vol_id)
        self.assertRaises(IOError,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup_id)
        backup = db.backup_get(self.ctxt, backup_id)
                          backup)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'error')

    def test_delete_backup_with_bad_service(self):

@ -370,15 +376,15 @@ class BackupTestCase(BaseBackupTest):
        with a different service to that used to create the backup.
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        service = 'cinder.tests.unit.backup.bad_service'
        db.backup_update(self.ctxt, backup_id, {'service': service})
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(status='deleting',
                                              volume_id=vol_id,
                                              service=service)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup_id)
        backup = db.backup_get(self.ctxt, backup_id)
                          backup)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'error')

    def test_delete_backup_with_no_service(self):

@ -386,24 +392,25 @@ class BackupTestCase(BaseBackupTest):
        with no service defined for that backup, relates to bug #1162908
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        db.backup_update(self.ctxt, backup_id, {'service': None})
        self.backup_mgr.delete_backup(self.ctxt, backup_id)
        backup = self._create_backup_db_entry(status='deleting',
                                              volume_id=vol_id)
        backup.service = None
        backup.save()
        self.backup_mgr.delete_backup(self.ctxt, backup)

    def test_delete_backup(self):
        """Test normal backup deletion."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup_id)
        backup = self._create_backup_db_entry(status='deleting',
                                              volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup_id)
                          backup.id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backup = db.backup_get(ctxt_read_deleted, backup_id)
        backup = db.backup_get(ctxt_read_deleted, backup.id)
        self.assertEqual(backup.deleted, True)
        self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
        self.assertEqual(backup.status, 'deleted')

@ -412,9 +419,9 @@ class BackupTestCase(BaseBackupTest):
    def test_delete_backup_with_notify(self, notify):
        """Test normal backup deletion with notifications."""
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='deleting',
                                                 volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup_id)
        backup = self._create_backup_db_entry(status='deleting',
                                              volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        self.assertEqual(2, notify.call_count)

    def test_list_backup(self):

@ -425,7 +432,7 @@ class BackupTestCase(BaseBackupTest):
        b2 = self._create_backup_db_entry(project_id='project1')
        backups = db.backup_get_all_by_project(self.ctxt, 'project1')
        self.assertEqual(len(backups), 1)
        self.assertEqual(backups[0].id, b2)
        self.assertEqual(backups[0].id, b2.id)

    def test_backup_get_all_by_project_with_deleted(self):
        """Test deleted backups don't show up in backup_get_all_by_project.

@ -434,13 +441,13 @@ class BackupTestCase(BaseBackupTest):
        backups = db.backup_get_all_by_project(self.ctxt, 'fake')
        self.assertEqual(len(backups), 0)

        backup_id_keep = self._create_backup_db_entry()
        backup_id = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup_id)
        backup_keep = self._create_backup_db_entry()
        backup = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup.id)

        backups = db.backup_get_all_by_project(self.ctxt, 'fake')
        self.assertEqual(len(backups), 1)
        self.assertEqual(backups[0].id, backup_id_keep)
        self.assertEqual(backups[0].id, backup_keep.id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')

@ -453,13 +460,13 @@ class BackupTestCase(BaseBackupTest):
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(len(backups), 0)

        backup_id_keep = self._create_backup_db_entry()
        backup_id = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup_id)
        backup_keep = self._create_backup_db_entry()
        backup = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup.id)

        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(len(backups), 1)
        self.assertEqual(backups[0].id, backup_id_keep)
        self.assertEqual(backups[0].id, backup_keep.id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')

@ -478,14 +485,15 @@ class BackupTestCase(BaseBackupTest):
        record with a different service to that used to create the backup.
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        service = 'cinder.tests.unit.backup.bad_service'
        db.backup_update(self.ctxt, backup_id, {'service': service})
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(status='available',
                                              volume_id=vol_id,
                                              service=service)

        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.export_record,
                          self.ctxt,
                          backup_id)
                          backup)

    def test_export_record_with_bad_backup_status(self):
        """Test error handling when exporting a backup record with a backup

@ -493,22 +501,22 @@ class BackupTestCase(BaseBackupTest):
        """
        vol_id = self._create_volume_db_entry(status='available',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='error',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='error',
                                              volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.export_record,
                          self.ctxt,
                          backup_id)
                          backup)

    def test_export_record(self):
        """Test normal backup record export."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='available',
                                              size=vol_size)
        backup_id = self._create_backup_db_entry(status='available',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='available',
                                              volume_id=vol_id)

        export = self.backup_mgr.export_record(self.ctxt, backup_id)
        export = self.backup_mgr.export_record(self.ctxt, backup)
        self.assertEqual(export['backup_service'], CONF.backup_driver)
        self.assertTrue('backup_url' in export)

@ -527,7 +535,7 @@ class BackupTestCase(BaseBackupTest):
                                                        export['backup_service'],
                                                        export['backup_url'],
                                                        backup_hosts)
        backup = db.backup_get(self.ctxt, imported_record)
        backup = db.backup_get(self.ctxt, imported_record.id)
        self.assertEqual(backup['status'], 'available')
        self.assertEqual(backup['size'], vol_size)

@ -583,7 +591,7 @@ class BackupTestCase(BaseBackupTest):
                                                            export['backup_url'],
                                                            backup_hosts)
            self.assertTrue(_mock_record_import.called)
            backup = db.backup_get(self.ctxt, imported_record)
            backup = db.backup_get(self.ctxt, imported_record.id)
            self.assertEqual(backup['status'], 'error')

@ -617,7 +625,7 @@ class BackupTestCaseWithVerify(BaseBackupTest):
                                                        export['backup_service'],
                                                        export['backup_url'],
                                                        backup_hosts)
        backup = db.backup_get(self.ctxt, imported_record)
        backup = db.backup_get(self.ctxt, imported_record.id)
        self.assertEqual(backup['status'], 'available')
        self.assertEqual(backup['size'], vol_size)

@ -646,24 +654,24 @@ class BackupTestCaseWithVerify(BaseBackupTest):
                                                            export['backup_url'],
                                                            backup_hosts)
            self.assertTrue(_mock_record_verify.called)
            backup = db.backup_get(self.ctxt, imported_record)
            backup = db.backup_get(self.ctxt, imported_record.id)
            self.assertEqual(backup['status'], 'error')

    def test_backup_reset_status_from_nonrestoring_to_available(
            self):
        vol_id = self._create_volume_db_entry(status='available',
                                              size=1)
        backup_id = self._create_backup_db_entry(status='error',
                                                 volume_id=vol_id)
        backup = self._create_backup_db_entry(status='error',
                                              volume_id=vol_id)
        with mock.patch.object(manager.BackupManager,
                               '_map_service_to_driver') as \
                mock_map_service_to_driver:
            mock_map_service_to_driver.return_value = \
                fake_service.get_backup_driver(self.ctxt)
            self.backup_mgr.reset_status(self.ctxt,
                                         backup_id,
                                         backup,
                                         'available')
        backup = db.backup_get(self.ctxt, backup_id)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')

    def test_backup_reset_status_to_available_invalid_backup(self):

@ -671,11 +679,8 @@ class BackupTestCaseWithVerify(BaseBackupTest):
                                             'host': 'test',
                                             'provider_location': '',
                                             'size': 1})
        backup = db.backup_create(self.ctxt,
                                  {'status': 'error',
                                   'service':
                                   CONF.backup_driver,
                                   'volume_id': volume['id']})
        backup = self._create_backup_db_entry(status='error',
                                              volume_id=volume['id'])

        backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
        _mock_backup_verify_class = ('%s.%s.%s' %

@ -690,9 +695,9 @@ class BackupTestCaseWithVerify(BaseBackupTest):
            self.assertRaises(exception.BackupVerifyUnsupportedDriver,
                              self.backup_mgr.reset_status,
                              self.ctxt,
                              backup['id'],
                              backup,
                              'available')
        backup = db.backup_get(self.ctxt, backup['id'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'error')

    def test_backup_reset_status_from_restoring_to_available(self):

@ -701,16 +706,11 @@ class BackupTestCaseWithVerify(BaseBackupTest):
                                             'host': 'test',
                                             'provider_location': '',
                                             'size': 1})
        backup = db.backup_create(self.ctxt,
                                  {'status': 'restoring',
                                   'service':
                                   CONF.backup_driver,
                                   'volume_id': volume['id']})
        backup = self._create_backup_db_entry(status='restoring',
                                              volume_id=volume['id'])

        self.backup_mgr.reset_status(self.ctxt,
                                     backup['id'],
                                     'available')
        backup = db.backup_get(self.ctxt, backup['id'])
        self.backup_mgr.reset_status(self.ctxt, backup, 'available')
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(backup['status'], 'available')

    def test_backup_reset_status_to_error(self):

@ -719,13 +719,8 @@ class BackupTestCaseWithVerify(BaseBackupTest):
                                             'host': 'test',
                                             'provider_location': '',
                                             'size': 1})
        backup = db.backup_create(self.ctxt,
                                  {'status': 'creating',
                                   'service':
                                   CONF.backup_driver,
                                   'volume_id': volume['id']})
        self.backup_mgr.reset_status(self.ctxt,
                                     backup['id'],
                                     'error')
        backup = self._create_backup_db_entry(status='creating',
                                              volume_id=volume['id'])
        self.backup_mgr.reset_status(self.ctxt, backup, 'error')
        backup = db.backup_get(self.ctxt, backup['id'])
        self.assertEqual(backup['status'], 'error')
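
The manager-test hunks above all make the same interface change: the
BackupManager entry points (create_backup, restore_backup, delete_backup,
export_record, reset_status) now receive a loaded Backup object instead of a
bare ID. A short sketch of the new calling convention, assuming a context
ctxt and a manager instance backup_mgr (names are illustrative; the manager
internals are not part of this diff):

    from cinder import objects

    backup = objects.Backup.get_by_id(ctxt, backup_id)

    # Old style passed the primitive ID and the manager re-read the row:
    #     backup_mgr.delete_backup(ctxt, backup_id)
    # New style hands over the already-loaded object:
    backup_mgr.delete_backup(ctxt, backup)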
@ -32,6 +32,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.volume.drivers import rbd as rbddriver

@ -105,8 +106,10 @@ class BackupCephTestCase(test.TestCase):
        vol = {'id': id, 'size': size, 'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']

    def _create_backup_db_entry(self, backupid, volid, size):
        backup = {'id': backupid, 'size': size, 'volume_id': volid}
    def _create_backup_db_entry(self, backupid, volid, size,
                                userid='user-id', projectid='project-id'):
        backup = {'id': backupid, 'size': size, 'volume_id': volid,
                  'user_id': userid, 'project_id': projectid}
        return db.backup_create(self.ctxt, backup)['id']

    def time_inc(self):

@ -157,7 +160,7 @@ class BackupCephTestCase(test.TestCase):
        self.backup_id = str(uuid.uuid4())
        self._create_backup_db_entry(self.backup_id, self.volume_id,
                                     self.volume_size)
        self.backup = db.backup_get(self.ctxt, self.backup_id)
        self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)

        # Create alternate volume.
        self.alt_volume_id = str(uuid.uuid4())

@ -596,7 +599,7 @@ class BackupCephTestCase(test.TestCase):

        backup_id = str(uuid.uuid4())
        self._create_backup_db_entry(backup_id, volume_id, 1)
        backup = db.backup_get(self.ctxt, backup_id)
        backup = objects.Backup.get_by_id(self.ctxt, backup_id)

        self.assertRaises(exception.InvalidParameterValue, self.service.backup,
                          backup, self.volume_file)
@ -23,6 +23,7 @@ from cinder.backup import driver
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service

@ -40,8 +41,10 @@ class BackupBaseDriverTestCase(test.TestCase):
        vol = {'id': id, 'size': size, 'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']

    def _create_backup_db_entry(self, backupid, volid, size):
        backup = {'id': backupid, 'size': size, 'volume_id': volid}
    def _create_backup_db_entry(self, backupid, volid, size,
                                userid='user-id', projectid='project-id'):
        backup = {'id': backupid, 'size': size, 'volume_id': volid,
                  'user_id': userid, 'project_id': projectid}
        return db.backup_create(self.ctxt, backup)['id']

    def setUp(self):

@ -53,7 +56,7 @@ class BackupBaseDriverTestCase(test.TestCase):

        self._create_backup_db_entry(self.backup_id, self.volume_id, 1)
        self._create_volume_db_entry(self.volume_id, 1)
        self.backup = db.backup_get(self.ctxt, self.backup_id)
        self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
        self.driver = fake_service.FakeBackupService(self.ctxt)

    def test_get_metadata(self):
@ -35,6 +35,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_swift_client
from cinder.tests.unit.backup import fake_swift_client2

@ -69,7 +70,10 @@ class BackupSwiftTestCase(test.TestCase):
                  'size': 1,
                  'container': container,
                  'volume_id': '1234-5678-1234-8888',
                  'parent_id': parent_id}
                  'parent_id': parent_id,
                  'user_id': 'user-id',
                  'project_id': 'project-id',
                  }
        return db.backup_create(self.ctxt, backup)['id']

    def setUp(self):

@ -127,7 +131,7 @@ class BackupSwiftTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='none')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_bz2(self):

@ -135,7 +139,7 @@ class BackupSwiftTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='bz2')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_zlib(self):

@ -143,16 +147,16 @@ class BackupSwiftTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='zlib')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_default_container(self):
        self._create_backup_db_entry(container=None)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], 'volumebackups')

    @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'

@ -168,7 +172,7 @@ class BackupSwiftTestCase(test.TestCase):
        CONF.set_override("backup_swift_enable_progress_timer", False)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

@ -180,7 +184,7 @@ class BackupSwiftTestCase(test.TestCase):
        CONF.set_override("backup_object_number_per_notification", 10)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertFalse(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

@ -193,7 +197,7 @@ class BackupSwiftTestCase(test.TestCase):
        CONF.set_override("backup_swift_enable_progress_timer", True)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

@ -203,9 +207,9 @@ class BackupSwiftTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

    def test_backup_shafile(self):

@ -230,9 +234,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Verify sha contents

@ -262,9 +266,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Create incremental backup with no change to contents

@ -274,9 +278,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(deltabackup['container'], container_name)

        # Compare shas from both files

@ -311,9 +315,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Create incremental backup with no change to contents

@ -328,9 +332,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(deltabackup['container'], container_name)

        content1 = service._read_sha256file(backup)

@ -365,9 +369,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(backup['container'], container_name)

        # Create incremental backup with no change to contents

@ -382,9 +386,9 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(deltabackup['container'], container_name)

        # Verify that two shas are changed at index 16 and 20

@ -398,7 +402,7 @@ class BackupSwiftTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.backup,
                          backup, self.volume_file)

@ -414,7 +418,7 @@ class BackupSwiftTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='none')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

@ -439,7 +443,7 @@ class BackupSwiftTestCase(test.TestCase):
        self.flags(backup_compression_algorithm='none')
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

@ -464,7 +468,7 @@ class BackupSwiftTestCase(test.TestCase):
        service = swift_dr.SwiftBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            backup = db.backup_get(self.ctxt, 123)
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, '1234-5678-1234-8888', volume_file)

    def test_restore_delta(self):

@ -492,7 +496,7 @@ class BackupSwiftTestCase(test.TestCase):
                       fake_swift_client2.FakeSwiftClient2.Connection)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

        # Create incremental backup with no change to contents

@ -504,12 +508,12 @@ class BackupSwiftTestCase(test.TestCase):
        self._create_backup_db_entry(container=container_name, backup_id=124,
                                     parent_id=123)
        self.volume_file.seek(0)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file, True)
        deltabackup = db.backup_get(self.ctxt, 124)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)

        with tempfile.NamedTemporaryFile() as restored_file:
            backup = db.backup_get(self.ctxt, 124)
            backup = objects.Backup.get_by_id(self.ctxt, 124)
            service.restore(backup, '1234-5678-1234-8888',
                            restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,

@ -521,7 +525,7 @@ class BackupSwiftTestCase(test.TestCase):
        service = swift_dr.SwiftBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            backup = db.backup_get(self.ctxt, 123)
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            self.assertRaises(exception.SwiftConnectionFailed,
                              service.restore,
                              backup, '1234-5678-1234-8888', volume_file)

@ -532,7 +536,7 @@ class BackupSwiftTestCase(test.TestCase):
        service = swift_dr.SwiftBackupDriver(self.ctxt)

        with tempfile.NamedTemporaryFile() as volume_file:
            backup = db.backup_get(self.ctxt, 123)
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            self.assertRaises(exception.InvalidBackup,
                              service.restore,
                              backup, '1234-5678-1234-8888', volume_file)

@ -540,14 +544,14 @@ class BackupSwiftTestCase(test.TestCase):
    def test_delete(self):
        self._create_backup_db_entry()
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.delete(backup)

    def test_delete_wraps_socket_error(self):
        container_name = 'socket_error_on_delete'
        self._create_backup_db_entry(container=container_name)
        service = swift_dr.SwiftBackupDriver(self.ctxt)
        backup = db.backup_get(self.ctxt, 123)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertRaises(exception.SwiftConnectionFailed,
                          service.delete,
                          backup)
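
The delta tests in the Swift and NFS driver suites encode the incremental
backup contract: backup 124 is created with parent_id=123, the second
service.backup() call is flagged as incremental, and restoring 124 must
reproduce the full volume contents. A compressed sketch of that flow,
assuming a configured driver service and the test's volume file (volume_id
and the file handles are placeholders):

    # Full backup first, then an incremental one chained via parent_id.
    full = objects.Backup.get_by_id(ctxt, 123)
    service.backup(full, volume_file)

    delta = objects.Backup.get_by_id(ctxt, 124)   # created with parent_id=123
    service.backup(delta, volume_file, True)      # True marks the delta pass

    # Restoring the delta must yield the same bytes as the source volume.
    service.restore(delta, volume_id, restored_file)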
@ -29,6 +29,7 @@ from cinder.backup.drivers import tsm
|
||||
from cinder import context
|
||||
from cinder import db
|
||||
from cinder import exception
|
||||
from cinder import objects
|
||||
from cinder import test
|
||||
from cinder import utils
|
||||
|
||||
@ -260,7 +261,10 @@ class BackupTSMTestCase(test.TestCase):
'size': 1,
'container': 'test-container',
'volume_id': '1234-5678-1234-8888',
'service_metadata': service_metadata}
'service_metadata': service_metadata,
'user_id': 'user-id',
'project_id': 'project-id',
}
return db.backup_create(self.ctxt, backup)['id']

def test_backup_image(self):
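The extra user_id/project_id keys keep the seeded rows compatible with the Backup object's field set. Once loaded, the object presumably answers to both attribute and dict spellings during the migration; a hedged sketch, with ctxt and backup_id as placeholders:

backup = objects.Backup.get_by_id(ctxt, backup_id)
# Dict-compat access is assumed to keep working alongside attributes.
assert backup.user_id == backup['user_id']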
@ -277,13 +281,13 @@ class BackupTSMTestCase(test.TestCase):

with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume
backup1 = db.backup_get(self.ctxt, backup_id1)
backup1 = objects.Backup.get_by_id(self.ctxt, backup_id1)
self.driver.backup(backup1, volume_file)
backup2 = db.backup_get(self.ctxt, backup_id2)
backup2 = objects.Backup.get_by_id(self.ctxt, backup_id2)
self.driver.backup(backup2, volume_file)

# Create a backup that fails
fail_back = db.backup_get(self.ctxt, backup_id3)
fail_back = objects.Backup.get_by_id(self.ctxt, backup_id3)
self.sim.error_injection('backup', 'fail')
self.assertRaises(exception.InvalidBackup,
self.driver.backup, fail_back, volume_file)
@ -309,14 +313,14 @@ class BackupTSMTestCase(test.TestCase):

with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume
backup1 = db.backup_get(self.ctxt, 123)
backup1 = objects.Backup.get_by_id(self.ctxt, 123)
self.driver.backup(backup1, volume_file)
backup2 = db.backup_get(self.ctxt, 456)
backup2 = objects.Backup.get_by_id(self.ctxt, 456)
self.driver.backup(backup2, volume_file)

# Create a backup that fails
self._create_backup_db_entry(666, mode)
fail_back = db.backup_get(self.ctxt, 666)
fail_back = objects.Backup.get_by_id(self.ctxt, 666)
self.sim.error_injection('backup', 'fail')
self.assertRaises(exception.InvalidBackup,
self.driver.backup, fail_back, volume_file)
@ -340,7 +344,7 @@ class BackupTSMTestCase(test.TestCase):

with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume
backup1 = db.backup_get(self.ctxt, 123)
backup1 = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.InvalidBackup,
self.driver.backup, backup1, volume_file)

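These TSM tests drive failures through self.sim.error_injection('backup', 'fail'). A hypothetical sketch of the one-shot injection idea (FakeTSMSim and its methods are illustrative, not the suite's actual simulator):

class FakeTSMSim(object):
    def __init__(self):
        self._armed = set()

    def error_injection(self, op, mode):
        if mode == 'fail':
            self._armed.add(op)   # arm a one-shot failure for this op

    def run(self, op):
        if op in self._armed:
            self._armed.discard(op)
            raise RuntimeError('injected failure for %r' % op)
        return 'ok'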
@ -574,7 +574,9 @@ class TestCinderManageCmd(test.TestCase):
'container': 'fake-container',
'status': 'fake-status',
'size': 123,
'object_count': 1}
'object_count': 1,
'volume_id': 'fake-volume-id',
}
backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s'
@ -605,7 +607,7 @@ class TestCinderManageCmd(test.TestCase):
backup_cmds.list()

get_admin_context.assert_called_once_with()
backup_get_all.assert_called_once_with(ctxt)
backup_get_all.assert_called_once_with(ctxt, None)
self.assertEqual(expected_out, fake_out.getvalue())

@mock.patch('cinder.utils.service_is_up')

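The updated assertion records that the backup listing now hands a second (filters) argument through to backup_get_all, here simply None. A minimal standalone illustration of the mock assertion involved; the names are stand-ins:

import mock

ctxt = object()  # stand-in for an admin context
backup_get_all = mock.Mock(return_value=[])

backup_get_all(ctxt, None)  # the command now forwards a filters arg
backup_get_all.assert_called_once_with(ctxt, None)  # passes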
@ -64,6 +64,8 @@ objects_ignore_messages = [
"No value passed for parameter 'id' in function call",
"Module 'cinder.objects' has no 'Snapshot' member",
"Module 'cinder.objects' has no 'SnapshotList' member",
"Module 'cinder.objects' has no 'Backup' member",
"Module 'cinder.objects' has no 'BackupList' member",
]
objects_ignore_modules = ["cinder/objects/"]
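The new pylint suppressions exist because Backup and BackupList, like the Snapshot pair before them, are attached to cinder.objects at runtime rather than defined in it, so static analysis cannot see the members. A hedged sketch of that registration pattern (the decorator here is illustrative):

def register(obj_cls):
    """Illustrative decorator: expose the class as a module attribute."""
    import cinder.objects
    setattr(cinder.objects, obj_cls.__name__, obj_cls)  # dynamic member
    return obj_cls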