Backup object

This patch adds a VersionedObjects abstraction layer to volume backups.
The object derives from CinderObjectDictCompat, so it supports both
object (obj.prop) and dict (obj['prop']) syntax for accessing properties.
A complete move to object notation will be made in a follow-up cleanup
patch.

Co-Authored-By: Grzegorz Grasza <grzegorz.grasza@intel.com>

Change-Id: Icff37261b367463b71a1268be16f9c97f595bf0c
Partial-Implements: blueprint cinder-objects
Author: Michal Dulko  2015-06-03 14:25:26 +02:00
Commit: 0e76126136 (parent 703ce63c1a)
22 changed files with 732 additions and 490 deletions
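The calling convention the patch moves to is visible throughout the diff below: callers stop issuing self.db.backup_* calls and instead load, mutate, and persist Backup objects. A minimal usage sketch of that pattern (context, backup_id and volume_id are placeholders; the surrounding service plumbing is assumed):

from cinder import objects

# Loading returns a Backup object; dict-style access keeps working through
# CinderObjectDictCompat, so existing call sites survive the transition.
backup = objects.Backup.get_by_id(context, backup_id)
assert backup.status == backup['status']

# Updates are staged on the object and persisted explicitly.
backup.status = 'deleting'
backup.save()

# New rows are created from a populated object instead of a dict of options,
# and deletion goes through the object as well.
new_backup = objects.Backup(context=context, volume_id=volume_id,
                            status='creating')
new_backup.create()
new_backup.destroy()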


@@ -219,7 +219,7 @@ class BackupsController(wsgi.Controller):
         backups = self.backup_api.get_all(context, search_opts=filters)
         backup_count = len(backups)
-        limited_list = common.limited(backups, req)
+        limited_list = common.limited(backups.objects, req)
         req.cache_db_backups(limited_list)
         if is_detail:


@@ -30,6 +30,7 @@ from cinder import context
 from cinder.db import base
 from cinder import exception
 from cinder.i18n import _, _LI, _LW
+from cinder import objects
 import cinder.policy
 from cinder import quota
 from cinder import utils
@@ -60,8 +61,7 @@ class API(base.Base):
     def get(self, context, backup_id):
         check_policy(context, 'get')
-        rv = self.db.backup_get(context, backup_id)
-        return dict(rv)
+        return objects.Backup.get_by_id(context, backup_id)

     def delete(self, context, backup_id):
         """Make the RPC call to delete a volume backup."""
@@ -78,21 +78,23 @@ class API(base.Base):
             msg = _('Incremental backups exist for this backup.')
             raise exception.InvalidBackup(reason=msg)

-        self.db.backup_update(context, backup_id, {'status': 'deleting'})
-        self.backup_rpcapi.delete_backup(context,
-                                         backup['host'],
-                                         backup['id'])
+        backup.status = 'deleting'
+        backup.save()
+        self.backup_rpcapi.delete_backup(context, backup)

     def get_all(self, context, search_opts=None):
         if search_opts is None:
             search_opts = {}

         check_policy(context, 'get_all')
         if context.is_admin:
-            backups = self.db.backup_get_all(context, filters=search_opts)
+            backups = objects.BackupList.get_all(context, filters=search_opts)
         else:
-            backups = self.db.backup_get_all_by_project(context,
-                                                        context.project_id,
-                                                        filters=search_opts)
+            backups = objects.BackupList.get_all_by_project(
+                context,
+                context.project_id,
+                filters=search_opts
+            )

         return backups
@@ -177,50 +179,51 @@ class API(base.Base):
         # backup to do an incremental backup.
         latest_backup = None
         if incremental:
-            backups = self.db.backup_get_all_by_volume(context.elevated(),
-                                                       volume_id)
-            if backups:
-                latest_backup = max(backups, key=lambda x: x['created_at'])
+            backups = objects.BackupList.get_all_by_volume(context.elevated(),
+                                                           volume_id)
+            if backups.objects:
+                latest_backup = max(backups.objects,
+                                    key=lambda x: x['created_at'])
             else:
                 msg = _('No backups available to do an incremental backup.')
                 raise exception.InvalidBackup(reason=msg)

         parent_id = None
         if latest_backup:
-            parent_id = latest_backup['id']
+            parent_id = latest_backup.id
             if latest_backup['status'] != "available":
                 msg = _('The parent backup must be available for '
                         'incremental backup.')
                 raise exception.InvalidBackup(reason=msg)

         self.db.volume_update(context, volume_id, {'status': 'backing-up'})
-        options = {'user_id': context.user_id,
-                   'project_id': context.project_id,
-                   'display_name': name,
-                   'display_description': description,
-                   'volume_id': volume_id,
-                   'status': 'creating',
-                   'container': container,
-                   'parent_id': parent_id,
-                   'size': volume['size'],
-                   'host': volume_host, }
         try:
-            backup = self.db.backup_create(context, options)
+            kwargs = {
+                'user_id': context.user_id,
+                'project_id': context.project_id,
+                'display_name': name,
+                'display_description': description,
+                'volume_id': volume_id,
+                'status': 'creating',
+                'container': container,
+                'parent_id': parent_id,
+                'size': volume['size'],
+                'host': volume_host,
+            }
+            backup = objects.Backup(context=context, **kwargs)
+            backup.create()
             QUOTAS.commit(context, reservations)
         except Exception:
             with excutils.save_and_reraise_exception():
                 try:
-                    self.db.backup_destroy(context, backup['id'])
+                    backup.destroy()
                 finally:
                     QUOTAS.rollback(context, reservations)

         # TODO(DuncanT): In future, when we have a generic local attach,
         #                this can go via the scheduler, which enables
         #                better load balancing and isolation of services
-        self.backup_rpcapi.create_backup(context,
-                                         backup['host'],
-                                         backup['id'],
-                                         volume_id)
+        self.backup_rpcapi.create_backup(context, backup)

         return backup
@@ -277,14 +280,13 @@ class API(base.Base):
         # Setting the status here rather than setting at start and unrolling
         # for each error condition, it should be a very small window
-        self.db.backup_update(context, backup_id, {'status': 'restoring'})
+        backup.status = 'restoring'
+        backup.save()
+
+        volume_host = volume_utils.extract_host(volume['host'], 'host')
         self.db.volume_update(context, volume_id, {'status':
                                                    'restoring-backup'})
-        volume_host = volume_utils.extract_host(volume['host'], 'host')
-        self.backup_rpcapi.restore_backup(context,
-                                          volume_host,
-                                          backup['id'],
+        self.backup_rpcapi.restore_backup(context, volume_host, backup,
                                           volume_id)

         d = {'backup_id': backup_id,
@@ -304,8 +306,8 @@ class API(base.Base):
         # get backup info
         backup = self.get(context, backup_id)
         # send to manager to do reset operation
-        self.backup_rpcapi.reset_status(ctxt=context, host=backup['host'],
-                                        backup_id=backup_id, status=status)
+        self.backup_rpcapi.reset_status(ctxt=context, backup=backup,
+                                        status=status)

     def export_record(self, context, backup_id):
         """Make the RPC call to export a volume backup.
@@ -330,9 +332,7 @@ class API(base.Base):
                  {'ctx': context,
                   'host': backup['host'],
                   'id': backup['id']})
-        export_data = self.backup_rpcapi.export_record(context,
-                                                       backup['host'],
-                                                       backup['id'])
+        export_data = self.backup_rpcapi.export_record(context, backup)

         return export_data
@@ -357,15 +357,18 @@ class API(base.Base):
         if len(hosts) == 0:
             raise exception.ServiceNotFound(service_id=backup_service)

-        options = {'user_id': context.user_id,
-                   'project_id': context.project_id,
-                   'volume_id': '0000-0000-0000-0000',
-                   'status': 'creating', }
-        backup = self.db.backup_create(context, options)
+        kwargs = {
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'volume_id': '0000-0000-0000-0000',
+            'status': 'creating',
+        }
+        backup = objects.Backup(context=context, **kwargs)
+        backup.create()
         first_host = hosts.pop()
         self.backup_rpcapi.import_record(context,
                                          first_host,
-                                         backup['id'],
+                                         backup,
                                          backup_service,
                                          backup_url,
                                          hosts)


@@ -36,6 +36,7 @@ import six
 from cinder.backup import driver
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
+from cinder import objects
 from cinder.openstack.common import loopingcall
 from cinder.volume import utils as volume_utils
@@ -152,18 +153,15 @@ class ChunkedBackupDriver(driver.BackupDriver):
             return

     def _create_container(self, context, backup):
-        backup_id = backup['id']
-        backup['container'] = self.update_container_name(backup,
-                                                         backup['container'])
-        container = backup['container']
+        backup.container = self.update_container_name(backup, backup.container)
         LOG.debug('_create_container started, container: %(container)s,'
                   'backup: %(backup_id)s.',
-                  {'container': container, 'backup_id': backup_id})
-        if container is None:
-            container = self.backup_default_container
-            self.db.backup_update(context, backup_id, {'container': container})
-        self.put_container(container)
-        return container
+                  {'container': backup.container, 'backup_id': backup.id})
+        if backup.container is None:
+            backup.container = self.backup_default_container
+        backup.save()
+        self.put_container(backup.container)
+        return backup.container

     def _generate_object_names(self, backup):
         prefix = backup['service_metadata']
@@ -249,9 +247,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
     def _prepare_backup(self, backup):
         """Prepare the backup process and return the backup metadata."""
-        backup_id = backup['id']
-        volume_id = backup['volume_id']
-        volume = self.db.volume_get(self.context, volume_id)
+        volume = self.db.volume_get(self.context, backup.volume_id)

         if volume['size'] <= 0:
             err = _('volume size %d is invalid.') % volume['size']
@@ -260,9 +256,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
         container = self._create_container(self.context, backup)
         object_prefix = self._generate_object_name_prefix(backup)
-        backup['service_metadata'] = object_prefix
-        self.db.backup_update(self.context, backup_id, {'service_metadata':
-                                                        object_prefix})
+        backup.service_metadata = object_prefix
+        backup.save()
         volume_size_bytes = volume['size'] * units.Gi
         availability_zone = self.az
         LOG.debug('starting backup of volume: %(volume_id)s,'
@@ -270,7 +266,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
                   ' prefix %(object_prefix)s, availability zone:'
                   ' %(availability_zone)s',
                   {
-                      'volume_id': volume_id,
+                      'volume_id': backup.volume_id,
                       'volume_size_bytes': volume_size_bytes,
                       'object_prefix': object_prefix,
                       'availability_zone': availability_zone,
@@ -349,17 +345,17 @@ class ChunkedBackupDriver(driver.BackupDriver):
         sha256_list = object_sha256['sha256s']
         extra_metadata = object_meta.get('extra_metadata')
         self._write_sha256file(backup,
-                               backup['volume_id'],
+                               backup.volume_id,
                                container,
                                sha256_list)
         self._write_metadata(backup,
-                             backup['volume_id'],
+                             backup.volume_id,
                              container,
                              object_list,
                              volume_meta,
                              extra_metadata)
-        self.db.backup_update(self.context, backup['id'],
-                              {'object_count': object_id})
+        backup.object_count = object_id
+        backup.save()
         LOG.debug('backup %s finished.', backup['id'])

     def _backup_metadata(self, backup, object_meta):
@@ -410,9 +406,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
         # is given.
         parent_backup_shafile = None
         parent_backup = None
-        if backup['parent_id']:
-            parent_backup = self.db.backup_get(self.context,
-                                               backup['parent_id'])
+        if backup.parent_id:
+            parent_backup = objects.Backup.get_by_id(self.context,
+                                                     backup.parent_id)
             parent_backup_shafile = self._read_sha256file(parent_backup)
             parent_backup_shalist = parent_backup_shafile['sha256s']
             if (parent_backup_shafile['chunk_size'] !=
@@ -425,7 +421,7 @@ class ChunkedBackupDriver(driver.BackupDriver):
                 raise exception.InvalidBackup(reason=err)
             # If the volume size increased since the last backup, fail
             # the incremental backup and ask user to do a full backup.
-            if backup['size'] > parent_backup['size']:
+            if backup.size > parent_backup.size:
                 err = _('Volume size increased since the last '
                         'backup. Do a full backup.')
                 raise exception.InvalidBackup(reason=err)
@@ -637,9 +633,9 @@ class ChunkedBackupDriver(driver.BackupDriver):
         backup_list = []
         backup_list.append(backup)
         current_backup = backup
-        while current_backup['parent_id']:
-            prev_backup = (self.db.backup_get(
-                self.context, current_backup['parent_id']))
+        while current_backup.parent_id:
+            prev_backup = objects.Backup.get_by_id(self.context,
+                                                   current_backup.parent_id)
             backup_list.append(prev_backup)
             current_backup = prev_backup


@@ -874,8 +874,8 @@ class CephBackupDriver(driver.BackupDriver):
             self._full_backup(backup_id, volume_id, volume_file,
                               volume_name, length)

-        self.db.backup_update(self.context, backup_id,
-                              {'container': self._ceph_backup_pool})
+        backup.container = self._ceph_backup_pool
+        backup.save()

         if backup_metadata:
             try:


@@ -61,9 +61,8 @@ VALID_BACKUP_MODES = ['image', 'file']

 def _get_backup_metadata(backup, operation):
     """Return metadata persisted with backup object."""
-    svc_metadata = backup['service_metadata']
     try:
-        svc_dict = json.loads(svc_metadata)
+        svc_dict = json.loads(backup.service_metadata)
         backup_path = svc_dict.get('backup_path')
         backup_mode = svc_dict.get('backup_mode')
     except TypeError:
@@ -364,35 +363,31 @@ class TSMBackupDriver(driver.BackupDriver):
                     "not yet support this feature.")
             raise exception.InvalidBackup(reason=msg)

-        backup_id = backup['id']
-        volume_id = backup['volume_id']
         volume_path, backup_mode = _get_volume_realpath(volume_file,
-                                                        volume_id)
+                                                        backup.volume_id)
         LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
                   ' volume path: %(volume_path)s, mode: %(mode)s.',
-                  {'volume_id': volume_id,
+                  {'volume_id': backup.volume_id,
                    'volume_path': volume_path,
                    'mode': backup_mode})

-        backup_path = _create_unique_device_link(backup_id,
+        backup_path = _create_unique_device_link(backup.id,
                                                  volume_path,
-                                                 volume_id,
+                                                 backup.volume_id,
                                                  backup_mode)

         service_metadata = {'backup_mode': backup_mode,
                             'backup_path': backup_path}
-        self.db.backup_update(self.context,
-                              backup_id,
-                              {'service_metadata':
-                               json.dumps(service_metadata)})
+        backup.service_metadata = json.dumps(service_metadata)
+        backup.save()

         try:
-            self._do_backup(backup_path, volume_id, backup_mode)
+            self._do_backup(backup_path, backup.volume_id, backup_mode)
         except processutils.ProcessExecutionError as exc:
             err = (_('backup: %(vol_id)s failed to run dsmc '
                      'on %(bpath)s.\n'
                      'stdout: %(out)s\n stderr: %(err)s')
-                   % {'vol_id': volume_id,
+                   % {'vol_id': backup.volume_id,
                      'bpath': backup_path,
                      'out': exc.stdout,
                      'err': exc.stderr})
@@ -403,7 +398,7 @@ class TSMBackupDriver(driver.BackupDriver):
                      'due to invalid arguments '
                      'on %(bpath)s.\n'
                      'stdout: %(out)s\n stderr: %(err)s')
-                   % {'vol_id': volume_id,
+                   % {'vol_id': backup.volume_id,
                      'bpath': backup_path,
                      'out': exc.stdout,
                      'err': exc.stderr})
@@ -411,9 +406,10 @@ class TSMBackupDriver(driver.BackupDriver):
             raise exception.InvalidBackup(reason=err)

         finally:
-            _cleanup_device_hardlink(backup_path, volume_path, volume_id)
+            _cleanup_device_hardlink(backup_path, volume_path,
+                                     backup.volume_id)

-        LOG.debug('Backup %s finished.', backup_id)
+        LOG.debug('Backup %s finished.', backup.id)

     def restore(self, backup, volume_id, volume_file):
         """Restore the given volume backup from TSM server.
@@ -424,8 +420,6 @@ class TSMBackupDriver(driver.BackupDriver):
         :raises InvalidBackup
         """

-        backup_id = backup['id']
-
         # backup_path is the path that was originally backed up.
         backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
@@ -434,7 +428,7 @@ class TSMBackupDriver(driver.BackupDriver):
                   'backup: %(backup_id)s, '
                   'mode: %(mode)s.',
                   {'volume_id': volume_id,
-                   'backup_id': backup_id,
+                   'backup_id': backup.id,
                    'mode': backup_mode})

         # volume_path is the path to restore into. This may
@@ -442,7 +436,7 @@ class TSMBackupDriver(driver.BackupDriver):
         volume_path, unused = _get_volume_realpath(volume_file,
                                                    volume_id)

-        restore_path = _create_unique_device_link(backup_id,
+        restore_path = _create_unique_device_link(backup.id,
                                                   volume_path,
                                                   volume_id,
                                                   backup_mode)
@@ -475,7 +469,7 @@ class TSMBackupDriver(driver.BackupDriver):
             _cleanup_device_hardlink(restore_path, volume_path, volume_id)

         LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.',
-                  {'backup_id': backup_id,
+                  {'backup_id': backup.id,
                    'volume_id': volume_id})

     def delete(self, backup):
@@ -487,10 +481,9 @@ class TSMBackupDriver(driver.BackupDriver):
         delete_attrs = {'Total number of objects deleted': '1'}

         delete_path, backup_mode = _get_backup_metadata(backup, 'restore')
-        volume_id = backup['volume_id']

         LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
-                  {'backup': backup['id'],
+                  {'backup': backup.id,
                    'mode': backup_mode})

         try:
@@ -508,7 +501,7 @@ class TSMBackupDriver(driver.BackupDriver):
         except processutils.ProcessExecutionError as exc:
             err = (_('delete: %(vol_id)s failed to run dsmc with '
                      'stdout: %(out)s\n stderr: %(err)s')
-                   % {'vol_id': volume_id,
+                   % {'vol_id': backup.volume_id,
                      'out': exc.stdout,
                      'err': exc.stderr})
             LOG.error(err)
@@ -517,7 +510,7 @@ class TSMBackupDriver(driver.BackupDriver):
             err = (_('delete: %(vol_id)s failed to run dsmc '
                      'due to invalid arguments with '
                      'stdout: %(out)s\n stderr: %(err)s')
-                   % {'vol_id': volume_id,
+                   % {'vol_id': backup.volume_id,
                      'out': exc.stdout,
                      'err': exc.stderr})
             LOG.error(err)
@@ -530,7 +523,7 @@ class TSMBackupDriver(driver.BackupDriver):
             # object can be removed.
             LOG.error(_LE('delete: %(vol_id)s failed with '
                           'stdout: %(out)s\n stderr: %(err)s'),
-                      {'vol_id': volume_id,
+                      {'vol_id': backup.volume_id,
                        'out': out,
                        'err': err})


@@ -46,6 +46,7 @@ from cinder import context
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder import manager
+from cinder import objects
 from cinder import quota
 from cinder import rpc
 from cinder import utils
@@ -183,6 +184,11 @@ class BackupManager(manager.SchedulerDependentManager):
             driver.set_initialized()

+    def _update_backup_error(self, backup, context, err):
+        backup.status = 'error'
+        backup.fail_reason = err
+        backup.save()
+
     def init_host(self):
         """Do any initialization that needs to be run if this is a
            standalone service.
@@ -222,40 +228,38 @@ class BackupManager(manager.SchedulerDependentManager):
         # TODO(smulcahy) implement full resume of backup and restore
         # operations on restart (rather than simply resetting)
-        backups = self.db.backup_get_all_by_host(ctxt, self.host)
+        backups = objects.BackupList.get_all_by_host(ctxt, self.host)
         for backup in backups:
             if backup['status'] == 'creating':
                 LOG.info(_LI('Resetting backup %s to error (was creating).'),
                          backup['id'])
                 err = 'incomplete backup reset on manager restart'
-                self.db.backup_update(ctxt, backup['id'], {'status': 'error',
-                                                           'fail_reason': err})
+                self._update_backup_error(backup, ctxt, err)
             if backup['status'] == 'restoring':
                 LOG.info(_LI('Resetting backup %s to '
                              'available (was restoring).'),
                          backup['id'])
-                self.db.backup_update(ctxt, backup['id'],
-                                      {'status': 'available'})
+                backup.status = 'available'
+                backup.save()
             if backup['status'] == 'deleting':
                 LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
-                self.delete_backup(ctxt, backup['id'])
+                self.delete_backup(ctxt, backup)

-    def create_backup(self, context, backup_id):
+    def create_backup(self, context, backup):
         """Create volume backups using configured backup service."""
-        backup = self.db.backup_get(context, backup_id)
-        volume_id = backup['volume_id']
+        volume_id = backup.volume_id
         volume = self.db.volume_get(context, volume_id)
         LOG.info(_LI('Create backup started, backup: %(backup_id)s '
                      'volume: %(volume_id)s.'),
-                 {'backup_id': backup_id, 'volume_id': volume_id})
+                 {'backup_id': backup.id, 'volume_id': volume_id})
         self._notify_about_backup_usage(context, backup, "create.start")
         volume_host = volume_utils.extract_host(volume['host'], 'backend')
         backend = self._get_volume_backend(host=volume_host)

-        self.db.backup_update(context, backup_id, {'host': self.host,
-                                                   'service':
-                                                   self.driver_name})
+        backup.host = self.host
+        backup.service = self.driver_name
+        backup.save()

         expected_status = 'backing-up'
         actual_status = volume['status']
@@ -265,21 +269,19 @@ class BackupManager(manager.SchedulerDependentManager):
                 'expected_status': expected_status,
                 'actual_status': actual_status,
             }
-            self.db.backup_update(context, backup_id, {'status': 'error',
-                                                       'fail_reason': err})
+            self._update_backup_error(backup, context, err)
             raise exception.InvalidVolume(reason=err)

         expected_status = 'creating'
-        actual_status = backup['status']
+        actual_status = backup.status
         if actual_status != expected_status:
             err = _('Create backup aborted, expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') % {
                 'expected_status': expected_status,
                 'actual_status': actual_status,
             }
-            self.db.volume_update(context, volume_id, {'status': 'available'})
-            self.db.backup_update(context, backup_id, {'status': 'error',
-                                                       'fail_reason': err})
+            self._update_backup_error(backup, context, err)
+            backup.save()
             raise exception.InvalidBackup(reason=err)

         try:
@@ -296,31 +298,29 @@ class BackupManager(manager.SchedulerDependentManager):
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context, volume_id,
                                       {'status': 'available'})
-                self.db.backup_update(context, backup_id,
-                                      {'status': 'error',
-                                       'fail_reason': six.text_type(err)})
+                self._update_backup_error(backup, context, six.text_type(err))

         self.db.volume_update(context, volume_id, {'status': 'available'})
-        backup = self.db.backup_update(context, backup_id,
-                                       {'status': 'available',
-                                        'size': volume['size'],
-                                        'availability_zone': self.az})
-        LOG.info(_LI('Create backup finished. backup: %s.'), backup_id)
+        backup.status = 'available'
+        backup.size = volume['size']
+        backup.availability_zone = self.az
+        backup.save()
+        LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
         self._notify_about_backup_usage(context, backup, "create.end")

-    def restore_backup(self, context, backup_id, volume_id):
+    def restore_backup(self, context, backup, volume_id):
         """Restore volume backups from configured backup service."""
         LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
                      'volume: %(volume_id)s.'),
-                 {'backup_id': backup_id, 'volume_id': volume_id})
+                 {'backup_id': backup.id, 'volume_id': volume_id})

-        backup = self.db.backup_get(context, backup_id)
         volume = self.db.volume_get(context, volume_id)
         volume_host = volume_utils.extract_host(volume['host'], 'backend')
         backend = self._get_volume_backend(host=volume_host)

         self._notify_about_backup_usage(context, backup, "restore.start")
-        self.db.backup_update(context, backup_id, {'host': self.host})
+        backup.host = self.host
+        backup.save()

         expected_status = 'restoring-backup'
         actual_status = volume['status']
@@ -329,7 +329,8 @@ class BackupManager(manager.SchedulerDependentManager):
                    '%(expected_status)s but got %(actual_status)s.') %
                    {'expected_status': expected_status,
                     'actual_status': actual_status})
-            self.db.backup_update(context, backup_id, {'status': 'available'})
+            backup.status = 'available'
+            backup.save()
             raise exception.InvalidVolume(reason=err)

         expected_status = 'restoring'
@@ -339,8 +340,7 @@ class BackupManager(manager.SchedulerDependentManager):
                    '%(expected_status)s but got %(actual_status)s.') %
                    {'expected_status': expected_status,
                     'actual_status': actual_status})
-            self.db.backup_update(context, backup_id, {'status': 'error',
-                                                       'fail_reason': err})
+            self._update_backup_error(backup, context, err)
             self.db.volume_update(context, volume_id, {'status': 'error'})
             raise exception.InvalidBackup(reason=err)
@@ -363,7 +363,8 @@ class BackupManager(manager.SchedulerDependentManager):
                 'configured_service': configured_service,
                 'backup_service': backup_service,
             }
-            self.db.backup_update(context, backup_id, {'status': 'available'})
+            backup.status = 'available'
+            backup.save()
             self.db.volume_update(context, volume_id, {'status': 'error'})
             raise exception.InvalidBackup(reason=err)
@@ -382,19 +383,21 @@ class BackupManager(manager.SchedulerDependentManager):
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context, volume_id,
                                       {'status': 'error_restoring'})
-                self.db.backup_update(context, backup_id,
-                                      {'status': 'available'})
+                backup.status = 'available'
+                backup.save()

         self.db.volume_update(context, volume_id, {'status': 'available'})
-        backup = self.db.backup_update(context, backup_id,
-                                       {'status': 'available'})
+        backup.status = 'available'
+        backup.save()
         LOG.info(_LI('Restore backup finished, backup %(backup_id)s restored'
                      ' to volume %(volume_id)s.'),
-                 {'backup_id': backup_id, 'volume_id': volume_id})
+                 {'backup_id': backup.id, 'volume_id': volume_id})
         self._notify_about_backup_usage(context, backup, "restore.end")

-    def delete_backup(self, context, backup_id):
+    def delete_backup(self, context, backup):
         """Delete volume backup from configured backup service."""
+        LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)
+
         try:
             # NOTE(flaper87): Verify the driver is enabled
             # before going forward. The exception will be caught
@@ -403,25 +406,20 @@ class BackupManager(manager.SchedulerDependentManager):
             utils.require_driver_initialized(self.driver)
         except exception.DriverNotInitialized as err:
             with excutils.save_and_reraise_exception():
-                self.db.backup_update(context, backup_id,
-                                      {'status': 'error',
-                                       'fail_reason':
-                                       six.text_type(err)})
+                self._update_backup_error(backup, context, six.text_type(err))

-        LOG.info(_LI('Delete backup started, backup: %s.'), backup_id)
-        backup = self.db.backup_get(context, backup_id)
         self._notify_about_backup_usage(context, backup, "delete.start")
-        self.db.backup_update(context, backup_id, {'host': self.host})
+        backup.host = self.host
+        backup.save()

         expected_status = 'deleting'
-        actual_status = backup['status']
+        actual_status = backup.status
         if actual_status != expected_status:
             err = _('Delete_backup aborted, expected backup status '
                     '%(expected_status)s but got %(actual_status)s.') \
                 % {'expected_status': expected_status,
                    'actual_status': actual_status}
-            self.db.backup_update(context, backup_id,
-                                  {'status': 'error', 'fail_reason': err})
+            self._update_backup_error(backup, context, err)
             raise exception.InvalidBackup(reason=err)

         backup_service = self._map_service_to_driver(backup['service'])
@@ -434,8 +432,7 @@ class BackupManager(manager.SchedulerDependentManager):
                    ' backup [%(backup_service)s].')\
                 % {'configured_service': configured_service,
                    'backup_service': backup_service}
-            self.db.backup_update(context, backup_id,
-                                  {'status': 'error'})
+            self._update_backup_error(backup, context, err)
             raise exception.InvalidBackup(reason=err)

         try:
@@ -443,33 +440,31 @@ class BackupManager(manager.SchedulerDependentManager):
             backup_service.delete(backup)
         except Exception as err:
             with excutils.save_and_reraise_exception():
-                self.db.backup_update(context, backup_id,
-                                      {'status': 'error',
-                                       'fail_reason':
-                                       six.text_type(err)})
+                self._update_backup_error(backup, context,
+                                          six.text_type(err))

         # Get reservations
         try:
             reserve_opts = {
                 'backups': -1,
-                'backup_gigabytes': -backup['size'],
+                'backup_gigabytes': -backup.size,
             }
             reservations = QUOTAS.reserve(context,
-                                          project_id=backup['project_id'],
+                                          project_id=backup.project_id,
                                           **reserve_opts)
         except Exception:
             reservations = None
             LOG.exception(_LE("Failed to update usages deleting backup"))

         context = context.elevated()
-        self.db.backup_destroy(context, backup_id)
+        backup.destroy()

         # Commit the reservations
         if reservations:
             QUOTAS.commit(context, reservations,
-                          project_id=backup['project_id'])
+                          project_id=backup.project_id)

-        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup_id)
+        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
         self._notify_about_backup_usage(context, backup, "delete.end")

     def _notify_about_backup_usage(self,
@@ -482,25 +477,23 @@ class BackupManager(manager.SchedulerDependentManager):
                                           extra_usage_info=extra_usage_info,
                                           host=self.host)

-    def export_record(self, context, backup_id):
+    def export_record(self, context, backup):
         """Export all volume backup metadata details to allow clean import.

         Export backup metadata so it could be re-imported into the database
         without any prerequisite in the backup database.

         :param context: running context
-        :param backup_id: backup id to export
+        :param backup: backup object to export
         :returns: backup_record - a description of how to import the backup
         :returns: contains 'backup_url' - how to import the backup, and
         :returns: 'backup_service' describing the needed driver.
         :raises: InvalidBackup
         """
-        LOG.info(_LI('Export record started, backup: %s.'), backup_id)
+        LOG.info(_LI('Export record started, backup: %s.'), backup.id)

-        backup = self.db.backup_get(context, backup_id)
         expected_status = 'available'
-        actual_status = backup['status']
+        actual_status = backup.status
         if actual_status != expected_status:
             err = (_('Export backup aborted, expected backup status '
                      '%(expected_status)s but got %(actual_status)s.') %
@@ -509,8 +502,8 @@ class BackupManager(manager.SchedulerDependentManager):
             raise exception.InvalidBackup(reason=err)

         backup_record = {}
-        backup_record['backup_service'] = backup['service']
-        backup_service = self._map_service_to_driver(backup['service'])
+        backup_record['backup_service'] = backup.service
+        backup_service = self._map_service_to_driver(backup.service)
         configured_service = self.driver_name
         if backup_service != configured_service:
             err = (_('Export record aborted, the backup service currently'
@@ -531,19 +524,19 @@ class BackupManager(manager.SchedulerDependentManager):
             msg = six.text_type(err)
             raise exception.InvalidBackup(reason=msg)

-        LOG.info(_LI('Export record finished, backup %s exported.'), backup_id)
+        LOG.info(_LI('Export record finished, backup %s exported.'), backup.id)
         return backup_record

     def import_record(self,
                       context,
-                      backup_id,
+                      backup,
                       backup_service,
                       backup_url,
                       backup_hosts):
         """Import all volume backup metadata details to the backup db.

         :param context: running context
-        :param backup_id: The new backup id for the import
+        :param backup: The new backup object for the import
         :param backup_service: The needed backup driver for import
         :param backup_url: An identifier string to locate the backup
         :param backup_hosts: Potential hosts to execute the import
@@ -560,7 +553,7 @@ class BackupManager(manager.SchedulerDependentManager):
                 first_host = backup_hosts.pop()
                 self.backup_rpcapi.import_record(context,
                                                  first_host,
-                                                 backup_id,
+                                                 backup.id,
                                                  backup_service,
                                                  backup_url,
                                                  backup_hosts)
@@ -569,8 +562,7 @@ class BackupManager(manager.SchedulerDependentManager):
                 err = _('Import record failed, cannot find backup '
                         'service to perform the import. Request service '
                         '%(service)s') % {'service': backup_service}
-                self.db.backup_update(context, backup_id, {'status': 'error',
-                                                           'fail_reason': err})
+                self._update_backup_error(backup, context, err)
                 raise exception.ServiceNotFound(service_id=backup_service)
         else:
             # Yes...
@@ -580,10 +572,7 @@ class BackupManager(manager.SchedulerDependentManager):
                 backup_options = backup_service.import_record(backup_url)
             except Exception as err:
                 msg = six.text_type(err)
-                self.db.backup_update(context,
-                                      backup_id,
-                                      {'status': 'error',
-                                       'fail_reason': msg})
+                self._update_backup_error(backup, context, msg)
                 raise exception.InvalidBackup(reason=msg)

             required_import_options = ['display_name',
@@ -603,40 +592,36 @@ class BackupManager(manager.SchedulerDependentManager):
                 if entry not in backup_options:
                     msg = (_('Backup metadata received from driver for '
                              'import is missing %s.'), entry)
-                    self.db.backup_update(context,
-                                          backup_id,
-                                          {'status': 'error',
-                                           'fail_reason': msg})
+                    self._update_backup_error(backup, context, msg)
                     raise exception.InvalidBackup(reason=msg)
                 backup_update[entry] = backup_options[entry]
             # Update the database
-            self.db.backup_update(context, backup_id, backup_update)
+            backup.update(backup_update)
+            backup.save()

             # Verify backup
             try:
                 if isinstance(backup_service, driver.BackupDriverWithVerify):
-                    backup_service.verify(backup_id)
+                    backup_service.verify(backup.id)
                 else:
                     LOG.warning(_LW('Backup service %(service)s does not '
                                     'support verify. Backup id %(id)s is '
                                     'not verified. Skipping verify.'),
                                 {'service': self.driver_name,
-                                 'id': backup_id})
+                                 'id': backup.id})
             except exception.InvalidBackup as err:
                 with excutils.save_and_reraise_exception():
-                    self.db.backup_update(context, backup_id,
-                                          {'status': 'error',
-                                           'fail_reason':
-                                           six.text_type(err)})
+                    self._update_backup_error(backup, context,
+                                              six.text_type(err))

             LOG.info(_LI('Import record id %s metadata from driver '
-                         'finished.'), backup_id)
+                         'finished.'), backup.id)

-    def reset_status(self, context, backup_id, status):
+    def reset_status(self, context, backup, status):
         """Reset volume backup status.

         :param context: running context
-        :param backup_id: The backup id for reset status operation
+        :param backup: The backup object for reset status operation
         :param status: The status to be set
         :raises: InvalidBackup
         :raises: BackupVerifyUnsupportedDriver
@@ -644,7 +629,7 @@ class BackupManager(manager.SchedulerDependentManager):
         """
         LOG.info(_LI('Reset backup status started, backup_id: '
                      '%(backup_id)s, status: %(status)s.'),
-                 {'backup_id': backup_id,
+                 {'backup_id': backup.id,
                   'status': status})
         try:
             # NOTE(flaper87): Verify the driver is enabled
@@ -656,8 +641,7 @@ class BackupManager(manager.SchedulerDependentManager):
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Backup driver has not been initialized"))

-        backup = self.db.backup_get(context, backup_id)
-        backup_service = self._map_service_to_driver(backup['service'])
+        backup_service = self._map_service_to_driver(backup.service)
         LOG.info(_LI('Backup service: %s.'), backup_service)
         if backup_service is not None:
             configured_service = self.driver_name
@@ -676,9 +660,9 @@ class BackupManager(manager.SchedulerDependentManager):
                     # check whether we could verify the backup is ok or not
                     if isinstance(backup_service,
                                   driver.BackupDriverWithVerify):
-                        backup_service.verify(backup_id)
-                        self.db.backup_update(context, backup_id,
-                                              {'status': status})
+                        backup_service.verify(backup.id)
+                        backup.status = status
+                        backup.save()
                     # driver does not support verify function
                     else:
                         msg = (_('Backup service %(configured_service)s '
@@ -686,20 +670,20 @@ class BackupManager(manager.SchedulerDependentManager):
                                  ' %(id)s is not verified. '
                                  'Skipping verify.') %
                                {'configured_service': self.driver_name,
-                                'id': backup_id})
+                                'id': backup.id})
                         raise exception.BackupVerifyUnsupportedDriver(
                             reason=msg)
             # reset status to error or from restoring to available
             else:
                 if (status == 'error' or
                         (status == 'available' and
-                         backup['status'] == 'restoring')):
-                    self.db.backup_update(context, backup_id,
-                                          {'status': status})
+                         backup.status == 'restoring')):
+                    backup.status = status
+                    backup.save()
         except exception.InvalidBackup:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Backup id %s is not invalid. "
-                              "Skipping reset."), backup_id)
+                              "Skipping reset."), backup.id)
         except exception.BackupVerifyUnsupportedDriver:
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE('Backup service %(configured_service)s '
@@ -707,19 +691,19 @@ class BackupManager(manager.SchedulerDependentManager):
                               '%(id)s is not verified. '
                               'Skipping verify.'),
                           {'configured_service': self.driver_name,
-                           'id': backup_id})
+                           'id': backup.id})
         except AttributeError:
             msg = (_('Backup service %(service)s does not support '
                      'verify. Backup id %(id)s is not verified. '
                      'Skipping reset.') %
                    {'service': self.driver_name,
-                    'id': backup_id})
+                    'id': backup.id})
             LOG.error(msg)
             raise exception.BackupVerifyUnsupportedDriver(
                 reason=msg)

         # send notification to ceilometer
-        notifier_info = {'id': backup_id, 'update': {'status': status}}
+        notifier_info = {'id': backup.id, 'update': {'status': status}}
         notifier = rpc.get_notifier('backupStatusUpdate')
         notifier.info(context, "backups.reset_status.end",
                       notifier_info)


@@ -22,6 +22,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging as messaging

+from cinder.objects import base as objects_base
 from cinder import rpc
@@ -36,6 +37,7 @@ class BackupAPI(object):
     API version history:

         1.0 - Initial version.
+        1.1 - Changed methods to accept backup objects instead of IDs.
     """

     BASE_RPC_API_VERSION = '1.0'
@@ -44,56 +46,57 @@ class BackupAPI(object):
         super(BackupAPI, self).__init__()
         target = messaging.Target(topic=CONF.backup_topic,
                                   version=self.BASE_RPC_API_VERSION)
-        self.client = rpc.get_client(target, '1.0')
+        serializer = objects_base.CinderObjectSerializer()
+        self.client = rpc.get_client(target, '1.1', serializer=serializer)

-    def create_backup(self, ctxt, host, backup_id, volume_id):
-        LOG.debug("create_backup in rpcapi backup_id %s", backup_id)
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(ctxt, 'create_backup', backup_id=backup_id)
+    def create_backup(self, ctxt, backup):
+        LOG.debug("create_backup in rpcapi backup_id %s", backup.id)
+        cctxt = self.client.prepare(server=backup.host)
+        cctxt.cast(ctxt, 'create_backup', backup=backup)

-    def restore_backup(self, ctxt, host, backup_id, volume_id):
-        LOG.debug("restore_backup in rpcapi backup_id %s", backup_id)
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(ctxt, 'restore_backup', backup_id=backup_id,
+    def restore_backup(self, ctxt, volume_host, backup, volume_id):
+        LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
+        cctxt = self.client.prepare(server=volume_host)
+        cctxt.cast(ctxt, 'restore_backup', backup=backup,
                    volume_id=volume_id)

-    def delete_backup(self, ctxt, host, backup_id):
-        LOG.debug("delete_backup rpcapi backup_id %s", backup_id)
-        cctxt = self.client.prepare(server=host)
-        cctxt.cast(ctxt, 'delete_backup', backup_id=backup_id)
+    def delete_backup(self, ctxt, backup):
+        LOG.debug("delete_backup rpcapi backup_id %s", backup.id)
+        cctxt = self.client.prepare(server=backup.host)
+        cctxt.cast(ctxt, 'delete_backup', backup=backup)

-    def export_record(self, ctxt, host, backup_id):
+    def export_record(self, ctxt, backup):
         LOG.debug("export_record in rpcapi backup_id %(id)s "
                   "on host %(host)s.",
-                  {'id': backup_id,
-                   'host': host})
-        cctxt = self.client.prepare(server=host)
-        return cctxt.call(ctxt, 'export_record', backup_id=backup_id)
+                  {'id': backup.id,
+                   'host': backup.host})
+        cctxt = self.client.prepare(server=backup.host)
+        return cctxt.call(ctxt, 'export_record', backup_id=backup.id)

     def import_record(self,
                       ctxt,
                       host,
-                      backup_id,
+                      backup,
                       backup_service,
                       backup_url,
                       backup_hosts):
         LOG.debug("import_record rpcapi backup id %(id)s "
                   "on host %(host)s for backup_url %(url)s.",
-                  {'id': backup_id,
+                  {'id': backup.id,
                    'host': host,
                    'url': backup_url})
         cctxt = self.client.prepare(server=host)
         cctxt.cast(ctxt, 'import_record',
-                   backup_id=backup_id,
+                   backup_id=backup.id,
                    backup_service=backup_service,
                    backup_url=backup_url,
                    backup_hosts=backup_hosts)

-    def reset_status(self, ctxt, host, backup_id, status):
+    def reset_status(self, ctxt, backup, status):
         LOG.debug("reset_status in rpcapi backup_id %(id)s "
                   "on host %(host)s.",
-                  {'id': backup_id,
-                   'host': host})
-        cctxt = self.client.prepare(server=host)
-        return cctxt.cast(ctxt, 'reset_status', backup_id=backup_id,
+                  {'id': backup.id,
+                   'host': backup.host})
+        cctxt = self.client.prepare(server=backup.host)
+        return cctxt.cast(ctxt, 'reset_status', backup_id=backup.id,
                           status=status)
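Because Backup objects rather than plain IDs now cross the RPC boundary, the client above is pinned to version 1.1 and built with CinderObjectSerializer, which serializes and rebuilds versioned objects on either side of the wire. A minimal sketch of that pattern, using only names that appear in the hunk above (the surrounding service wiring and a populated backup object are assumed):

target = messaging.Target(topic=CONF.backup_topic, version='1.0')
serializer = objects_base.CinderObjectSerializer()
client = rpc.get_client(target, '1.1', serializer=serializer)

# With the serializer installed, a Backup object can be passed directly and
# arrives as a Backup object in the manager on the other end.
cctxt = client.prepare(server=backup.host)
cctxt.cast(ctxt, 'delete_backup', backup=backup)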


@@ -33,6 +33,7 @@ i18n.enable_lazy()
 # Need to register global_opts
 from cinder.common import config  # noqa
+from cinder import objects
 from cinder.openstack.common.report import guru_meditation_report as gmr
 from cinder import service
 from cinder import utils
@@ -43,6 +44,7 @@ CONF = cfg.CONF

 def main():
+    objects.register_all()
     CONF(sys.argv[1:], project='cinder',
          version=version.version_string())
     logging.setup(CONF, "cinder")


@@ -77,7 +77,7 @@ from cinder import db
 from cinder.db import migration as db_migration
 from cinder.db.sqlalchemy import api as db_api
 from cinder.i18n import _
-from cinder.objects import base as objects_base
+from cinder import objects
 from cinder import rpc
 from cinder import utils
 from cinder import version
@@ -272,7 +272,7 @@ class VolumeCommands(object):
         if not rpc.initialized():
             rpc.init(CONF)
             target = messaging.Target(topic=CONF.volume_topic)
-            serializer = objects_base.CinderObjectSerializer()
+            serializer = objects.base.CinderObjectSerializer()
             self._client = rpc.get_client(target, serializer=serializer)

         return self._client
@@ -402,7 +402,7 @@ class BackupCommands(object):
         on which the backup operation is running.
         """
         ctxt = context.get_admin_context()
-        backups = db.backup_get_all(ctxt)
+        backups = objects.BackupList.get_all(ctxt)

         hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s"
         print(hdr % (_('ID'),
@@ -531,6 +531,7 @@ def fetch_func_args(func):

 def main():
+    objects.register_all()
     """Parse options and call the appropriate class/method."""
     CONF.register_cli_opt(category_opt)
     script_name = sys.argv[0]


@@ -26,3 +26,4 @@ def register_all():
     # need to receive it via RPC.
     __import__('cinder.objects.volume')
     __import__('cinder.objects.snapshot')
+    __import__('cinder.objects.backup')

cinder/objects/backup.py (new file, 144 lines)

@@ -0,0 +1,144 @@
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import fields

from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import base
from cinder import utils

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


@base.CinderObjectRegistry.register
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),

        'user_id': fields.UUIDField(),
        'project_id': fields.UUIDField(),

        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        # NOTE(dulek): Metadata field is used to store any strings by backup
        # drivers, that's why it can't be DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),

        'object_count': fields.IntegerField(),
    }

    obj_extra_fields = ['name']

    @property
    def name(self):
        return CONF.backup_name_template % self.id

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        target_version = utils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, backup, db_backup):
        for name, field in backup.fields.items():
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        backup._context = context
        backup.obj_reset_changes()
        return backup

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_backup = db.backup_get(context, id)
        return cls._from_db_object(context, cls(context), db_backup)

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()

        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    @base.remotable
    def save(self):
        updates = self.obj_get_changes()
        if updates:
            db.backup_update(self._context, self.id, updates)

        self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        db.backup_destroy(self._context, self.id)


@base.CinderObjectRegistry.register
class BackupList(base.ObjectListBase, base.CinderObject):
    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('Backup'),
    }
    child_versions = {
        '1.0': '1.0'
    }

    @base.remotable_classmethod
    def get_all(cls, context, filters=None):
        backups = db.backup_get_all(context, filters)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)

    @base.remotable_classmethod
    def get_all_by_host(cls, context, host):
        backups = db.backup_get_all_by_host(context, host)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)

    @base.remotable_classmethod
    def get_all_by_project(cls, context, project_id, filters=None):
        backups = db.backup_get_all_by_project(context, project_id, filters)
        return base.obj_make_list(context, cls(context), objects.Backup,
                                  backups)

    @base.remotable_classmethod
    def get_all_by_volume(cls, context, volume_id, filters=None):
backups = db.backup_get_all_by_volume(context, volume_id, filters)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
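For readers new to the objects layer, a minimal usage sketch of the Backup and BackupList API defined above follows. It is illustrative only; the context handling and all field values are invented for the example and are not part of the patch.

from cinder import context
from cinder import objects

objects.register_all()
ctxt = context.get_admin_context()

# Create a backup row through the versioned object instead of db.backup_create().
backup = objects.Backup(context=ctxt,
                        volume_id='fake-volume-id',    # placeholder value
                        user_id='fake-user',           # placeholder value
                        project_id='fake-project',     # placeholder value
                        status='creating',
                        size=1)
backup.create()

# CinderObjectDictCompat keeps both access styles working during the transition.
assert backup.status == backup['status']

# Change a field and persist only the dirty columns.
backup.status = 'available'
backup.save()

# Listing goes through BackupList rather than the db API directly.
for b in objects.BackupList.get_all_by_project(ctxt, 'fake-project'):
    print(b.id, b.status)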

View File

@ -198,10 +198,14 @@ class AdminActionsTest(test.TestCase):
def test_backup_reset_status_as_admin(self): def test_backup_reset_status_as_admin(self):
ctx = context.RequestContext('admin', 'fake', True) ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available'}) volume = db.volume_create(ctx, {'status': 'available',
'user_id': 'user',
'project_id': 'project'})
backup = db.backup_create(ctx, {'status': 'available', backup = db.backup_create(ctx, {'status': 'available',
'size': 1, 'size': 1,
'volume_id': volume['id']}) 'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
resp = self._issue_backup_reset(ctx, resp = self._issue_backup_reset(ctx,
backup, backup,
@ -225,7 +229,9 @@ class AdminActionsTest(test.TestCase):
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test', volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1}) 'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available', backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id']}) 'volume_id': volume['id'],
'user_id': 'user',
'project_id': 'project'})
resp = self._issue_backup_reset(ctx, resp = self._issue_backup_reset(ctx,
backup, backup,

View File

@ -1195,10 +1195,10 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(res.status_int, 202) self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id) self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id) self.assertEqual(res_dict['restore']['volume_id'], volume_id)
mock_restore_backup.assert_called_once_with(mock.ANY, mock_restore_backup.assert_called_once_with(mock.ANY, u'HostB',
'HostB', mock.ANY, volume_id)
backup_id, # Manually check if restore_backup was called with appropriate backup.
volume_id) self.assertEqual(backup_id, mock_restore_backup.call_args[0][2].id)
db.volume_destroy(context.get_admin_context(), volume_id) db.volume_destroy(context.get_admin_context(), volume_id)
db.backup_destroy(context.get_admin_context(), backup_id) db.backup_destroy(context.get_admin_context(), backup_id)

View File

@ -36,6 +36,7 @@ from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _
from cinder import objects
from cinder import test from cinder import test
from cinder import utils from cinder import utils
@ -266,7 +267,10 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
'size': 1, 'size': 1,
'container': container, 'container': container,
'volume_id': '1234-5678-1234-8888', 'volume_id': '1234-5678-1234-8888',
'parent_id': parent_id} 'parent_id': parent_id,
'user_id': 'user-id',
'project_id': 'project-id',
}
return db.backup_create(self.ctxt, backup)['id'] return db.backup_create(self.ctxt, backup)['id']
def setUp(self): def setUp(self):
@ -297,7 +301,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_compression_algorithm='none') self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
def test_backup_bz2(self): def test_backup_bz2(self):
@ -305,7 +309,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_compression_algorithm='bz2') self.flags(backup_compression_algorithm='bz2')
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
def test_backup_zlib(self): def test_backup_zlib(self):
@ -313,7 +317,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_compression_algorithm='zlib') self.flags(backup_compression_algorithm='zlib')
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
def test_backup_default_container(self): def test_backup_default_container(self):
@ -321,9 +325,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
backup_id=FAKE_BACKUP_ID) backup_id=FAKE_BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, FAKE_BACKUP_ID) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, FAKE_BACKUP_ID) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME) self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
@ -339,7 +343,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
CONF.set_override("backup_enable_progress_timer", False) CONF.set_override("backup_enable_progress_timer", False)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called) self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called) self.assertTrue(_send_progress_end.called)
@ -351,7 +355,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_object_number_per_notification", 10)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called) self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called) self.assertTrue(_send_progress_end.called)
@ -364,7 +368,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
CONF.set_override("backup_enable_progress_timer", True) CONF.set_override("backup_enable_progress_timer", True)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called) self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called) self.assertTrue(_send_progress_end.called)
@ -374,9 +378,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name) self._create_backup_db_entry(container=container_name)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self): def test_backup_shafile(self):
@ -399,9 +403,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name) self._create_backup_db_entry(container=container_name)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Verify sha contents # Verify sha contents
@ -430,9 +434,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
backup_id=123) backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -440,9 +444,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
parent_id=123) parent_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file) service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name) self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files # Compare shas from both files
@ -475,9 +479,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name, backup_id=123) self._create_backup_db_entry(container=container_name, backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -490,9 +494,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
parent_id=123) parent_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file) service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name) self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup) content1 = service._read_sha256file(backup)
@ -525,9 +529,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name, backup_id=123) self._create_backup_db_entry(container=container_name, backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -540,9 +544,9 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
parent_id=123) parent_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file) service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name) self.assertEqual(deltabackup['container'], container_name)
# Verify that two shas are changed at index 16 and 20 # Verify that two shas are changed at index 16 and 20
@ -562,7 +566,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_compression_algorithm='none') self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta): def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake')) raise exception.BackupDriverException(message=_('fake'))
@ -587,7 +591,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_compression_algorithm='none') self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta): def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake')) raise exception.BackupDriverException(message=_('fake'))
@ -613,11 +617,12 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_sha_block_size_bytes=32) self.flags(backup_sha_block_size_bytes=32)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file: with tempfile.NamedTemporaryFile() as restored_file:
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', restored_file) service.restore(backup, '1234-5678-1234-8888', restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name, self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name)) restored_file.name))
@ -629,11 +634,11 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_sha_block_size_bytes=1024) self.flags(backup_sha_block_size_bytes=1024)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file: with tempfile.NamedTemporaryFile() as restored_file:
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', restored_file) service.restore(backup, '1234-5678-1234-8888', restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name, self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name)) restored_file.name))
@ -645,11 +650,11 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self.flags(backup_sha_block_size_bytes = 1024) self.flags(backup_sha_block_size_bytes = 1024)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file: with tempfile.NamedTemporaryFile() as restored_file:
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', restored_file) service.restore(backup, '1234-5678-1234-8888', restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name, self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name)) restored_file.name))
@ -677,7 +682,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name, backup_id=123) self._create_backup_db_entry(container=container_name, backup_id=123)
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -689,12 +694,12 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name, backup_id=124, self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123) parent_id=123)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file, True) service.backup(deltabackup, self.volume_file, True)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
with tempfile.NamedTemporaryFile() as restored_file: with tempfile.NamedTemporaryFile() as restored_file:
backup = db.backup_get(self.ctxt, 124) backup = objects.Backup.get_by_id(self.ctxt, 124)
service.restore(backup, '1234-5678-1234-8888', service.restore(backup, '1234-5678-1234-8888',
restored_file) restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name, self.assertTrue(filecmp.cmp(self.volume_file.name,
@ -703,7 +708,7 @@ class BackupNFSSwiftBasedTestCase(test.TestCase):
def test_delete(self): def test_delete(self):
self._create_backup_db_entry() self._create_backup_db_entry()
service = nfs.NFSBackupDriver(self.ctxt) service = nfs.NFSBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.delete(backup) service.delete(backup)
def test_get_compressor(self): def test_get_compressor(self):

View File

@ -0,0 +1,91 @@
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_versionedobjects.tests import test_objects
from cinder import objects
from cinder.tests.unit import fake_volume
fake_backup = {
'id': '1',
'volume_id': 'fake_id',
'status': "creating",
'size': 1,
'display_name': 'fake_name',
'display_description': 'fake_description',
'user_id': 'fake_user',
'project_id': 'fake_project',
}
class TestBackup(test_objects._LocalTest):
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
test.assertEqual(db[field], obj[field])
@mock.patch('cinder.db.backup_get', return_value=fake_backup)
def test_get_by_id(self, backup_get):
backup = objects.Backup.get_by_id(self.context, 1)
self._compare(self, fake_backup, backup)
@mock.patch('cinder.db.backup_create', return_value=fake_backup)
def test_create(self, backup_create):
backup = objects.Backup(context=self.context)
backup.create()
self.assertEqual(fake_backup['id'], backup.id)
self.assertEqual(fake_backup['volume_id'], backup.volume_id)
@mock.patch('cinder.db.backup_update')
def test_save(self, backup_update):
backup = objects.Backup._from_db_object(
self.context, objects.Backup(), fake_backup)
backup.display_name = 'foobar'
backup.save()
backup_update.assert_called_once_with(self.context, backup.id,
{'display_name': 'foobar'})
@mock.patch('cinder.db.backup_destroy')
def test_destroy(self, backup_destroy):
backup = objects.Backup(context=self.context, id=1)
backup.destroy()
backup_destroy.assert_called_once_with(self.context, '1')
class TestBackupList(test_objects._LocalTest):
@mock.patch('cinder.db.backup_get_all', return_value=[fake_backup])
def test_get_all(self, backup_get_all):
backups = objects.BackupList.get_all(self.context)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_project',
return_value=[fake_backup])
def test_get_all_by_project(self, get_all_by_project):
backups = objects.BackupList.get_all_by_project(
self.context, self.project_id)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
@mock.patch('cinder.db.backup_get_all_by_host',
return_value=[fake_backup])
def test_get_all_for_volume(self, get_all_by_host):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
backups = objects.BackupList.get_all_by_host(self.context,
fake_volume_obj.id)
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])
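One detail the test_save case above relies on is the object's change tracking; a short sketch of the mechanics (ctxt and db_row stand in for a request context and a dict such as fake_backup; nothing here is introduced by the patch itself):

from cinder import objects

backup = objects.Backup._from_db_object(ctxt, objects.Backup(), db_row)
backup.obj_get_changes()    # {} -- _from_db_object resets the change tracking
backup.display_name = 'foobar'
backup.obj_get_changes()    # {'display_name': 'foobar'} -- only the dirty field
backup.save()               # calls db.backup_update(ctxt, backup.id, {'display_name': 'foobar'})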

View File

@ -29,6 +29,7 @@ from cinder.backup import manager
from cinder import context from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder import objects
from cinder import test from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service from cinder.tests.unit.backup import fake_service_with_verify as fake_service
@ -63,28 +64,31 @@ class BaseBackupTest(test.TestCase):
status='creating', status='creating',
size=1, size=1,
object_count=0, object_count=0,
project_id='fake'): project_id='fake',
service=None):
"""Create a backup entry in the DB. """Create a backup entry in the DB.
Return the entry ID Return the entry ID
""" """
backup = {} kwargs = {}
backup['volume_id'] = volume_id kwargs['volume_id'] = volume_id
backup['user_id'] = 'fake' kwargs['user_id'] = 'fake'
backup['project_id'] = project_id kwargs['project_id'] = project_id
backup['host'] = 'testhost' kwargs['host'] = 'testhost'
backup['availability_zone'] = '1' kwargs['availability_zone'] = '1'
backup['display_name'] = display_name kwargs['display_name'] = display_name
backup['display_description'] = display_description kwargs['display_description'] = display_description
backup['container'] = container kwargs['container'] = container
backup['status'] = status kwargs['status'] = status
backup['fail_reason'] = '' kwargs['fail_reason'] = ''
backup['service'] = CONF.backup_driver kwargs['service'] = service or CONF.backup_driver
backup['snapshot'] = False kwargs['snapshot'] = False
backup['parent_id'] = None kwargs['parent_id'] = None
backup['size'] = size kwargs['size'] = size
backup['object_count'] = object_count kwargs['object_count'] = object_count
return db.backup_create(self.ctxt, backup)['id'] backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
def _create_volume_db_entry(self, display_name='test_volume', def _create_volume_db_entry(self, display_name='test_volume',
display_description='this is a test volume', display_description='this is a test volume',
@ -116,10 +120,10 @@ class BaseBackupTest(test.TestCase):
"""Create backup metadata export entry.""" """Create backup metadata export entry."""
vol_id = self._create_volume_db_entry(status='available', vol_id = self._create_volume_db_entry(status='available',
size=vol_size) size=vol_size)
backup_id = self._create_backup_db_entry(status='available', backup = self._create_backup_db_entry(status='available',
volume_id=vol_id) volume_id=vol_id)
export = self.backup_mgr.export_record(self.ctxt, backup_id) export = self.backup_mgr.export_record(self.ctxt, backup)
return export return export
def _create_export_record_db_entry(self, def _create_export_record_db_entry(self,
@ -130,12 +134,14 @@ class BaseBackupTest(test.TestCase):
Return the entry ID Return the entry ID
""" """
backup = {} kwargs = {}
backup['volume_id'] = volume_id kwargs['volume_id'] = volume_id
backup['user_id'] = 'fake' kwargs['user_id'] = 'fake'
backup['project_id'] = project_id kwargs['project_id'] = project_id
backup['status'] = status kwargs['status'] = status
return db.backup_create(self.ctxt, backup)['id'] backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup
class BackupTestCase(BaseBackupTest): class BackupTestCase(BaseBackupTest):
@ -151,9 +157,9 @@ class BackupTestCase(BaseBackupTest):
vol2_id = self._create_volume_db_entry() vol2_id = self._create_volume_db_entry()
self._create_volume_attach(vol2_id) self._create_volume_attach(vol2_id)
db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'}) db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
backup1_id = self._create_backup_db_entry(status='creating') backup1 = self._create_backup_db_entry(status='creating')
backup2_id = self._create_backup_db_entry(status='restoring') backup2 = self._create_backup_db_entry(status='restoring')
backup3_id = self._create_backup_db_entry(status='deleting') backup3 = self._create_backup_db_entry(status='deleting')
self.backup_mgr.init_host() self.backup_mgr.init_host()
vol1 = db.volume_get(self.ctxt, vol1_id) vol1 = db.volume_get(self.ctxt, vol1_id)
@ -161,52 +167,52 @@ class BackupTestCase(BaseBackupTest):
vol2 = db.volume_get(self.ctxt, vol2_id) vol2 = db.volume_get(self.ctxt, vol2_id)
self.assertEqual(vol2['status'], 'error_restoring') self.assertEqual(vol2['status'], 'error_restoring')
backup1 = db.backup_get(self.ctxt, backup1_id) backup1 = db.backup_get(self.ctxt, backup1.id)
self.assertEqual(backup1['status'], 'error') self.assertEqual(backup1['status'], 'error')
backup2 = db.backup_get(self.ctxt, backup2_id) backup2 = db.backup_get(self.ctxt, backup2.id)
self.assertEqual(backup2['status'], 'available') self.assertEqual(backup2['status'], 'available')
self.assertRaises(exception.BackupNotFound, self.assertRaises(exception.BackupNotFound,
db.backup_get, db.backup_get,
self.ctxt, self.ctxt,
backup3_id) backup3.id)
def test_create_backup_with_bad_volume_status(self): def test_create_backup_with_bad_volume_status(self):
"""Test error handling when creating a backup from a volume """Test error handling when creating a backup from a volume
with a bad status with a bad status
""" """
vol_id = self._create_volume_db_entry(status='available', size=1) vol_id = self._create_volume_db_entry(status='available', size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume, self.assertRaises(exception.InvalidVolume,
self.backup_mgr.create_backup, self.backup_mgr.create_backup,
self.ctxt, self.ctxt,
backup_id) backup)
def test_create_backup_with_bad_backup_status(self): def test_create_backup_with_bad_backup_status(self):
"""Test error handling when creating a backup with a backup """Test error handling when creating a backup with a backup
with a bad status with a bad status
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='available', backup = self._create_backup_db_entry(status='available',
volume_id=vol_id) volume_id=vol_id)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.create_backup, self.backup_mgr.create_backup,
self.ctxt, self.ctxt,
backup_id) backup)
@mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume')) @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
def test_create_backup_with_error(self, _mock_volume_backup): def test_create_backup_with_error(self, _mock_volume_backup):
"""Test error handling when error occurs during backup creation.""" """Test error handling when error occurs during backup creation."""
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id)
_mock_volume_backup.side_effect = FakeBackupException('fake') _mock_volume_backup.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException, self.assertRaises(FakeBackupException,
self.backup_mgr.create_backup, self.backup_mgr.create_backup,
self.ctxt, self.ctxt,
backup_id) backup)
vol = db.volume_get(self.ctxt, vol_id) vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available') self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
self.assertTrue(_mock_volume_backup.called) self.assertTrue(_mock_volume_backup.called)
@ -215,12 +221,12 @@ class BackupTestCase(BaseBackupTest):
"""Test normal backup creation.""" """Test normal backup creation."""
vol_size = 1 vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size) vol_id = self._create_volume_db_entry(size=vol_size)
backup_id = self._create_backup_db_entry(volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup_id) self.backup_mgr.create_backup(self.ctxt, backup)
vol = db.volume_get(self.ctxt, vol_id) vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available') self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size) self.assertEqual(backup['size'], vol_size)
self.assertTrue(_mock_volume_backup.called) self.assertTrue(_mock_volume_backup.called)
@ -231,9 +237,9 @@ class BackupTestCase(BaseBackupTest):
"""Test normal backup creation with notifications.""" """Test normal backup creation with notifications."""
vol_size = 1 vol_size = 1
vol_id = self._create_volume_db_entry(size=vol_size) vol_id = self._create_volume_db_entry(size=vol_size)
backup_id = self._create_backup_db_entry(volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id)
self.backup_mgr.create_backup(self.ctxt, backup_id) self.backup_mgr.create_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count) self.assertEqual(2, notify.call_count)
def test_restore_backup_with_bad_volume_status(self): def test_restore_backup_with_bad_volume_status(self):
@ -241,13 +247,13 @@ class BackupTestCase(BaseBackupTest):
with a bad status. with a bad status.
""" """
vol_id = self._create_volume_db_entry(status='available', size=1) vol_id = self._create_volume_db_entry(status='available', size=1)
backup_id = self._create_backup_db_entry(volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertRaises(exception.InvalidVolume, self.assertRaises(exception.InvalidVolume,
self.backup_mgr.restore_backup, self.backup_mgr.restore_backup,
self.ctxt, self.ctxt,
backup_id, backup,
vol_id) vol_id)
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
def test_restore_backup_with_bad_backup_status(self): def test_restore_backup_with_bad_backup_status(self):
@ -256,16 +262,16 @@ class BackupTestCase(BaseBackupTest):
""" """
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1) size=1)
backup_id = self._create_backup_db_entry(status='available', backup = self._create_backup_db_entry(status='available',
volume_id=vol_id) volume_id=vol_id)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup, self.backup_mgr.restore_backup,
self.ctxt, self.ctxt,
backup_id, backup,
vol_id) vol_id)
vol = db.volume_get(self.ctxt, vol_id) vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error') self.assertEqual(vol['status'], 'error')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup')) @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
@ -273,18 +279,18 @@ class BackupTestCase(BaseBackupTest):
"""Test error handling when an error occurs during backup restore.""" """Test error handling when an error occurs during backup restore."""
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1) size=1)
backup_id = self._create_backup_db_entry(status='restoring', backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id) volume_id=vol_id)
_mock_volume_restore.side_effect = FakeBackupException('fake') _mock_volume_restore.side_effect = FakeBackupException('fake')
self.assertRaises(FakeBackupException, self.assertRaises(FakeBackupException,
self.backup_mgr.restore_backup, self.backup_mgr.restore_backup,
self.ctxt, self.ctxt,
backup_id, backup,
vol_id) vol_id)
vol = db.volume_get(self.ctxt, vol_id) vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error_restoring') self.assertEqual(vol['status'], 'error_restoring')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called) self.assertTrue(_mock_volume_restore.called)
@ -294,19 +300,19 @@ class BackupTestCase(BaseBackupTest):
""" """
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
size=1) size=1)
backup_id = self._create_backup_db_entry(status='restoring', service = 'cinder.tests.backup.bad_service'
volume_id=vol_id) backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id,
service=service)
service = 'cinder.tests.unit.backup.bad_service'
db.backup_update(self.ctxt, backup_id, {'service': service})
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.restore_backup, self.backup_mgr.restore_backup,
self.ctxt, self.ctxt,
backup_id, backup,
vol_id) vol_id)
vol = db.volume_get(self.ctxt, vol_id) vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'error') self.assertEqual(vol['status'], 'error')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
@mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup')) @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
@ -315,13 +321,13 @@ class BackupTestCase(BaseBackupTest):
vol_size = 1 vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size) size=vol_size)
backup_id = self._create_backup_db_entry(status='restoring', backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id) volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id) self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
vol = db.volume_get(self.ctxt, vol_id) vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual(vol['status'], 'available') self.assertEqual(vol['status'], 'available')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
self.assertTrue(_mock_volume_restore.called) self.assertTrue(_mock_volume_restore.called)
@ -332,10 +338,10 @@ class BackupTestCase(BaseBackupTest):
vol_size = 1 vol_size = 1
vol_id = self._create_volume_db_entry(status='restoring-backup', vol_id = self._create_volume_db_entry(status='restoring-backup',
size=vol_size) size=vol_size)
backup_id = self._create_backup_db_entry(status='restoring', backup = self._create_backup_db_entry(status='restoring',
volume_id=vol_id) volume_id=vol_id)
self.backup_mgr.restore_backup(self.ctxt, backup_id, vol_id) self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
self.assertEqual(2, notify.call_count) self.assertEqual(2, notify.call_count)
def test_delete_backup_with_bad_backup_status(self): def test_delete_backup_with_bad_backup_status(self):
@ -343,26 +349,26 @@ class BackupTestCase(BaseBackupTest):
with a bad status. with a bad status.
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='available', backup = self._create_backup_db_entry(status='available',
volume_id=vol_id) volume_id=vol_id)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup, self.backup_mgr.delete_backup,
self.ctxt, self.ctxt,
backup_id) backup)
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_error(self): def test_delete_backup_with_error(self):
"""Test error handling when an error occurs during backup deletion.""" """Test error handling when an error occurs during backup deletion."""
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting', backup = self._create_backup_db_entry(status='deleting',
display_name='fail_on_delete', display_name='fail_on_delete',
volume_id=vol_id) volume_id=vol_id)
self.assertRaises(IOError, self.assertRaises(IOError,
self.backup_mgr.delete_backup, self.backup_mgr.delete_backup,
self.ctxt, self.ctxt,
backup_id) backup)
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_bad_service(self): def test_delete_backup_with_bad_service(self):
@ -370,15 +376,15 @@ class BackupTestCase(BaseBackupTest):
with a different service to that used to create the backup. with a different service to that used to create the backup.
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting', service = 'cinder.tests.backup.bad_service'
volume_id=vol_id) backup = self._create_backup_db_entry(status='deleting',
service = 'cinder.tests.unit.backup.bad_service' volume_id=vol_id,
db.backup_update(self.ctxt, backup_id, {'service': service}) service=service)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.delete_backup, self.backup_mgr.delete_backup,
self.ctxt, self.ctxt,
backup_id) backup)
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_delete_backup_with_no_service(self): def test_delete_backup_with_no_service(self):
@ -386,24 +392,25 @@ class BackupTestCase(BaseBackupTest):
with no service defined for that backup, relates to bug #1162908 with no service defined for that backup, relates to bug #1162908
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting', backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id) volume_id=vol_id)
db.backup_update(self.ctxt, backup_id, {'service': None}) backup.service = None
self.backup_mgr.delete_backup(self.ctxt, backup_id) backup.save()
self.backup_mgr.delete_backup(self.ctxt, backup)
def test_delete_backup(self): def test_delete_backup(self):
"""Test normal backup deletion.""" """Test normal backup deletion."""
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting', backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id) volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup_id) self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertRaises(exception.BackupNotFound, self.assertRaises(exception.BackupNotFound,
db.backup_get, db.backup_get,
self.ctxt, self.ctxt,
backup_id) backup.id)
ctxt_read_deleted = context.get_admin_context('yes') ctxt_read_deleted = context.get_admin_context('yes')
backup = db.backup_get(ctxt_read_deleted, backup_id) backup = db.backup_get(ctxt_read_deleted, backup.id)
self.assertEqual(backup.deleted, True) self.assertEqual(backup.deleted, True)
self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at) self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
self.assertEqual(backup.status, 'deleted') self.assertEqual(backup.status, 'deleted')
@ -412,9 +419,9 @@ class BackupTestCase(BaseBackupTest):
def test_delete_backup_with_notify(self, notify): def test_delete_backup_with_notify(self, notify):
"""Test normal backup deletion with notifications.""" """Test normal backup deletion with notifications."""
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='deleting', backup = self._create_backup_db_entry(status='deleting',
volume_id=vol_id) volume_id=vol_id)
self.backup_mgr.delete_backup(self.ctxt, backup_id) self.backup_mgr.delete_backup(self.ctxt, backup)
self.assertEqual(2, notify.call_count) self.assertEqual(2, notify.call_count)
def test_list_backup(self): def test_list_backup(self):
@ -425,7 +432,7 @@ class BackupTestCase(BaseBackupTest):
b2 = self._create_backup_db_entry(project_id='project1') b2 = self._create_backup_db_entry(project_id='project1')
backups = db.backup_get_all_by_project(self.ctxt, 'project1') backups = db.backup_get_all_by_project(self.ctxt, 'project1')
self.assertEqual(len(backups), 1) self.assertEqual(len(backups), 1)
self.assertEqual(backups[0].id, b2) self.assertEqual(backups[0].id, b2.id)
def test_backup_get_all_by_project_with_deleted(self): def test_backup_get_all_by_project_with_deleted(self):
"""Test deleted backups don't show up in backup_get_all_by_project. """Test deleted backups don't show up in backup_get_all_by_project.
@ -434,13 +441,13 @@ class BackupTestCase(BaseBackupTest):
backups = db.backup_get_all_by_project(self.ctxt, 'fake') backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 0) self.assertEqual(len(backups), 0)
backup_id_keep = self._create_backup_db_entry() backup_keep = self._create_backup_db_entry()
backup_id = self._create_backup_db_entry() backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup_id) db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_project(self.ctxt, 'fake') backups = db.backup_get_all_by_project(self.ctxt, 'fake')
self.assertEqual(len(backups), 1) self.assertEqual(len(backups), 1)
self.assertEqual(backups[0].id, backup_id_keep) self.assertEqual(backups[0].id, backup_keep.id)
ctxt_read_deleted = context.get_admin_context('yes') ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake') backups = db.backup_get_all_by_project(ctxt_read_deleted, 'fake')
@ -453,13 +460,13 @@ class BackupTestCase(BaseBackupTest):
backups = db.backup_get_all_by_host(self.ctxt, 'testhost') backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 0) self.assertEqual(len(backups), 0)
backup_id_keep = self._create_backup_db_entry() backup_keep = self._create_backup_db_entry()
backup_id = self._create_backup_db_entry() backup = self._create_backup_db_entry()
db.backup_destroy(self.ctxt, backup_id) db.backup_destroy(self.ctxt, backup.id)
backups = db.backup_get_all_by_host(self.ctxt, 'testhost') backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
self.assertEqual(len(backups), 1) self.assertEqual(len(backups), 1)
self.assertEqual(backups[0].id, backup_id_keep) self.assertEqual(backups[0].id, backup_keep.id)
ctxt_read_deleted = context.get_admin_context('yes') ctxt_read_deleted = context.get_admin_context('yes')
backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost') backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
@ -478,14 +485,15 @@ class BackupTestCase(BaseBackupTest):
record with a different service to that used to create the backup. record with a different service to that used to create the backup.
""" """
vol_id = self._create_volume_db_entry(size=1) vol_id = self._create_volume_db_entry(size=1)
backup_id = self._create_backup_db_entry(status='available', service = 'cinder.tests.backup.bad_service'
volume_id=vol_id) backup = self._create_backup_db_entry(status='available',
service = 'cinder.tests.unit.backup.bad_service' volume_id=vol_id,
db.backup_update(self.ctxt, backup_id, {'service': service}) service=service)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record, self.backup_mgr.export_record,
self.ctxt, self.ctxt,
backup_id) backup)
def test_export_record_with_bad_backup_status(self): def test_export_record_with_bad_backup_status(self):
"""Test error handling when exporting a backup record with a backup """Test error handling when exporting a backup record with a backup
@ -493,22 +501,22 @@ class BackupTestCase(BaseBackupTest):
""" """
vol_id = self._create_volume_db_entry(status='available', vol_id = self._create_volume_db_entry(status='available',
size=1) size=1)
backup_id = self._create_backup_db_entry(status='error', backup = self._create_backup_db_entry(status='error',
volume_id=vol_id) volume_id=vol_id)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.backup_mgr.export_record, self.backup_mgr.export_record,
self.ctxt, self.ctxt,
backup_id) backup)
def test_export_record(self): def test_export_record(self):
"""Test normal backup record export.""" """Test normal backup record export."""
vol_size = 1 vol_size = 1
vol_id = self._create_volume_db_entry(status='available', vol_id = self._create_volume_db_entry(status='available',
size=vol_size) size=vol_size)
backup_id = self._create_backup_db_entry(status='available', backup = self._create_backup_db_entry(status='available',
volume_id=vol_id) volume_id=vol_id)
export = self.backup_mgr.export_record(self.ctxt, backup_id) export = self.backup_mgr.export_record(self.ctxt, backup)
self.assertEqual(export['backup_service'], CONF.backup_driver) self.assertEqual(export['backup_service'], CONF.backup_driver)
self.assertTrue('backup_url' in export) self.assertTrue('backup_url' in export)
@ -527,7 +535,7 @@ class BackupTestCase(BaseBackupTest):
export['backup_service'], export['backup_service'],
export['backup_url'], export['backup_url'],
backup_hosts) backup_hosts)
backup = db.backup_get(self.ctxt, imported_record) backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size) self.assertEqual(backup['size'], vol_size)
@ -583,7 +591,7 @@ class BackupTestCase(BaseBackupTest):
export['backup_url'], export['backup_url'],
backup_hosts) backup_hosts)
self.assertTrue(_mock_record_import.called) self.assertTrue(_mock_record_import.called)
backup = db.backup_get(self.ctxt, imported_record) backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
@ -617,7 +625,7 @@ class BackupTestCaseWithVerify(BaseBackupTest):
export['backup_service'], export['backup_service'],
export['backup_url'], export['backup_url'],
backup_hosts) backup_hosts)
backup = db.backup_get(self.ctxt, imported_record) backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
self.assertEqual(backup['size'], vol_size) self.assertEqual(backup['size'], vol_size)
@ -646,24 +654,24 @@ class BackupTestCaseWithVerify(BaseBackupTest):
export['backup_url'], export['backup_url'],
backup_hosts) backup_hosts)
self.assertTrue(_mock_record_verify.called) self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record) backup = db.backup_get(self.ctxt, imported_record.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_nonrestoring_to_available( def test_backup_reset_status_from_nonrestoring_to_available(
self): self):
vol_id = self._create_volume_db_entry(status='available', vol_id = self._create_volume_db_entry(status='available',
size=1) size=1)
backup_id = self._create_backup_db_entry(status='error', backup = self._create_backup_db_entry(status='error',
volume_id=vol_id) volume_id=vol_id)
with mock.patch.object(manager.BackupManager, with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \ '_map_service_to_driver') as \
mock_map_service_to_driver: mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \ mock_map_service_to_driver.return_value = \
fake_service.get_backup_driver(self.ctxt) fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt, self.backup_mgr.reset_status(self.ctxt,
backup_id, backup,
'available') 'available')
backup = db.backup_get(self.ctxt, backup_id) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_available_invalid_backup(self): def test_backup_reset_status_to_available_invalid_backup(self):
@ -671,11 +679,8 @@ class BackupTestCaseWithVerify(BaseBackupTest):
'host': 'test', 'host': 'test',
'provider_location': '', 'provider_location': '',
'size': 1}) 'size': 1})
backup = db.backup_create(self.ctxt, backup = self._create_backup_db_entry(status='error',
{'status': 'error', volume_id=volume['id'])
'service':
CONF.backup_driver,
'volume_id': volume['id']})
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' % _mock_backup_verify_class = ('%s.%s.%s' %
@ -690,9 +695,9 @@ class BackupTestCaseWithVerify(BaseBackupTest):
self.assertRaises(exception.BackupVerifyUnsupportedDriver, self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status, self.backup_mgr.reset_status,
self.ctxt, self.ctxt,
backup['id'], backup,
'available') 'available')
backup = db.backup_get(self.ctxt, backup['id']) backup = db.backup_get(self.ctxt, backup.id)
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_restoring_to_available(self): def test_backup_reset_status_from_restoring_to_available(self):
@ -701,16 +706,11 @@ class BackupTestCaseWithVerify(BaseBackupTest):
'host': 'test', 'host': 'test',
'provider_location': '', 'provider_location': '',
'size': 1}) 'size': 1})
backup = db.backup_create(self.ctxt, backup = self._create_backup_db_entry(status='restoring',
{'status': 'restoring', volume_id=volume['id'])
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt, self.backup_mgr.reset_status(self.ctxt, backup, 'available')
backup['id'], backup = db.backup_get(self.ctxt, backup.id)
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'available') self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_error(self): def test_backup_reset_status_to_error(self):
@ -719,13 +719,8 @@ class BackupTestCaseWithVerify(BaseBackupTest):
'host': 'test', 'host': 'test',
'provider_location': '', 'provider_location': '',
'size': 1}) 'size': 1})
backup = db.backup_create(self.ctxt, backup = self._create_backup_db_entry(status='creating',
{'status': 'creating', volume_id=volume['id'])
'service': self.backup_mgr.reset_status(self.ctxt, backup, 'error')
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'error')
backup = db.backup_get(self.ctxt, backup['id']) backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error') self.assertEqual(backup['status'], 'error')
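
The reset_status tests above now hand the manager the backup record itself rather than its ID, and re-read the row afterwards for the status assertion. A minimal sketch of that call pattern (ctxt, backup_mgr and backup_id are stand-ins for the test fixtures, and reset_and_check is a hypothetical helper, not code from this patch):

    from cinder import db, objects

    def reset_and_check(ctxt, backup_mgr, backup_id):
        # Load the backup as a versioned object instead of a plain DB dict.
        backup = objects.Backup.get_by_id(ctxt, backup_id)
        # The manager API now takes the object, not the ID.
        backup_mgr.reset_status(ctxt, backup, 'available')
        # Assertions can still go through the DB layer afterwards.
        return db.backup_get(ctxt, backup.id)['status']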

View File

@ -32,6 +32,7 @@ from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _
from cinder import objects
from cinder import test from cinder import test
from cinder.volume.drivers import rbd as rbddriver from cinder.volume.drivers import rbd as rbddriver
@ -105,8 +106,10 @@ class BackupCephTestCase(test.TestCase):
vol = {'id': id, 'size': size, 'status': 'available'} vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id'] return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size): def _create_backup_db_entry(self, backupid, volid, size,
backup = {'id': backupid, 'size': size, 'volume_id': volid} userid='user-id', projectid='project-id'):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id'] return db.backup_create(self.ctxt, backup)['id']
def time_inc(self): def time_inc(self):
@ -157,7 +160,7 @@ class BackupCephTestCase(test.TestCase):
self.backup_id = str(uuid.uuid4()) self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id, self._create_backup_db_entry(self.backup_id, self.volume_id,
self.volume_size) self.volume_size)
self.backup = db.backup_get(self.ctxt, self.backup_id) self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
# Create alternate volume. # Create alternate volume.
self.alt_volume_id = str(uuid.uuid4()) self.alt_volume_id = str(uuid.uuid4())
@ -596,7 +599,7 @@ class BackupCephTestCase(test.TestCase):
backup_id = str(uuid.uuid4()) backup_id = str(uuid.uuid4())
self._create_backup_db_entry(backup_id, volume_id, 1) self._create_backup_db_entry(backup_id, volume_id, 1)
backup = db.backup_get(self.ctxt, backup_id) backup = objects.Backup.get_by_id(self.ctxt, backup_id)
self.assertRaises(exception.InvalidParameterValue, self.service.backup, self.assertRaises(exception.InvalidParameterValue, self.service.backup,
backup, self.volume_file) backup, self.volume_file)
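
The same helper change recurs in the other backup driver test suites touched by this patch: _create_backup_db_entry grows user_id and project_id arguments (defaulting to the 'user-id'/'project-id' placeholders above), and the created row is then fetched through objects.Backup.get_by_id rather than db.backup_get. A condensed sketch, written as module-level functions instead of test methods (_load_backup is a hypothetical name; the added owner fields appear to be there so the Backup object comes back fully populated):

    from cinder import db, objects

    def _create_backup_db_entry(ctxt, backup_id, volume_id, size,
                                user_id='user-id', project_id='project-id'):
        backup = {'id': backup_id, 'size': size, 'volume_id': volume_id,
                  'user_id': user_id, 'project_id': project_id}
        return db.backup_create(ctxt, backup)['id']

    def _load_backup(ctxt, backup_id):
        # Drivers under test now receive this object instead of a raw DB row.
        return objects.Backup.get_by_id(ctxt, backup_id)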

View File

@ -23,6 +23,7 @@ from cinder.backup import driver
from cinder import context from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder import objects
from cinder import test from cinder import test
from cinder.tests.unit.backup import fake_service from cinder.tests.unit.backup import fake_service
@ -40,8 +41,10 @@ class BackupBaseDriverTestCase(test.TestCase):
vol = {'id': id, 'size': size, 'status': 'available'} vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id'] return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size): def _create_backup_db_entry(self, backupid, volid, size,
backup = {'id': backupid, 'size': size, 'volume_id': volid} userid='user-id', projectid='project-id'):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id'] return db.backup_create(self.ctxt, backup)['id']
def setUp(self): def setUp(self):
@ -53,7 +56,7 @@ class BackupBaseDriverTestCase(test.TestCase):
self._create_backup_db_entry(self.backup_id, self.volume_id, 1) self._create_backup_db_entry(self.backup_id, self.volume_id, 1)
self._create_volume_db_entry(self.volume_id, 1) self._create_volume_db_entry(self.volume_id, 1)
self.backup = db.backup_get(self.ctxt, self.backup_id) self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
self.driver = fake_service.FakeBackupService(self.ctxt) self.driver = fake_service.FakeBackupService(self.ctxt)
def test_get_metadata(self): def test_get_metadata(self):

View File

@ -35,6 +35,7 @@ from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _
from cinder import objects
from cinder import test from cinder import test
from cinder.tests.unit.backup import fake_swift_client from cinder.tests.unit.backup import fake_swift_client
from cinder.tests.unit.backup import fake_swift_client2 from cinder.tests.unit.backup import fake_swift_client2
@ -69,7 +70,10 @@ class BackupSwiftTestCase(test.TestCase):
'size': 1, 'size': 1,
'container': container, 'container': container,
'volume_id': '1234-5678-1234-8888', 'volume_id': '1234-5678-1234-8888',
'parent_id': parent_id} 'parent_id': parent_id,
'user_id': 'user-id',
'project_id': 'project-id',
}
return db.backup_create(self.ctxt, backup)['id'] return db.backup_create(self.ctxt, backup)['id']
def setUp(self): def setUp(self):
@ -127,7 +131,7 @@ class BackupSwiftTestCase(test.TestCase):
self.flags(backup_compression_algorithm='none') self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
def test_backup_bz2(self): def test_backup_bz2(self):
@ -135,7 +139,7 @@ class BackupSwiftTestCase(test.TestCase):
self.flags(backup_compression_algorithm='bz2') self.flags(backup_compression_algorithm='bz2')
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
def test_backup_zlib(self): def test_backup_zlib(self):
@ -143,16 +147,16 @@ class BackupSwiftTestCase(test.TestCase):
self.flags(backup_compression_algorithm='zlib') self.flags(backup_compression_algorithm='zlib')
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
def test_backup_default_container(self): def test_backup_default_container(self):
self._create_backup_db_entry(container=None) self._create_backup_db_entry(container=None)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], 'volumebackups') self.assertEqual(backup['container'], 'volumebackups')
@mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.'
@ -168,7 +172,7 @@ class BackupSwiftTestCase(test.TestCase):
CONF.set_override("backup_swift_enable_progress_timer", False) CONF.set_override("backup_swift_enable_progress_timer", False)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called) self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called) self.assertTrue(_send_progress_end.called)
@ -180,7 +184,7 @@ class BackupSwiftTestCase(test.TestCase):
CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_object_number_per_notification", 10)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called) self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called) self.assertTrue(_send_progress_end.called)
@ -193,7 +197,7 @@ class BackupSwiftTestCase(test.TestCase):
CONF.set_override("backup_swift_enable_progress_timer", True) CONF.set_override("backup_swift_enable_progress_timer", True)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called) self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called) self.assertTrue(_send_progress_end.called)
@ -203,9 +207,9 @@ class BackupSwiftTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name) self._create_backup_db_entry(container=container_name)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self): def test_backup_shafile(self):
@ -230,9 +234,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Verify sha contents # Verify sha contents
@ -262,9 +266,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -274,9 +278,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file) service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name) self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files # Compare shas from both files
@ -311,9 +315,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -328,9 +332,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file) service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name) self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup) content1 = service._read_sha256file(backup)
@ -365,9 +369,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertEqual(backup['container'], container_name) self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -382,9 +386,9 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file) service.backup(deltabackup, self.volume_file)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
self.assertEqual(deltabackup['container'], container_name) self.assertEqual(deltabackup['container'], container_name)
# Verify that two shas are changed at index 16 and 20 # Verify that two shas are changed at index 16 and 20
@ -398,7 +402,7 @@ class BackupSwiftTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name) self._create_backup_db_entry(container=container_name)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed, self.assertRaises(exception.SwiftConnectionFailed,
service.backup, service.backup,
backup, self.volume_file) backup, self.volume_file)
@ -414,7 +418,7 @@ class BackupSwiftTestCase(test.TestCase):
self.flags(backup_compression_algorithm='none') self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta): def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake')) raise exception.BackupDriverException(message=_('fake'))
@ -439,7 +443,7 @@ class BackupSwiftTestCase(test.TestCase):
self.flags(backup_compression_algorithm='none') self.flags(backup_compression_algorithm='none')
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
def fake_backup_metadata(self, backup, object_meta): def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake')) raise exception.BackupDriverException(message=_('fake'))
@ -464,7 +468,7 @@ class BackupSwiftTestCase(test.TestCase):
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file: with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.restore(backup, '1234-5678-1234-8888', volume_file) service.restore(backup, '1234-5678-1234-8888', volume_file)
def test_restore_delta(self): def test_restore_delta(self):
@ -492,7 +496,7 @@ class BackupSwiftTestCase(test.TestCase):
fake_swift_client2.FakeSwiftClient2.Connection) fake_swift_client2.FakeSwiftClient2.Connection)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0) self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.backup(backup, self.volume_file) service.backup(backup, self.volume_file)
# Create incremental backup with no change to contents # Create incremental backup with no change to contents
@ -504,12 +508,12 @@ class BackupSwiftTestCase(test.TestCase):
self._create_backup_db_entry(container=container_name, backup_id=124, self._create_backup_db_entry(container=container_name, backup_id=124,
parent_id=123) parent_id=123)
self.volume_file.seek(0) self.volume_file.seek(0)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
service.backup(deltabackup, self.volume_file, True) service.backup(deltabackup, self.volume_file, True)
deltabackup = db.backup_get(self.ctxt, 124) deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
with tempfile.NamedTemporaryFile() as restored_file: with tempfile.NamedTemporaryFile() as restored_file:
backup = db.backup_get(self.ctxt, 124) backup = objects.Backup.get_by_id(self.ctxt, 124)
service.restore(backup, '1234-5678-1234-8888', service.restore(backup, '1234-5678-1234-8888',
restored_file) restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name, self.assertTrue(filecmp.cmp(self.volume_file.name,
@ -521,7 +525,7 @@ class BackupSwiftTestCase(test.TestCase):
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file: with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed, self.assertRaises(exception.SwiftConnectionFailed,
service.restore, service.restore,
backup, '1234-5678-1234-8888', volume_file) backup, '1234-5678-1234-8888', volume_file)
@ -532,7 +536,7 @@ class BackupSwiftTestCase(test.TestCase):
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file: with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
service.restore, service.restore,
backup, '1234-5678-1234-8888', volume_file) backup, '1234-5678-1234-8888', volume_file)
@ -540,14 +544,14 @@ class BackupSwiftTestCase(test.TestCase):
def test_delete(self): def test_delete(self):
self._create_backup_db_entry() self._create_backup_db_entry()
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
service.delete(backup) service.delete(backup)
def test_delete_wraps_socket_error(self): def test_delete_wraps_socket_error(self):
container_name = 'socket_error_on_delete' container_name = 'socket_error_on_delete'
self._create_backup_db_entry(container=container_name) self._create_backup_db_entry(container=container_name)
service = swift_dr.SwiftBackupDriver(self.ctxt) service = swift_dr.SwiftBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123) backup = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed, self.assertRaises(exception.SwiftConnectionFailed,
service.delete, service.delete,
backup) backup)
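
Throughout the Swift driver tests above, only the lookup changes: db.backup_get(self.ctxt, 123) becomes objects.Backup.get_by_id(self.ctxt, 123), while the container assertions keep indexing the result with backup['container'], which still resolves on the returned object. A small sketch of the round trip these tests exercise (backup_and_check_container is a hypothetical helper; ctxt and volume_file are stand-ins, and the ID 123 comes from the fixtures in the diff):

    from cinder import objects
    from cinder.backup.drivers import swift as swift_dr

    def backup_and_check_container(ctxt, volume_file):
        service = swift_dr.SwiftBackupDriver(ctxt)
        backup = objects.Backup.get_by_id(ctxt, 123)
        service.backup(backup, volume_file)
        # Re-read to pick up the container the driver filled in; dict-style
        # indexing still works on the versioned object.
        backup = objects.Backup.get_by_id(ctxt, 123)
        return backup['container']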

View File

@ -29,6 +29,7 @@ from cinder.backup.drivers import tsm
from cinder import context from cinder import context
from cinder import db from cinder import db
from cinder import exception from cinder import exception
from cinder import objects
from cinder import test from cinder import test
from cinder import utils from cinder import utils
@ -260,7 +261,10 @@ class BackupTSMTestCase(test.TestCase):
'size': 1, 'size': 1,
'container': 'test-container', 'container': 'test-container',
'volume_id': '1234-5678-1234-8888', 'volume_id': '1234-5678-1234-8888',
'service_metadata': service_metadata} 'service_metadata': service_metadata,
'user_id': 'user-id',
'project_id': 'project-id',
}
return db.backup_create(self.ctxt, backup)['id'] return db.backup_create(self.ctxt, backup)['id']
def test_backup_image(self): def test_backup_image(self):
@ -277,13 +281,13 @@ class BackupTSMTestCase(test.TestCase):
with open(VOLUME_PATH, 'rw') as volume_file: with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume # Create two backups of the volume
backup1 = db.backup_get(self.ctxt, backup_id1) backup1 = objects.Backup.get_by_id(self.ctxt, backup_id1)
self.driver.backup(backup1, volume_file) self.driver.backup(backup1, volume_file)
backup2 = db.backup_get(self.ctxt, backup_id2) backup2 = objects.Backup.get_by_id(self.ctxt, backup_id2)
self.driver.backup(backup2, volume_file) self.driver.backup(backup2, volume_file)
# Create a backup that fails # Create a backup that fails
fail_back = db.backup_get(self.ctxt, backup_id3) fail_back = objects.Backup.get_by_id(self.ctxt, backup_id3)
self.sim.error_injection('backup', 'fail') self.sim.error_injection('backup', 'fail')
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.driver.backup, fail_back, volume_file) self.driver.backup, fail_back, volume_file)
@ -309,14 +313,14 @@ class BackupTSMTestCase(test.TestCase):
with open(VOLUME_PATH, 'rw') as volume_file: with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume # Create two backups of the volume
backup1 = db.backup_get(self.ctxt, 123) backup1 = objects.Backup.get_by_id(self.ctxt, 123)
self.driver.backup(backup1, volume_file) self.driver.backup(backup1, volume_file)
backup2 = db.backup_get(self.ctxt, 456) backup2 = objects.Backup.get_by_id(self.ctxt, 456)
self.driver.backup(backup2, volume_file) self.driver.backup(backup2, volume_file)
# Create a backup that fails # Create a backup that fails
self._create_backup_db_entry(666, mode) self._create_backup_db_entry(666, mode)
fail_back = db.backup_get(self.ctxt, 666) fail_back = objects.Backup.get_by_id(self.ctxt, 666)
self.sim.error_injection('backup', 'fail') self.sim.error_injection('backup', 'fail')
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.driver.backup, fail_back, volume_file) self.driver.backup, fail_back, volume_file)
@ -340,7 +344,7 @@ class BackupTSMTestCase(test.TestCase):
with open(VOLUME_PATH, 'rw') as volume_file: with open(VOLUME_PATH, 'rw') as volume_file:
# Create two backups of the volume # Create two backups of the volume
backup1 = db.backup_get(self.ctxt, 123) backup1 = objects.Backup.get_by_id(self.ctxt, 123)
self.assertRaises(exception.InvalidBackup, self.assertRaises(exception.InvalidBackup,
self.driver.backup, backup1, volume_file) self.driver.backup, backup1, volume_file)

View File

@ -574,7 +574,9 @@ class TestCinderManageCmd(test.TestCase):
'container': 'fake-container', 'container': 'fake-container',
'status': 'fake-status', 'status': 'fake-status',
'size': 123, 'size': 123,
'object_count': 1} 'object_count': 1,
'volume_id': 'fake-volume-id',
}
backup_get_all.return_value = [backup] backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out: with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s' hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s'
@ -605,7 +607,7 @@ class TestCinderManageCmd(test.TestCase):
backup_cmds.list() backup_cmds.list()
get_admin_context.assert_called_once_with() get_admin_context.assert_called_once_with()
backup_get_all.assert_called_once_with(ctxt) backup_get_all.assert_called_once_with(ctxt, None)
self.assertEqual(expected_out, fake_out.getvalue()) self.assertEqual(expected_out, fake_out.getvalue())
@mock.patch('cinder.utils.service_is_up') @mock.patch('cinder.utils.service_is_up')

View File

@ -64,6 +64,8 @@ objects_ignore_messages = [
"No value passed for parameter 'id' in function call", "No value passed for parameter 'id' in function call",
"Module 'cinder.objects' has no 'Snapshot' member", "Module 'cinder.objects' has no 'Snapshot' member",
"Module 'cinder.objects' has no 'SnapshotList' member", "Module 'cinder.objects' has no 'SnapshotList' member",
"Module 'cinder.objects' has no 'Backup' member",
"Module 'cinder.objects' has no 'BackupList' member",
] ]
objects_ignore_modules = ["cinder/objects/"] objects_ignore_modules = ["cinder/objects/"]
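
The two pylint suppressions added above are needed because Backup and BackupList, like the other versioned objects, are only attached to the cinder.objects namespace at runtime when the object registry imports and registers them, so static analysis cannot see those attributes. Roughly, the registration follows the sketch below (the field list is illustrative and the oslo_versionedobjects.fields import is an assumption, not the patch's exact definition):

    from oslo_versionedobjects import fields
    from cinder.objects import base

    @base.CinderObjectRegistry.register
    class Backup(base.CinderPersistentObject, base.CinderObject,
                 base.CinderObjectDictCompat):
        VERSION = '1.0'
        fields = {
            'id': fields.UUIDField(),
            'volume_id': fields.UUIDField(),
            'status': fields.StringField(nullable=True),
        }

Registration of this kind is what makes objects.Backup and objects.BackupList resolvable at runtime for the code elsewhere in this patch; pylint does not execute that registration step, hence the ignored "has no ... member" messages.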