Merge "Scaling backup service"
This commit is contained in:
commit
bcbfe05a95
@@ -20,13 +20,13 @@ Handles all requests relating to the volume backups service.
 """

 from datetime import datetime

 from eventlet import greenthread
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_utils import strutils
 from pytz import timezone
+import random

 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
@@ -39,9 +39,15 @@ import cinder.policy
 from cinder import quota
 from cinder import utils
 import cinder.volume
 from cinder.volume import utils as volume_utils

+backup_api_opts = [
+    cfg.BoolOpt('backup_use_same_backend',
+                default=False,
+                help='Backup services use same backend.')
+]
+
 CONF = cfg.CONF
+CONF.register_opts(backup_api_opts)
 LOG = logging.getLogger(__name__)
 QUOTAS = quota.QUOTAS

@@ -92,9 +98,6 @@ class API(base.Base):
                 backup.host):
             msg = _('force delete')
             raise exception.NotSupportedOperation(operation=msg)
-        if not self._is_backup_service_enabled(backup['availability_zone'],
-                                               backup.host):
-            raise exception.ServiceNotFound(service_id='cinder-backup')

         # Don't allow backup to be deleted if there are incremental
         # backups dependent on it.
@@ -104,6 +107,8 @@ class API(base.Base):
             raise exception.InvalidBackup(reason=msg)

         backup.status = fields.BackupStatus.DELETING
+        backup.host = self._get_available_backup_service_host(
+            backup.host, backup.availability_zone)
         backup.save()
         self.backup_rpcapi.delete_backup(context, backup)

@@ -130,6 +135,10 @@ class API(base.Base):

         return backups

+    def _az_matched(self, service, availability_zone):
+        return ((not availability_zone) or
+                service.availability_zone == availability_zone)
+
     def _is_backup_service_enabled(self, availability_zone, host):
         """Check if there is a backup service available."""
         topic = CONF.backup_topic
@@ -137,12 +146,42 @@ class API(base.Base):
         services = objects.ServiceList.get_all_by_topic(
            ctxt, topic, disabled=False)
         for srv in services:
-            if (srv.availability_zone == availability_zone and
+            if (self._az_matched(srv, availability_zone) and
                    srv.host == host and
                    utils.service_is_up(srv)):
                 return True
         return False

+    def _get_any_available_backup_service(self, availability_zone):
+        """Get an available backup service host.
+
+        Get an available backup service host in the specified
+        availability zone.
+        """
+        services = [srv for srv in self._list_backup_services()]
+        random.shuffle(services)
+        # Get the next running service with matching availability zone.
+        idx = 0
+        while idx < len(services):
+            srv = services[idx]
+            if(self._az_matched(srv, availability_zone) and
+               utils.service_is_up(srv)):
+                return srv.host
+            idx = idx + 1
+        return None
+
+    def _get_available_backup_service_host(self, host, availability_zone):
+        """Return an appropriate backup service host."""
+        backup_host = None
+        if host and self._is_backup_service_enabled(availability_zone, host):
+            backup_host = host
+        if not backup_host and (not host or CONF.backup_use_same_backend):
+            backup_host = self._get_any_available_backup_service(
+                availability_zone)
+        if not backup_host:
+            raise exception.ServiceNotFound(service_id='cinder-backup')
+        return backup_host
+
     def _list_backup_services(self):
         """List all enabled backup services.

@@ -150,8 +189,14 @@ class API(base.Base):
         """
         topic = CONF.backup_topic
         ctxt = context.get_admin_context()
-        services = objects.ServiceList.get_all_by_topic(ctxt, topic)
-        return [srv.host for srv in services if not srv.disabled]
+        services = objects.ServiceList.get_all_by_topic(
+            ctxt, topic, disabled=False)
+        return services
+
+    def _list_backup_hosts(self):
+        services = self._list_backup_services()
+        return [srv.host for srv in services
+                if not srv.disabled and utils.service_is_up(srv)]

     def create(self, context, name, description, volume_id,
                container, incremental=False, availability_zone=None,
@@ -179,10 +224,8 @@ class API(base.Base):
             raise exception.InvalidSnapshot(reason=msg)

         previous_status = volume['status']
-        volume_host = volume_utils.extract_host(volume['host'], 'host')
-        if not self._is_backup_service_enabled(volume['availability_zone'],
-                                               volume_host):
-            raise exception.ServiceNotFound(service_id='cinder-backup')
+        host = self._get_available_backup_service_host(
+            None, volume.availability_zone)

         # Reserve a quota before setting volume status and backup status
         try:
@@ -284,7 +327,7 @@ class API(base.Base):
             'container': container,
             'parent_id': parent_id,
             'size': volume['size'],
-            'host': volume_host,
+            'host': host,
             'snapshot_id': snapshot_id,
             'data_timestamp': data_timestamp,
         }
@@ -364,14 +407,15 @@ class API(base.Base):

         # Setting the status here rather than setting at start and unrolling
         # for each error condition, it should be a very small window
+        backup.host = self._get_available_backup_service_host(
+            backup.host, backup.availability_zone)
         backup.status = fields.BackupStatus.RESTORING
         backup.restore_volume_id = volume.id
         backup.save()
-        volume_host = volume_utils.extract_host(volume.host, 'host')
         self.db.volume_update(context, volume_id, {'status':
                                                    'restoring-backup'})

-        self.backup_rpcapi.restore_backup(context, volume_host, backup,
+        self.backup_rpcapi.restore_backup(context, backup.host, backup,
                                           volume_id)

         d = {'backup_id': backup_id,
@@ -391,6 +435,9 @@ class API(base.Base):
         """
         # get backup info
         backup = self.get(context, backup_id)
+        backup.host = self._get_available_backup_service_host(
+            backup.host, backup.availability_zone)
+        backup.save()
         # send to manager to do reset operation
         self.backup_rpcapi.reset_status(ctxt=context, backup=backup,
                                         status=status)
@@ -418,6 +465,10 @@ class API(base.Base):
                  {'ctx': context,
                   'host': backup['host'],
                   'id': backup['id']})

+        backup.host = self._get_available_backup_service_host(
+            backup.host, backup.availability_zone)
+        backup.save()
         export_data = self.backup_rpcapi.export_record(context, backup)

         return export_data
@@ -502,7 +553,7 @@ class API(base.Base):
         # We send it to the first backup service host, and the backup manager
         # on that host will forward it to other hosts on the hosts list if it
         # cannot support correct service itself.
-        hosts = self._list_backup_services()
+        hosts = self._list_backup_hosts()
         if len(hosts) == 0:
             raise exception.ServiceNotFound(service_id=backup_service)
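
The API-side hunks above add the host-selection helpers (_az_matched, _get_any_available_backup_service, _get_available_backup_service_host) that let delete, restore, reset-status and export pick any live backup service instead of only the one that originally created the backup. A minimal standalone sketch of that selection logic, with plain dicts standing in for cinder's Service objects and an 'is_up' flag standing in for utils.service_is_up:

    import random


    class ServiceNotFound(Exception):
        pass


    def pick_backup_host(services, host, availability_zone,
                         use_same_backend=False):
        # Prefer the original host if it still runs a live, enabled backup
        # service; otherwise, when no host was given or the
        # backup_use_same_backend option is set, fall back to any live
        # service in the availability zone.
        def az_matched(srv):
            return (not availability_zone or
                    srv['availability_zone'] == availability_zone)

        backup_host = None
        if host and any(az_matched(srv) and srv['host'] == host and
                        srv['is_up'] for srv in services):
            backup_host = host
        if not backup_host and (not host or use_same_backend):
            candidates = [srv for srv in services
                          if az_matched(srv) and srv['is_up']]
            random.shuffle(candidates)  # spread load across the services
            if candidates:
                backup_host = candidates[0]['host']
        if not backup_host:
            raise ServiceNotFound('cinder-backup')
        return backup_host
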
@@ -51,6 +51,7 @@ from cinder.objects import fields
 from cinder import quota
 from cinder import rpc
 from cinder import utils
+from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import utils as volume_utils

 LOG = logging.getLogger(__name__)
@@ -72,6 +73,8 @@ mapper = {'cinder.backup.services.swift': 'cinder.backup.drivers.swift',

 CONF = cfg.CONF
 CONF.register_opts(backup_manager_opts)
+CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver')
+CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver')
 QUOTAS = quota.QUOTAS


@@ -86,8 +89,8 @@ class BackupManager(manager.SchedulerDependentManager):
         self.service = importutils.import_module(self.driver_name)
         self.az = CONF.storage_availability_zone
         self.volume_managers = {}
-        self._setup_volume_drivers()
         self.backup_rpcapi = backup_rpcapi.BackupAPI()
+        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
         super(BackupManager, self).__init__(service_name='backup',
                                             *args, **kwargs)
@@ -104,90 +107,6 @@ class BackupManager(manager.SchedulerDependentManager):
             return mapper[service]
         return service

-    @property
-    def driver(self):
-        return self._get_driver()
-
-    def _get_volume_backend(self, host=None, allow_null_host=False):
-        if host is None:
-            if not allow_null_host:
-                msg = _("NULL host not allowed for volume backend lookup.")
-                raise exception.BackupFailedToGetVolumeBackend(msg)
-        else:
-            LOG.debug("Checking hostname '%s' for backend info.", host)
-            part = host.partition('@')
-            if (part[1] == '@') and (part[2] != ''):
-                backend = part[2]
-                LOG.debug("Got backend '%s'.", backend)
-                return backend
-
-        LOG.info(_LI("Backend not found in hostname (%s) so using default."),
-                 host)
-
-        if 'default' not in self.volume_managers:
-            # For multi-backend we just pick "first" from volume managers dict
-            return next(iter(self.volume_managers))
-
-        return 'default'
-
-    def _get_manager(self, backend):
-        LOG.debug("Manager requested for volume_backend '%s'.",
-                  backend)
-        if backend is None:
-            LOG.debug("Fetching default backend.")
-            backend = self._get_volume_backend(allow_null_host=True)
-        if backend not in self.volume_managers:
-            msg = (_("Volume manager for backend '%s' does not exist.") %
-                   (backend))
-            raise exception.BackupFailedToGetVolumeBackend(msg)
-        return self.volume_managers[backend]
-
-    def _get_driver(self, backend=None):
-        LOG.debug("Driver requested for volume_backend '%s'.",
-                  backend)
-        if backend is None:
-            LOG.debug("Fetching default backend.")
-            backend = self._get_volume_backend(allow_null_host=True)
-        mgr = self._get_manager(backend)
-        mgr.driver.db = self.db
-        return mgr.driver
-
-    def _setup_volume_drivers(self):
-        if CONF.enabled_backends:
-            for backend in CONF.enabled_backends:
-                host = "%s@%s" % (CONF.host, backend)
-                mgr = importutils.import_object(CONF.volume_manager,
-                                                host=host,
-                                                service_name=backend)
-                config = mgr.configuration
-                backend_name = config.safe_get('volume_backend_name')
-                LOG.debug("Registering backend %(backend)s (host=%(host)s "
-                          "backend_name=%(backend_name)s).",
-                          {'backend': backend, 'host': host,
-                           'backend_name': backend_name})
-                self.volume_managers[backend] = mgr
-        else:
-            default = importutils.import_object(CONF.volume_manager)
-            LOG.debug("Registering default backend %s.", default)
-            self.volume_managers['default'] = default
-
-    def _init_volume_driver(self, ctxt, driver):
-        LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)."),
-                 {'driver_name': driver.__class__.__name__,
-                  'version': driver.get_version()})
-        try:
-            driver.do_setup(ctxt)
-            driver.check_for_setup_error()
-        except Exception:
-            LOG.exception(_LE("Error encountered during initialization of "
-                              "driver: %(name)s."),
-                          {'name': driver.__class__.__name__})
-            # we don't want to continue since we failed
-            # to initialize the driver correctly.
-            return
-
-        driver.set_initialized()
-
     def _update_backup_error(self, backup, context, err):
         backup.status = fields.BackupStatus.ERROR
         backup.fail_reason = err
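
The block of deletions above removes the backup manager's old volume-driver plumbing: it used to parse the backend out of a 'host@backend' string, import a volume manager per backend, and call volume drivers directly. After this change those operations travel over the volume RPC API instead. For reference, a tiny standalone illustration of the host@backend parsing the removed _get_volume_backend performed:

    def backend_from_host(host):
        # 'node1@lvm' encodes backend 'lvm'; a bare hostname has none.
        name, sep, backend = host.partition('@')
        if sep == '@' and backend:
            return backend
        return None


    assert backend_from_host('node1@lvm') == 'lvm'
    assert backend_from_host('node1') is None
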
@@ -197,9 +116,6 @@ class BackupManager(manager.SchedulerDependentManager):
         """Run initialization needed for a standalone service."""
         ctxt = context.get_admin_context()

-        for mgr in self.volume_managers.values():
-            self._init_volume_driver(ctxt, mgr.driver)
-
         try:
             self._cleanup_incomplete_backup_operations(ctxt)
         except Exception:
@@ -209,14 +125,6 @@ class BackupManager(manager.SchedulerDependentManager):

     def _cleanup_incomplete_backup_operations(self, ctxt):
         LOG.info(_LI("Cleaning up incomplete backup operations."))
-        volumes = self.db.volume_get_all_by_host(ctxt, self.host)
-
-        for volume in volumes:
-            try:
-                self._cleanup_one_volume(ctxt, volume)
-            except Exception:
-                LOG.exception(_LE("Problem cleaning up volume %(vol)s."),
-                              {'vol': volume['id']})

         # TODO(smulcahy) implement full resume of backup and restore
         # operations on restart (rather than simply resetting)
@@ -236,11 +144,8 @@ class BackupManager(manager.SchedulerDependentManager):
                           {'bkup': backup['id']})

     def _cleanup_one_volume(self, ctxt, volume):
-        volume_host = volume_utils.extract_host(volume['host'], 'backend')
-        backend = self._get_volume_backend(host=volume_host)
-        mgr = self._get_manager(backend)
         if volume['status'] == 'backing-up':
-            self._detach_all_attachments(ctxt, mgr, volume)
+            self._detach_all_attachments(ctxt, volume)
             LOG.info(_LI('Resetting volume %(vol_id)s to previous '
                          'status %(status)s (was backing-up).'),
                      {'vol_id': volume['id'],
@@ -248,7 +153,7 @@ class BackupManager(manager.SchedulerDependentManager):
             self.db.volume_update(ctxt, volume['id'],
                                   {'status': volume['previous_status']})
         elif volume['status'] == 'restoring-backup':
-            self._detach_all_attachments(ctxt, mgr, volume)
+            self._detach_all_attachments(ctxt, volume)
             LOG.info(_LI('setting volume %s to error_restoring '
                          '(was restoring-backup).'), volume['id'])
             self.db.volume_update(ctxt, volume['id'],
@@ -258,15 +163,22 @@ class BackupManager(manager.SchedulerDependentManager):
         if backup['status'] == fields.BackupStatus.CREATING:
             LOG.info(_LI('Resetting backup %s to error (was creating).'),
                      backup['id'])
+
+            volume = objects.Volume.get_by_id(ctxt, backup.volume_id)
+            self._cleanup_one_volume(ctxt, volume)
+
             err = 'incomplete backup reset on manager restart'
             self._update_backup_error(backup, ctxt, err)
-        if backup['status'] == fields.BackupStatus.RESTORING:
+        elif backup['status'] == fields.BackupStatus.RESTORING:
             LOG.info(_LI('Resetting backup %s to '
                          'available (was restoring).'),
                      backup['id'])
+            volume = objects.Volume.get_by_id(ctxt, backup.restore_volume_id)
+            self._cleanup_one_volume(ctxt, volume)
+
             backup.status = fields.BackupStatus.AVAILABLE
             backup.save()
-        if backup['status'] == fields.BackupStatus.DELETING:
+        elif backup['status'] == fields.BackupStatus.DELETING:
             LOG.info(_LI('Resuming delete on backup: %s.'), backup['id'])
             if CONF.backup_service_inithost_offload:
                 # Offload all the pending backup delete operations to the
@@ -277,97 +189,87 @@ class BackupManager(manager.SchedulerDependentManager):
                 # By default, delete backups sequentially
                 self.delete_backup(ctxt, backup)

     def is_working(self):
         """Return if Manager is ready to accept requests.

         This is to inform Service class that in case of volume manager(s)
         initialization failure the manager is actually down and
         may not accept some or all requests.
         """
         return all(mgr.is_working() for mgr in self.volume_managers.values())

-    def _detach_all_attachments(self, ctxt, mgr, volume):
+    def _detach_all_attachments(self, ctxt, volume):
         attachments = volume['volume_attachment'] or []
         for attachment in attachments:
             if (attachment['attached_host'] == self.host and
                     attachment['instance_uuid'] is None):
                 try:
-                    mgr.detach_volume(ctxt, volume['id'],
-                                      attachment['id'])
+                    rpcapi = self.volume_rpcapi
+                    rpcapi.detach_volume(ctxt, volume, attachment['id'])
                 except Exception:
                     LOG.exception(_LE("Detach attachment %(attach_id)s"
                                       " failed."),
                                   {'attach_id': attachment['id']},
                                   resource=volume)

+    def _delete_temp_volume(self, ctxt, backup):
+        try:
+            temp_volume = objects.Volume.get_by_id(
+                ctxt, backup.temp_volume_id)
+            self.volume_rpcapi.delete_volume(ctxt, temp_volume)
+        except exception.VolumeNotFound:
+            LOG.debug("Could not find temp volume %(vol)s to clean up "
+                      "for backup %(backup)s.",
+                      {'vol': backup.temp_volume_id,
+                       'backup': backup.id})
+        backup.temp_volume_id = None
+        backup.save()
+
+    def _delete_temp_snapshot(self, ctxt, backup):
+        try:
+            temp_snapshot = objects.Snapshot.get_by_id(
+                ctxt, backup.temp_snapshot_id)
+            volume = objects.Volume.get_by_id(
+                ctxt, backup.volume_id)
+            # The temp snapshot should be deleted directly thru the
+            # volume driver, not thru the volume manager.
+            self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot,
+                                               volume.host)
+        except exception.SnapshotNotFound:
+            LOG.debug("Could not find temp snapshot %(snap)s to clean "
+                      "up for backup %(backup)s.",
+                      {'snap': backup.temp_snapshot_id,
+                       'backup': backup.id})
+        backup.temp_snapshot_id = None
+        backup.save()
+
     def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup):
         # NOTE(xyang): If the service crashes or gets restarted during the
         # backup operation, there could be temporary volumes or snapshots
         # that are not deleted. Make sure any temporary volumes or snapshots
         # create by the backup job are deleted when service is started.
         try:
             volume = self.db.volume_get(ctxt, backup.volume_id)
-            volume_host = volume_utils.extract_host(volume['host'],
-                                                    'backend')
-            backend = self._get_volume_backend(host=volume_host)
-            mgr = self._get_manager(backend)
         except (KeyError, exception.VolumeNotFound):
             LOG.debug("Could not find a volume to clean up for "
                       "backup %s.", backup.id)
             return

         if (backup.temp_volume_id
                 and backup.status == fields.BackupStatus.ERROR):
-            try:
-                temp_volume = self.db.volume_get(ctxt,
-                                                 backup.temp_volume_id)
-                # The temp volume should be deleted directly thru the
-                # the volume driver, not thru the volume manager.
-                mgr.driver.delete_volume(temp_volume)
-                self.db.volume_destroy(ctxt, temp_volume['id'])
-            except exception.VolumeNotFound:
-                LOG.debug("Could not find temp volume %(vol)s to clean up "
-                          "for backup %(backup)s.",
-                          {'vol': backup.temp_volume_id,
-                           'backup': backup.id})
-            backup.temp_volume_id = None
-            backup.save()
+            self._delete_temp_volume(ctxt, backup)

         if (backup.temp_snapshot_id
                 and backup.status == fields.BackupStatus.ERROR):
-            try:
-                temp_snapshot = objects.Snapshot.get_by_id(
-                    ctxt, backup.temp_snapshot_id)
-                # The temp snapshot should be deleted directly thru the
-                # volume driver, not thru the volume manager.
-                mgr.driver.delete_snapshot(temp_snapshot)
-                with temp_snapshot.obj_as_admin():
-                    self.db.volume_glance_metadata_delete_by_snapshot(
-                        ctxt, temp_snapshot.id)
-                temp_snapshot.destroy()
-            except exception.SnapshotNotFound:
-                LOG.debug("Could not find temp snapshot %(snap)s to clean "
-                          "up for backup %(backup)s.",
-                          {'snap': backup.temp_snapshot_id,
-                           'backup': backup.id})
-            backup.temp_snapshot_id = None
-            backup.save()
+            self._delete_temp_snapshot(ctxt, backup)

     def _cleanup_temp_volumes_snapshots_when_backup_created(
             self, ctxt, backup):
         # Delete temp volumes or snapshots when backup creation is completed.
         if backup.temp_volume_id:
             self._delete_temp_volume(ctxt, backup)

         if backup.temp_snapshot_id:
             self._delete_temp_snapshot(ctxt, backup)

     def create_backup(self, context, backup):
         """Create volume backups using configured backup service."""
         volume_id = backup.volume_id
-        volume = self.db.volume_get(context, volume_id)
+        volume = objects.Volume.get_by_id(context, volume_id)
         previous_status = volume.get('previous_status', None)
         LOG.info(_LI('Create backup started, backup: %(backup_id)s '
                      'volume: %(volume_id)s.'),
                  {'backup_id': backup.id, 'volume_id': volume_id})

         self._notify_about_backup_usage(context, backup, "create.start")
-        volume_host = volume_utils.extract_host(volume['host'], 'backend')
-        backend = self._get_volume_backend(host=volume_host)

         backup.host = self.host
         backup.service = self.driver_name
         backup.availability_zone = self.az
         backup.save()

         expected_status = 'backing-up'
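
The new _delete_temp_volume/_delete_temp_snapshot helpers above replace two near-identical inline try blocks and route deletion through the volume RPC API. Their shared shape, as a condensed sketch (delete_fn and not_found_exc are stand-ins for the RPC call and its not-found exception):

    def delete_temp_resource(backup, id_attr, delete_fn, not_found_exc):
        # Try the delete, tolerate the resource already being gone, then
        # always clear the temp id so cleanup is not retried forever.
        try:
            delete_fn(getattr(backup, id_attr))
        except not_found_exc:
            pass  # already gone; nothing to clean up
        setattr(backup, id_attr, None)
        backup.save()
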
@@ -394,15 +296,7 @@ class BackupManager(manager.SchedulerDependentManager):
             raise exception.InvalidBackup(reason=err)

         try:
-            # NOTE(flaper87): Verify the driver is enabled
-            # before going forward. The exception will be caught,
-            # the volume status will be set back to available and
-            # the backup status to 'error'
-            utils.require_driver_initialized(self._get_driver(backend))
-
-            backup_service = self.service.get_backup_driver(context)
-            self._get_driver(backend).backup_volume(context, backup,
-                                                    backup_service)
+            self._run_backup(context, backup, volume)
         except Exception as err:
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context, volume_id,
@@ -416,8 +310,8 @@ class BackupManager(manager.SchedulerDependentManager):
                                'previous_status': 'backing-up'})
         backup.status = fields.BackupStatus.AVAILABLE
         backup.size = volume['size']
-        backup.availability_zone = self.az
         backup.save()

         # Handle the num_dependent_backups of parent backup when child backup
         # has created successfully.
         if backup.parent_id:
@@ -428,15 +322,46 @@ class BackupManager(manager.SchedulerDependentManager):
         LOG.info(_LI('Create backup finished. backup: %s.'), backup.id)
         self._notify_about_backup_usage(context, backup, "create.end")

+    def _run_backup(self, context, backup, volume):
+        backup_service = self.service.get_backup_driver(context)
+
+        properties = utils.brick_get_connector_properties()
+        backup_dic = self.volume_rpcapi.get_backup_device(context,
+                                                          backup, volume)
+        try:
+            backup_device = backup_dic.get('backup_device')
+            is_snapshot = backup_dic.get('is_snapshot')
+            attach_info = self._attach_device(context, backup_device,
+                                              properties, is_snapshot)
+            try:
+                device_path = attach_info['device']['path']
+                if isinstance(device_path, six.string_types):
+                    if backup_dic.get('secure_enabled', False):
+                        with open(device_path) as device_file:
+                            backup_service.backup(backup, device_file)
+                    else:
+                        with utils.temporary_chown(device_path):
+                            with open(device_path) as device_file:
+                                backup_service.backup(backup, device_file)
+                else:
+                    backup_service.backup(backup, device_path)
+
+            finally:
+                self._detach_device(context, attach_info,
+                                    backup_device, properties,
+                                    is_snapshot)
+        finally:
+            backup = objects.Backup.get_by_id(context, backup.id)
+            self._cleanup_temp_volumes_snapshots_when_backup_created(
+                context, backup)
+
     def restore_backup(self, context, backup, volume_id):
         """Restore volume backups from configured backup service."""
         LOG.info(_LI('Restore backup started, backup: %(backup_id)s '
                      'volume: %(volume_id)s.'),
                  {'backup_id': backup.id, 'volume_id': volume_id})

-        volume = self.db.volume_get(context, volume_id)
-        volume_host = volume_utils.extract_host(volume['host'], 'backend')
-        backend = self._get_volume_backend(host=volume_host)
+        volume = objects.Volume.get_by_id(context, volume_id)
         self._notify_about_backup_usage(context, backup, "restore.start")

         backup.host = self.host
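
A condensed, standalone sketch of the _run_backup control flow introduced above (attach_device/detach_device are stand-ins for the manager's _attach_device/_detach_device; backup_service is any object with a backup(backup, file_or_path) method; the temporary_chown step of the real code is elided):

    def run_backup_sketch(backup, backup_service, attach_device,
                          detach_device):
        attach_info = attach_device()
        try:
            device_path = attach_info['device']['path']
            if isinstance(device_path, str):
                # File-backed device: hand the driver an open file object.
                with open(device_path) as device_file:
                    backup_service.backup(backup, device_file)
            else:
                # Some connectors return a handle object instead of a path.
                backup_service.backup(backup, device_path)
        finally:
            # Always detach, mirroring the try/finally bracket above.
            detach_device(attach_info)
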
@@ -489,16 +414,7 @@ class BackupManager(manager.SchedulerDependentManager):
             raise exception.InvalidBackup(reason=err)

         try:
-            # NOTE(flaper87): Verify the driver is enabled
-            # before going forward. The exception will be caught,
-            # the volume status will be set back to available and
-            # the backup status to 'error'
-            utils.require_driver_initialized(self._get_driver(backend))
-
-            backup_service = self.service.get_backup_driver(context)
-            self._get_driver(backend).restore_backup(context, backup,
-                                                     volume,
-                                                     backup_service)
+            self._run_restore(context, backup, volume)
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.db.volume_update(context, volume_id,
@@ -514,20 +430,34 @@ class BackupManager(manager.SchedulerDependentManager):
                  {'backup_id': backup.id, 'volume_id': volume_id})
         self._notify_about_backup_usage(context, backup, "restore.end")

+    def _run_restore(self, context, backup, volume):
+        backup_service = self.service.get_backup_driver(context)
+
+        properties = utils.brick_get_connector_properties()
+        secure_enabled = (
+            self.volume_rpcapi.secure_file_operations_enabled(context,
+                                                              volume))
+        attach_info = self._attach_device(context, volume, properties)
+        try:
+            device_path = attach_info['device']['path']
+            if isinstance(device_path, six.string_types):
+                if secure_enabled:
+                    with open(device_path, 'wb') as device_file:
+                        backup_service.restore(backup, volume.id, device_file)
+                else:
+                    with utils.temporary_chown(device_path):
+                        with open(device_path, 'wb') as device_file:
+                            backup_service.restore(backup, volume.id,
+                                                   device_file)
+            else:
+                backup_service.restore(backup, volume.id, device_path)
+        finally:
+            self._detach_device(context, attach_info, volume, properties)
+
     def delete_backup(self, context, backup):
         """Delete volume backup from configured backup service."""
         LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)

         try:
             # NOTE(flaper87): Verify the driver is enabled
             # before going forward. The exception will be caught
             # and the backup status updated. Fail early since there
             # are no other status to change but backup's
             utils.require_driver_initialized(self.driver)
         except exception.DriverNotInitialized as err:
             with excutils.save_and_reraise_exception():
                 self._update_backup_error(backup, context, six.text_type(err))

         self._notify_about_backup_usage(context, backup, "delete.start")
         backup.host = self.host
         backup.save()
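
_run_restore is the mirror image of _run_backup: attach the target volume, open its device for writing, and let backup_service.restore() fill it. Under the same stand-in assumptions as the backup sketch:

    def run_restore_sketch(backup, volume_id, backup_service,
                           attach_device, detach_device):
        attach_info = attach_device()
        try:
            device_path = attach_info['device']['path']
            if isinstance(device_path, str):
                with open(device_path, 'wb') as device_file:
                    backup_service.restore(backup, volume_id, device_file)
            else:
                backup_service.restore(backup, volume_id, device_path)
        finally:
            detach_device(attach_info)
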
@@ -642,7 +572,6 @@ class BackupManager(manager.SchedulerDependentManager):

         # Call driver to create backup description string
         try:
-            utils.require_driver_initialized(self.driver)
             backup_service = self.service.get_backup_driver(context)
             driver_info = backup_service.export_record(backup)
             backup_url = backup.encode_record(driver_info=driver_info)
@@ -699,7 +628,6 @@ class BackupManager(manager.SchedulerDependentManager):

             # Extract driver specific info and pass it to the driver
             driver_options = backup_options.pop('driver_info', {})
-            utils.require_driver_initialized(self.driver)
             backup_service = self.service.get_backup_driver(context)
             backup_service.import_record(backup, driver_options)
         except Exception as err:
@@ -783,15 +711,6 @@ class BackupManager(manager.SchedulerDependentManager):
                      '%(backup_id)s, status: %(status)s.'),
                  {'backup_id': backup.id,
                   'status': status})
-        try:
-            # NOTE(flaper87): Verify the driver is enabled
-            # before going forward. The exception will be caught
-            # and the backup status updated. Fail early since there
-            # are no other status to change but backup's
-            utils.require_driver_initialized(self.driver)
-        except exception.DriverNotInitialized:
-            with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("Backup driver has not been initialized"))

         backup_service = self._map_service_to_driver(backup.service)
         LOG.info(_LI('Backup service: %s.'), backup_service)
@@ -877,3 +796,57 @@ class BackupManager(manager.SchedulerDependentManager):
         """
         backup_service = self.service.get_backup_driver(context)
         return backup_service.support_force_delete

+    def _attach_device(self, context, backup_device,
+                       properties, is_snapshot=False):
+        """Attach backup device."""
+        if not is_snapshot:
+            return self._attach_volume(context, backup_device, properties)
+        else:
+            msg = _("Can't attach snapshot.")
+            raise NotImplementedError(msg)
+
+    def _attach_volume(self, context, volume, properties):
+        """Attach a volume."""
+
+        try:
+            conn = self.volume_rpcapi.initialize_connection(context,
+                                                            volume,
+                                                            properties)
+            return self._connect_device(conn)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                try:
+                    self.volume_rpcapi.terminate_connection(context, volume,
+                                                            properties,
+                                                            force=True)
+                except Exception:
+                    LOG.warning(_LW("Failed to terminate the connection "
+                                    "of volume %(volume_id)s, but it is "
+                                    "acceptable."),
+                                {'volume_id', volume.id})
+
+    def _connect_device(self, conn):
+        """Establish connection to device."""
+        use_multipath = CONF.use_multipath_for_image_xfer
+        device_scan_attempts = CONF.num_volume_device_scan_tries
+        protocol = conn['driver_volume_type']
+        connector = utils.brick_get_connector(
+            protocol,
+            use_multipath=use_multipath,
+            device_scan_attempts=device_scan_attempts,
+            conn=conn)
+        vol_handle = connector.connect_volume(conn['data'])
+
+        return {'conn': conn, 'device': vol_handle, 'connector': connector}
+
+    def _detach_device(self, context, attach_info, volume,
+                       properties, is_snapshot=False, force=False):
+        """Disconnect the volume from the host. """
+        connector = attach_info['connector']
+        connector.disconnect_volume(attach_info['conn']['data'],
+                                    attach_info['device'])
+
+        rpcapi = self.volume_rpcapi
+        rpcapi.terminate_connection(context, volume, properties, force=force)
+        rpcapi.remove_export(context, volume)
@@ -19,6 +19,7 @@ from cinder.api.middleware import auth as cinder_api_middleware_auth
 from cinder.api.middleware import sizelimit as cinder_api_middleware_sizelimit
 from cinder.api.v2 import volumes as cinder_api_v2_volumes
 from cinder.api.views import versions as cinder_api_views_versions
+from cinder.backup import api as cinder_backup_api
 from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver
 from cinder.backup import driver as cinder_backup_driver
 from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph
@@ -289,6 +290,7 @@ def list_opts():
                 cinder_volume_drivers_hitachi_hnasiscsi.iSCSI_OPTS,
                 cinder_volume_drivers_rbd.rbd_opts,
                 cinder_volume_drivers_tintri.tintri_opts,
+                cinder_backup_api.backup_api_opts,
                 cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts,
                 cinder_backup_manager.backup_manager_opts,
                 cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon.
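
The two one-line hunks above exist because cinder aggregates every module's option list in cinder/opts.py's list_opts(), which oslo's sample-config generator consumes; a new option list stays invisible to the generated configuration sample until it is registered there. The general shape of such an entry (illustrative, not the full cinder list):

    from cinder.backup import api as cinder_backup_api


    def list_opts():
        # Each entry is (group name, iterable of Opt objects); 'DEFAULT'
        # places the options in the [DEFAULT] section of the sample file.
        return [
            ('DEFAULT', cinder_backup_api.backup_api_opts),
        ]
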
@@ -106,8 +106,11 @@ class AdminActionsTest(BaseAdminTest):
         req.headers['content-type'] = 'application/json'
         req.body = jsonutils.dump_as_bytes({'os-reset_status': updated_status})
         req.environ['cinder.context'] = ctx
-        resp = req.get_response(app())
-        return resp
+        with mock.patch('cinder.backup.api.API._is_backup_service_enabled') \
+                as mock_is_service_available:
+            mock_is_service_available.return_value = True
+            resp = req.get_response(app())
+            return resp

     def test_valid_updates(self):
         vac = admin_actions.VolumeAdminController()
@@ -206,7 +209,8 @@ class AdminActionsTest(BaseAdminTest):
                                         'size': 1,
                                         'volume_id': volume['id'],
                                         'user_id': 'user',
-                                        'project_id': 'project'})
+                                        'project_id': 'project',
+                                        'host': 'test'})

         resp = self._issue_backup_reset(self.ctx,
                                         backup,
@@ -218,7 +222,8 @@ class AdminActionsTest(BaseAdminTest):
         ctx = context.RequestContext('fake', 'fake')
         backup = db.backup_create(ctx, {'status': 'available',
                                         'size': 1,
-                                        'volume_id': "fakeid"})
+                                        'volume_id': "fakeid",
+                                        'host': 'test'})
         resp = self._issue_backup_reset(ctx,
                                         backup,
                                         {'status': fields.BackupStatus.ERROR})
@@ -233,7 +238,8 @@ class AdminActionsTest(BaseAdminTest):
                                        {'status': fields.BackupStatus.AVAILABLE,
                                         'volume_id': volume['id'],
                                         'user_id': 'user',
-                                        'project_id': 'project'})
+                                        'project_id': 'project',
+                                        'host': 'test'})

         resp = self._issue_backup_reset(self.ctx,
                                         backup,
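
The admin-action tests gain two things: a 'host' value on the backup rows they create, and a stubbed _is_backup_service_enabled, because reset-status now resolves a live backup host before sending the RPC. The same stubbing pattern in isolation (standard mock usage; req and app as in the test above):

    with mock.patch('cinder.backup.api.API._is_backup_service_enabled',
                    return_value=True):
        # Requests issued inside this block see an enabled, reachable
        # cinder-backup service, so the status reset is not rejected.
        resp = req.get_response(app())
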
@@ -558,7 +558,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_create_backup_json(self, mock_validate,
                                 _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5)['id']
@@ -589,7 +589,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_create_backup_inuse_no_force(self,
                                           _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5,
@@ -619,7 +619,7 @@ class BackupsAPITestCase(test.TestCase):
     @mock.patch('cinder.db.service_get_all_by_topic')
     def test_create_backup_inuse_force(self, _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5,
@@ -655,7 +655,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_create_backup_snapshot_json(self, mock_validate,
                                          _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5,
@@ -688,7 +688,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_create_backup_xml(self, mock_validate,
                                _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=2)['id']
@@ -723,7 +723,7 @@ class BackupsAPITestCase(test.TestCase):
                                              mock_validate,
                                              _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5)['id']
@@ -766,7 +766,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_create_incremental_backup_invalid_status(
             self, _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5)['id']
@@ -906,7 +906,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_create_incremental_backup_invalid_no_full(
             self, _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "fake_az", 'host': 'test_host',
+            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

         volume_id = utils.create_volume(self.context, size=5,
|
||||
@mock.patch('cinder.db.service_get_all_by_topic')
|
||||
def test_is_backup_service_enabled(self, _mock_service_get_all_by_topic):
|
||||
|
||||
test_host = 'test_host'
|
||||
testhost = 'test_host'
|
||||
alt_host = 'strange_host'
|
||||
empty_service = []
|
||||
# service host not match with volume's host
|
||||
host_not_match = [{'availability_zone': "fake_az", 'host': alt_host,
|
||||
host_not_match = [{'availability_zone': 'fake_az', 'host': alt_host,
|
||||
'disabled': 0, 'updated_at': timeutils.utcnow()}]
|
||||
# service az not match with volume's az
|
||||
az_not_match = [{'availability_zone': "strange_az", 'host': test_host,
|
||||
az_not_match = [{'availability_zone': 'strange_az', 'host': testhost,
|
||||
'disabled': 0, 'updated_at': timeutils.utcnow()}]
|
||||
# service disabled
|
||||
disabled_service = []
|
||||
|
||||
# dead service that last reported at 20th century
|
||||
dead_service = [{'availability_zone': "fake_az", 'host': alt_host,
|
||||
dead_service = [{'availability_zone': 'fake_az', 'host': alt_host,
|
||||
'disabled': 0, 'updated_at': '1989-04-16 02:55:44'}]
|
||||
|
||||
# first service's host not match but second one works.
|
||||
multi_services = [{'availability_zone': "fake_az", 'host': alt_host,
|
||||
multi_services = [{'availability_zone': 'fake_az', 'host': alt_host,
|
||||
'disabled': 0, 'updated_at': timeutils.utcnow()},
|
||||
{'availability_zone': "fake_az", 'host': test_host,
|
||||
{'availability_zone': 'fake_az', 'host': testhost,
|
||||
'disabled': 0, 'updated_at': timeutils.utcnow()}]
|
||||
|
||||
# Setup mock to run through the following service cases
|
||||
@ -968,49 +968,74 @@ class BackupsAPITestCase(test.TestCase):
|
||||
multi_services]
|
||||
|
||||
volume_id = utils.create_volume(self.context, size=2,
|
||||
host=test_host)['id']
|
||||
host=testhost)['id']
|
||||
volume = self.volume_api.get(context.get_admin_context(), volume_id)
|
||||
|
||||
# test empty service
|
||||
self.assertEqual(False,
|
||||
self.backup_api._is_backup_service_enabled(
|
||||
volume['availability_zone'],
|
||||
test_host))
|
||||
testhost))
|
||||
|
||||
# test host not match service
|
||||
self.assertEqual(False,
|
||||
self.backup_api._is_backup_service_enabled(
|
||||
volume['availability_zone'],
|
||||
test_host))
|
||||
testhost))
|
||||
|
||||
# test az not match service
|
||||
self.assertEqual(False,
|
||||
self.backup_api._is_backup_service_enabled(
|
||||
volume['availability_zone'],
|
||||
test_host))
|
||||
testhost))
|
||||
|
||||
# test disabled service
|
||||
self.assertEqual(False,
|
||||
self.backup_api._is_backup_service_enabled(
|
||||
volume['availability_zone'],
|
||||
test_host))
|
||||
testhost))
|
||||
|
||||
# test dead service
|
||||
self.assertEqual(False,
|
||||
self.backup_api._is_backup_service_enabled(
|
||||
volume['availability_zone'],
|
||||
test_host))
|
||||
testhost))
|
||||
|
||||
# test multi services and the last service matches
|
||||
self.assertTrue(self.backup_api._is_backup_service_enabled(
|
||||
volume['availability_zone'],
|
||||
test_host))
|
||||
testhost))
|
||||
|
||||
@mock.patch('cinder.db.service_get_all_by_topic')
|
||||
def test_get_available_backup_service(self,
|
||||
_mock_service_get_all_by_topic):
|
||||
_mock_service_get_all_by_topic.return_value = [
|
||||
{'availability_zone': 'az1', 'host': 'testhost',
|
||||
'disabled': 0, 'updated_at': timeutils.utcnow()},
|
||||
{'availability_zone': 'az2', 'host': 'fakehost',
|
||||
'disabled': 0, 'updated_at': timeutils.utcnow()}]
|
||||
actual_host = self.backup_api._get_available_backup_service_host(
|
||||
'testhost', 'az1')
|
||||
self.assertEqual('testhost', actual_host)
|
||||
self.assertRaises(exception.ServiceNotFound,
|
||||
self.backup_api._get_available_backup_service_host,
|
||||
'testhost', 'az2')
|
||||
self.assertRaises(exception.ServiceNotFound,
|
||||
self.backup_api._get_available_backup_service_host,
|
||||
'testhost2', 'az1')
|
||||
self.override_config('backup_use_same_backend', True)
|
||||
actual_host = self.backup_api._get_available_backup_service_host(
|
||||
None, 'az1')
|
||||
self.assertEqual('testhost', actual_host)
|
||||
actual_host = self.backup_api._get_available_backup_service_host(
|
||||
'testhost2', 'az1')
|
||||
self.assertEqual('testhost', actual_host)
|
||||
|
||||
     @mock.patch('cinder.db.service_get_all_by_topic')
     def test_delete_backup_available(self,
                                      _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "az1", 'host': 'testhost',
+            {'availability_zone': 'az1', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
         req = webob.Request.blank('/v2/fake/backups/%s' %
@@ -1029,7 +1054,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_delete_delta_backup(self,
                                  _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "az1", 'host': 'testhost',
+            {'availability_zone': 'az1', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
         delta_id = self._create_backup(status=fields.BackupStatus.AVAILABLE,
@@ -1051,7 +1076,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_delete_backup_error(self,
                                  _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "az1", 'host': 'testhost',
+            {'availability_zone': 'az1', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]
         backup_id = self._create_backup(status=fields.BackupStatus.ERROR)
         req = webob.Request.blank('/v2/fake/backups/%s' %
@@ -1099,7 +1124,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_delete_backup_with_InvalidBackup2(self,
                                                _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "az1", 'host': 'testhost',
+            {'availability_zone': 'az1', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]
         volume_id = utils.create_volume(self.context, size=5)['id']
         backup_id = self._create_backup(volume_id,
@@ -1127,7 +1152,7 @@ class BackupsAPITestCase(test.TestCase):
     def test_delete_backup_service_down(self,
                                         _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
-            {'availability_zone': "az1", 'host': 'testhost',
+            {'availability_zone': 'az1', 'host': 'testhost',
             'disabled': 0, 'updated_at': '1775-04-19 05:00:00'}]
         backup_id = self._create_backup(status='available')
         req = webob.Request.blank('/v2/fake/backups/%s' %
@@ -1140,7 +1165,10 @@ class BackupsAPITestCase(test.TestCase):

         db.backup_destroy(context.get_admin_context(), backup_id)

-    def test_restore_backup_volume_id_specified_json(self):
+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
+    def test_restore_backup_volume_id_specified_json(
+            self, _mock_is_service_enabled):
+        _mock_is_service_enabled.return_value = True
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
         # need to create the volume referenced below first
         volume_name = 'test1'
@@ -1162,7 +1190,10 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(volume_id, res_dict['restore']['volume_id'])
         self.assertEqual(volume_name, res_dict['restore']['volume_name'])

-    def test_restore_backup_volume_id_specified_xml(self):
+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
+    def test_restore_backup_volume_id_specified_xml(
+            self, _mock_is_service_enabled):
+        _mock_is_service_enabled.return_value = True
         volume_name = 'test1'
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
         volume_id = utils.create_volume(self.context,
@@ -1229,9 +1260,11 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual("Missing required element 'restore' in request body.",
                          res_dict['badRequest']['message'])

+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
     @mock.patch('cinder.volume.api.API.create')
     def test_restore_backup_volume_id_unspecified(self,
-                                                  _mock_volume_api_create):
+                                                  _mock_volume_api_create,
+                                                  _mock_is_service_enabled):

         # intercept volume creation to ensure created volume
         # has status of available
@@ -1239,6 +1272,7 @@ class BackupsAPITestCase(test.TestCase):
             volume_id = utils.create_volume(self.context, size=size)['id']
             return db.volume_get(context, volume_id)

+        _mock_is_service_enabled.return_value = True
         _mock_volume_api_create.side_effect = fake_volume_api_create

         backup_id = self._create_backup(size=5,
@@ -1256,10 +1290,11 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(202, res.status_int)
         self.assertEqual(backup_id, res_dict['restore']['backup_id'])

+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
     @mock.patch('cinder.volume.api.API.create')
     def test_restore_backup_name_specified(self,
-                                           _mock_volume_api_create):
-
+                                           _mock_volume_api_create,
+                                           _mock_is_service_enabled):
         # Intercept volume creation to ensure created volume
         # has status of available
         def fake_volume_api_create(context, size, name, description):
@@ -1268,6 +1303,7 @@ class BackupsAPITestCase(test.TestCase):
             return db.volume_get(context, volume_id)

         _mock_volume_api_create.side_effect = fake_volume_api_create
+        _mock_is_service_enabled.return_value = True

         backup_id = self._create_backup(size=5,
                                         status=fields.BackupStatus.AVAILABLE)
@@ -1292,8 +1328,10 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(202, res.status_int)
         self.assertEqual(backup_id, res_dict['restore']['backup_id'])

-    def test_restore_backup_name_volume_id_specified(self):
-
+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
+    def test_restore_backup_name_volume_id_specified(
+            self, _mock_is_service_enabled):
+        _mock_is_service_enabled.return_value = True
         backup_id = self._create_backup(size=5,
                                         status=fields.BackupStatus.AVAILABLE)
         orig_vol_name = "vol-00"
@@ -1510,7 +1548,10 @@ class BackupsAPITestCase(test.TestCase):
         db.volume_destroy(context.get_admin_context(), volume_id)
         db.backup_destroy(context.get_admin_context(), backup_id)

-    def test_restore_backup_to_oversized_volume(self):
+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
+    def test_restore_backup_to_oversized_volume(
+            self, _mock_is_service_enabled):
+        _mock_is_service_enabled.return_value = True
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE,
                                         size=10)
         # need to create the volume referenced below first
@@ -1537,14 +1578,17 @@ class BackupsAPITestCase(test.TestCase):
         db.backup_destroy(context.get_admin_context(), backup_id)

     @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
-    def test_restore_backup_with_different_host(self, mock_restore_backup):
+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
+    def test_restore_backup_with_different_host(self, mock_is_backup_available,
+                                                mock_restore_backup):
         volume_name = 'test1'
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE,
-                                        size=10, host='HostA@BackendB#PoolA')
+                                        size=10, host='HostA')
         volume_id = utils.create_volume(self.context, size=10,
                                         host='HostB@BackendB#PoolB',
                                         display_name=volume_name)['id']

+        mock_is_backup_available.return_value = True
         body = {"restore": {"volume_id": volume_id, }}
         req = webob.Request.blank('/v2/fake/backups/%s/restore' %
                                   backup_id)
@@ -1558,7 +1602,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(backup_id, res_dict['restore']['backup_id'])
         self.assertEqual(volume_id, res_dict['restore']['volume_id'])
         self.assertEqual(volume_name, res_dict['restore']['volume_name'])
-        mock_restore_backup.assert_called_once_with(mock.ANY, u'HostB',
+        mock_restore_backup.assert_called_once_with(mock.ANY, u'HostA',
                                                     mock.ANY, volume_id)
         # Manually check if restore_backup was called with appropriate backup.
         self.assertEqual(backup_id, mock_restore_backup.call_args[0][2].id)
@@ -1578,9 +1622,11 @@ class BackupsAPITestCase(test.TestCase):
         # request is not authorized
         self.assertEqual(403, res.status_int)

+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
     def test_export_backup_record_id_specified_json(self,
-                                                    _mock_export_record_rpc):
+                                                    _mock_export_record_rpc,
+                                                    _mock_service_enabled):
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE,
                                         size=10)
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
@@ -1589,6 +1635,7 @@ class BackupsAPITestCase(test.TestCase):
         _mock_export_record_rpc.return_value = \
             {'backup_service': backup_service,
              'backup_url': backup_url}
+        _mock_service_enabled.return_value = True
         req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
                                   backup_id)
         req.method = 'GET'
@@ -1604,9 +1651,11 @@ class BackupsAPITestCase(test.TestCase):
                          res_dict['backup-record']['backup_url'])
         db.backup_destroy(context.get_admin_context(), backup_id)

+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
     def test_export_record_backup_id_specified_xml(self,
-                                                   _mock_export_record_rpc):
+                                                   _mock_export_record_rpc,
+                                                   _mock_service_enabled):
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE,
                                         size=10)
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
@@ -1615,6 +1664,7 @@ class BackupsAPITestCase(test.TestCase):
         _mock_export_record_rpc.return_value = \
             {'backup_service': backup_service,
              'backup_url': backup_url}
+        _mock_service_enabled.return_value = True
         req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
                                   backup_id)
         req.method = 'GET'
@@ -1665,12 +1715,15 @@ class BackupsAPITestCase(test.TestCase):
                          res_dict['badRequest']['message'])
         db.backup_destroy(context.get_admin_context(), backup_id)

+    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record')
     def test_export_record_with_unavailable_service(self,
-                                                    _mock_export_record_rpc):
+                                                    _mock_export_record_rpc,
+                                                    _mock_service_enabled):
         msg = 'fake unavailable service'
         _mock_export_record_rpc.side_effect = \
             exception.InvalidBackup(reason=msg)
+        _mock_service_enabled.return_value = True
         backup_id = self._create_backup(status=fields.BackupStatus.AVAILABLE)
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
         req = webob.Request.blank('/v2/fake/backups/%s/export_record' %
@@ -1701,7 +1754,7 @@ class BackupsAPITestCase(test.TestCase):
         # request is not authorized
         self.assertEqual(403, res.status_int)

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
     def test_import_record_volume_id_specified_json(self,
                                                     _mock_import_record_rpc,
@@ -1739,7 +1792,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('0000-0000-0000-0000', db_backup.volume_id)
         self.assertEqual(fields.BackupStatus.CREATING, db_backup.status)

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
     def test_import_record_volume_id_exists_deleted(self,
                                                     _mock_import_record_rpc,
@@ -1784,7 +1837,7 @@ class BackupsAPITestCase(test.TestCase):

         db.backup_destroy(context.get_admin_context(), backup_id)

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
     def test_import_record_volume_id_specified_xml(self,
                                                    _mock_import_record_rpc,
@@ -1831,7 +1884,7 @@ class BackupsAPITestCase(test.TestCase):
         back = dom.getElementsByTagName('backup')
         self.assertEqual(backup.id, back.item(0).attributes['id'].value)

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     def test_import_record_with_no_backup_services(self,
                                                    _mock_list_services):
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
@@ -1854,7 +1907,7 @@ class BackupsAPITestCase(test.TestCase):
                          % backup_service,
                          res_dict['computeFault']['message'])

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     def test_import_backup_with_wrong_backup_url(self, _mock_list_services):
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
         backup_service = 'fake'
@@ -1874,7 +1927,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual("Invalid input received: Can't parse backup record.",
                          res_dict['badRequest']['message'])

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     def test_import_backup_with_existing_backup_record(self,
                                                        _mock_list_services):
         ctx = context.RequestContext('admin', 'fake', is_admin=True)
@@ -1899,7 +1952,7 @@ class BackupsAPITestCase(test.TestCase):

         db.backup_destroy(context.get_admin_context(), backup_id)

-    @mock.patch('cinder.backup.api.API._list_backup_services')
+    @mock.patch('cinder.backup.api.API._list_backup_hosts')
     @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record')
     def test_import_backup_with_missing_backup_services(self,
                                                         _mock_import_record,
@ -34,7 +34,6 @@ from cinder.objects import fields
from cinder import test
from cinder.tests.unit.backup import fake_service_with_verify as fake_service
from cinder.tests.unit import utils
from cinder.volume.drivers import lvm


CONF = cfg.CONF
@ -52,9 +51,21 @@ class BaseBackupTest(test.TestCase):
        self.backup_mgr = importutils.import_object(CONF.backup_manager)
        self.backup_mgr.host = 'testhost'
        self.ctxt = context.get_admin_context()
        self.backup_mgr.driver.set_initialized()
        paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot',
                 'cinder.volume.rpcapi.VolumeAPI.delete_volume',
                 'cinder.volume.rpcapi.VolumeAPI.detach_volume',
                 'cinder.volume.rpcapi.VolumeAPI.'
                 'secure_file_operations_enabled']
        self.volume_patches = {}
        self.volume_mocks = {}
        for path in paths:
            name = path.split('.')[-1]
            self.volume_patches[name] = mock.patch(path)
            self.volume_mocks[name] = self.volume_patches[name].start()
            self.addCleanup(self.volume_patches[name].stop)

    def _create_backup_db_entry(self, volume_id=1, display_name='test_backup',
    def _create_backup_db_entry(self, volume_id=1, restore_volume_id=None,
                                display_name='test_backup',
                                display_description='this is a test backup',
                                container='volumebackups',
                                status=fields.BackupStatus.CREATING,
@ -70,6 +81,7 @@ class BaseBackupTest(test.TestCase):
        """
        kwargs = {}
        kwargs['volume_id'] = volume_id
        kwargs['restore_volume_id'] = restore_volume_id
        kwargs['user_id'] = 'fake'
        kwargs['project_id'] = project_id
        kwargs['host'] = 'testhost'
@ -110,7 +122,9 @@ class BaseBackupTest(test.TestCase):
        vol['attach_status'] = 'detached'
        vol['availability_zone'] = '1'
        vol['previous_status'] = previous_status
        return db.volume_create(self.ctxt, vol)['id']
        volume = objects.Volume(context=self.ctxt, **vol)
        volume.create()
        return volume.id

    def _create_snapshot_db_entry(self, display_name='test_snapshot',
                                  display_description='test snapshot',
@ -133,6 +147,7 @@ class BaseBackupTest(test.TestCase):
        kwargs['volume_id'] = volume_id
        kwargs['cgsnapshot_id'] = None
        kwargs['volume_size'] = size
        kwargs['metadata'] = {}
        kwargs['provider_location'] = provider_location
        snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs)
        snapshot_obj.create()
@ -183,14 +198,16 @@ class BaseBackupTest(test.TestCase):
class BackupTestCase(BaseBackupTest):
    """Test Case for backups."""

    @mock.patch.object(lvm.LVMVolumeDriver, 'delete_snapshot')
    @mock.patch.object(lvm.LVMVolumeDriver, 'delete_volume')
    def test_init_host(self, mock_delete_volume, mock_delete_snapshot):
    @mock.patch('cinder.context.get_admin_context')
    def test_init_host(self, mock_get_admin_context):
        """Test stuck volumes and backups.

        Make sure stuck volumes and backups are reset to correct
        states when backup_manager.init_host() is called
        """
        def get_admin_context():
            return self.ctxt

        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
@ -208,13 +225,12 @@ class BackupTestCase(BaseBackupTest):
        temp_snap = self._create_snapshot_db_entry()
        temp_snap.status = 'available'
        temp_snap.save()
        vol6_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol6_id, {'status': 'restoring-backup'})

        backup1 = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING, volume_id=vol1_id)
        backup2 = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING, volume_id=vol2_id)
            status=fields.BackupStatus.RESTORING,
            restore_volume_id=vol2_id)
        backup3 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol3_id)
        self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
@ -224,6 +240,7 @@ class BackupTestCase(BaseBackupTest):
                                     volume_id=vol5_id,
                                     temp_snapshot_id=temp_snap.id)

        mock_get_admin_context.side_effect = get_admin_context
        self.backup_mgr.init_host()

        vol1 = db.volume_get(self.ctxt, vol1_id)
@ -236,8 +253,6 @@ class BackupTestCase(BaseBackupTest):
        self.assertEqual('available', vol4['status'])
        vol5 = db.volume_get(self.ctxt, vol5_id)
        self.assertEqual('available', vol5['status'])
        vol6 = db.volume_get(self.ctxt, vol6_id)
        self.assertEqual('error_restoring', vol6['status'])

        backup1 = db.backup_get(self.ctxt, backup1.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup1['status'])
@ -248,8 +263,10 @@ class BackupTestCase(BaseBackupTest):
                          self.ctxt,
                          backup3.id)

        self.assertTrue(mock_delete_volume.called)
        self.assertTrue(mock_delete_snapshot.called)
        temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id)
        self.volume_mocks['delete_volume'].assert_called_once_with(
            self.ctxt, temp_vol)
        self.assertTrue(self.volume_mocks['detach_volume'].called)
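
The reset rules this rewritten test asserts form a small decision table: a backup stuck in 'creating' goes to error and its volume back to available, a backup stuck in 'restoring' stays usable while the target volume is marked error_restoring, and a stuck delete is simply resumed. A minimal standalone sketch of that table (illustrative, not cinder code):

# Sketch of the init_host() cleanup rules asserted above; not cinder
# code. Returns (new backup status, new volume status); None means the
# pending delete is resumed and the backup row disappears.
def reset_stuck_state(backup_status):
    rules = {
        'creating': ('error', 'available'),
        'restoring': ('available', 'error_restoring'),
        'deleting': (None, None),
    }
    return rules[backup_status]


assert reset_stuck_state('creating') == ('error', 'available')
assert reset_stuck_state('restoring') == ('available', 'error_restoring')
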
    @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
    @mock.patch('cinder.manager.SchedulerDependentManager._add_to_threadpool')
@ -276,35 +293,9 @@ class BackupTestCase(BaseBackupTest):
    def test_is_working(self):
        self.assertTrue(self.backup_mgr.is_working())

        vmanager_mock = mock.Mock()
        vmanager_mock.is_working.side_effect = [True, False, True]
        vms = {'a': vmanager_mock, 'b': vmanager_mock, 'c': vmanager_mock}
        with mock.patch.dict(self.backup_mgr.volume_managers, vms, True):
            self.assertFalse(self.backup_mgr.is_working())

    def test_init_host_handles_exception(self):
        """Test that exception in cleanup is handled."""

        self.mock_object(self.backup_mgr, '_init_volume_driver')
        mock_cleanup = self.mock_object(
            self.backup_mgr,
            '_cleanup_incomplete_backup_operations')
        mock_cleanup.side_effect = [Exception]

        self.assertIsNone(self.backup_mgr.init_host())

    def test_cleanup_incomplete_backup_operations_with_exceptions(self):
        """Test cleanup resilience in the face of exceptions."""

        fake_volume_list = [{'id': 'vol1'}, {'id': 'vol2'}]
        mock_volume_get_by_host = self.mock_object(
            db, 'volume_get_all_by_host')
        mock_volume_get_by_host.return_value = fake_volume_list

        mock_volume_cleanup = self.mock_object(
            self.backup_mgr, '_cleanup_one_volume')
        mock_volume_cleanup.side_effect = [Exception]

        fake_backup_list = [{'id': 'bkup1'}, {'id': 'bkup2'}, {'id': 'bkup3'}]
        mock_backup_get_by_host = self.mock_object(
            objects.BackupList, 'get_all_by_host')
@ -322,17 +313,12 @@ class BackupTestCase(BaseBackupTest):
            self.backup_mgr._cleanup_incomplete_backup_operations(
                self.ctxt))

        self.assertEqual(len(fake_volume_list), mock_volume_cleanup.call_count)
        self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count)
        self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count)

    def test_cleanup_one_backing_up_volume(self):
        """Test cleanup_one_volume for volume status 'backing-up'."""

        mock_get_manager = self.mock_object(
            self.backup_mgr, '_get_manager')
        mock_get_manager.return_value = 'fake_manager'

        volume_id = self._create_volume_db_entry(status='backing-up',
                                                 previous_status='available')
        volume = db.volume_get(self.ctxt, volume_id)
@ -345,10 +331,6 @@ class BackupTestCase(BaseBackupTest):
    def test_cleanup_one_restoring_backup_volume(self):
        """Test cleanup_one_volume for volume status 'restoring-backup'."""

        mock_get_manager = self.mock_object(
            self.backup_mgr, '_get_manager')
        mock_get_manager.return_value = 'fake_manager'

        volume_id = self._create_volume_db_entry(status='restoring-backup')
        volume = db.volume_get(self.ctxt, volume_id)

@ -360,22 +342,35 @@ class BackupTestCase(BaseBackupTest):
    def test_cleanup_one_creating_backup(self):
        """Test cleanup_one_backup for volume status 'creating'."""

        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', })

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING)
            status=fields.BackupStatus.CREATING,
            volume_id=vol1_id)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        self.assertEqual(fields.BackupStatus.ERROR, backup.status)
        volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
        self.assertEqual('available', volume.status)

    def test_cleanup_one_restoring_backup(self):
        """Test cleanup_one_backup for volume status 'restoring'."""

        vol1_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', })

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING)
            status=fields.BackupStatus.RESTORING,
            restore_volume_id=vol1_id)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
        volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
        self.assertEqual('error_restoring', volume.status)

    def test_cleanup_one_deleting_backup(self):
        """Test cleanup_one_backup for volume status 'deleting'."""
@ -394,9 +389,7 @@ class BackupTestCase(BaseBackupTest):
        """Test detach_all_attachments with exceptions."""

        mock_log = self.mock_object(manager, 'LOG')
        mock_volume_mgr = mock.Mock()
        mock_detach_volume = mock_volume_mgr.detach_volume
        mock_detach_volume.side_effect = [Exception]
        self.volume_mocks['detach_volume'].side_effect = [Exception]

        fake_attachments = [
            {
@ -416,7 +409,6 @@ class BackupTestCase(BaseBackupTest):
        }

        self.backup_mgr._detach_all_attachments(self.ctxt,
                                                mock_volume_mgr,
                                                fake_volume)

        self.assertEqual(len(fake_attachments), mock_log.exception.call_count)
@ -439,8 +431,6 @@ class BackupTestCase(BaseBackupTest):

    def test_cleanup_temp_snapshot_for_one_backup_not_found(self):
        """Ensure we handle missing temp snapshot for a backup."""
        mock_delete_snapshot = self.mock_object(
            lvm.LVMVolumeDriver, 'delete_snapshot')

        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
@ -454,7 +444,7 @@ class BackupTestCase(BaseBackupTest):
            self.ctxt,
            backup))

        self.assertFalse(mock_delete_snapshot.called)
        self.assertFalse(self.volume_mocks['delete_snapshot'].called)
        self.assertIsNone(backup.temp_snapshot_id)

        backup.destroy()
@ -462,8 +452,6 @@ class BackupTestCase(BaseBackupTest):

    def test_cleanup_temp_volume_for_one_backup_not_found(self):
        """Ensure we handle missing temp volume for a backup."""
        mock_delete_volume = self.mock_object(
            lvm.LVMVolumeDriver, 'delete_volume')

        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
@ -477,7 +465,7 @@ class BackupTestCase(BaseBackupTest):
            self.ctxt,
            backup))

        self.assertFalse(mock_delete_volume.called)
        self.assertFalse(self.volume_mocks['delete_volume'].called)
        self.assertIsNone(backup.temp_volume_id)

        backup.destroy()
@ -502,13 +490,13 @@ class BackupTestCase(BaseBackupTest):
            self.ctxt,
            backup)

    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_error(self, _mock_volume_backup):
    def test_create_backup_with_error(self):
        """Test error handling when error occurs during backup creation."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        _mock_volume_backup.side_effect = FakeBackupException('fake')
        mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup')
        mock_run_backup.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
@ -518,56 +506,61 @@ class BackupTestCase(BaseBackupTest):
        self.assertEqual('error_backing-up', vol['previous_status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
        self.assertTrue(_mock_volume_backup.called)
        self.assertTrue(mock_run_backup.called)

    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup(self, _mock_volume_backup):
    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    def test_create_backup(self, mock_open, mock_temporary_chown,
                           mock_get_backup_device, mock_get_conn):
        """Test normal backup creation."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        mock_get_backup_device.return_value = {'backup_device': vol,
                                               'secure_enabled': False,
                                               'is_snapshot': False, }
        attach_info = {'device': {'path': '/dev/null'}}
        mock_detach_device = self.mock_object(self.backup_mgr,
                                              '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = attach_info
        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'rb')

        self.backup_mgr.create_backup(self.ctxt, backup)
        vol = db.volume_get(self.ctxt, vol_id)

        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_attach_device.assert_called_once_with(self.ctxt, vol,
                                                   properties, False)
        mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
        mock_get_conn.assert_called_once_with()
        mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
                                                   vol, properties, False)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        self.assertEqual('backing-up', vol['previous_status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
        self.assertEqual(vol_size, backup['size'])
        self.assertTrue(_mock_volume_backup.called)
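
The rewritten test reflects the new data path: instead of invoking the volume driver's backup_volume directly, the backup manager asks the owning volume service for a backup device over RPC, attaches it locally, and streams the attached file to the backup driver. A simplified, self-contained sketch of that control flow (every name here is a stand-in for the RPC and os-brick plumbing, not cinder's API):

import io


# Illustrative control-flow sketch; not cinder code.
def run_backup(get_backup_device, attach, detach, backup_driver, backup):
    info = get_backup_device(backup)  # volume, temp clone, or temp snap
    attach_info = attach(info['backup_device'])
    try:
        backup_driver(backup, attach_info['file'])
    finally:
        detach(attach_info)  # always detach, even on failure


if __name__ == '__main__':
    run_backup(
        get_backup_device=lambda b: {'backup_device': 'fake-vol',
                                     'secure_enabled': False,
                                     'is_snapshot': False},
        attach=lambda dev: {'file': io.BytesIO(b'fake volume bytes')},
        detach=lambda info: info['file'].close(),
        backup_driver=lambda b, f: print(b, f.read()),
        backup='backup-1')
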
    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    @mock.patch('%s.%s' % (CONF.volume_driver, 'backup_volume'))
    def test_create_backup_with_notify(self, _mock_volume_backup, notify):
    def test_create_backup_with_notify(self, notify):
        """Test normal backup creation with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        self.mock_object(self.backup_mgr, '_run_backup')
        self.backup_mgr.create_backup(self.ctxt, backup)
        self.assertEqual(2, notify.call_count)

    def test_require_driver_initialized_in_create_backup(self):
        """Test backup creation.

        Test require_driver_initialized with _get_driver
        in a normal backup creation.
        """
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        self.backup_mgr._get_driver = mock.MagicMock()
        self.backup_mgr._get_volume_backend = mock.MagicMock()
        self.backup_mgr._get_volume_backend.return_value = 'mybackend'

        self.backup_mgr.create_backup(self.ctxt, backup)
        self.assertEqual(2, self.backup_mgr._get_driver.call_count)
        self.assertEqual(self.backup_mgr._get_driver.call_args_list[0],
                         mock.call('mybackend'))
        self.assertEqual(self.backup_mgr._get_driver.call_args_list[1],
                         mock.call('mybackend'))

    def test_restore_backup_with_bad_volume_status(self):
        """Test error handling.

@ -604,15 +597,17 @@ class BackupTestCase(BaseBackupTest):
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])

    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup_with_driver_error(self, _mock_volume_restore):
    def test_restore_backup_with_driver_error(self):
        """Test error handling when an error occurs during backup restore."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING, volume_id=vol_id)

        _mock_volume_restore.side_effect = FakeBackupException('fake')
        mock_run_restore = self.mock_object(
            self.backup_mgr,
            '_run_restore')
        mock_run_restore.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
@ -622,7 +617,7 @@ class BackupTestCase(BaseBackupTest):
        self.assertEqual('error_restoring', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
        self.assertTrue(_mock_volume_restore.called)
        self.assertTrue(mock_run_restore.called)

    def test_restore_backup_with_bad_service(self):
        """Test error handling.
@ -647,8 +642,11 @@ class BackupTestCase(BaseBackupTest):
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])

    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup(self, _mock_volume_restore):
    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    def test_restore_backup(self, mock_open, mock_temporary_chown,
                            mock_get_conn):
        """Test normal backup restoration."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
@ -656,49 +654,48 @@ class BackupTestCase(BaseBackupTest):
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING, volume_id=vol_id)

        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'wb')
        mock_secure_enabled = (
            self.volume_mocks['secure_file_operations_enabled'])
        mock_secure_enabled.return_value = False
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        attach_info = {'device': {'path': '/dev/null'}}
        mock_detach_device = self.mock_object(self.backup_mgr,
                                              '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = attach_info

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        vol = db.volume_get(self.ctxt, vol_id)

        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_get_conn.assert_called_once_with()
        mock_secure_enabled.assert_called_once_with(self.ctxt, vol)
        mock_attach_device.assert_called_once_with(self.ctxt, vol,
                                                   properties)
        mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
                                                   vol, properties)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
        self.assertTrue(_mock_volume_restore.called)
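
The restore test mirrors that flow and adds one check: before opening the attached device for writing, the manager asks the volume backend (via the new secure_file_operations_enabled RPC) whether the device file is already accessible to the service user; only when it is not does it wrap the open in a temporary chown. A hedged sketch of that branch, with illustrative names:

import contextlib
import tempfile


# Illustrative sketch; not cinder code. Stand-in for
# cinder.utils.temporary_chown().
@contextlib.contextmanager
def temporary_chown(path):
    print('chown %s to current user' % path)
    try:
        yield
    finally:
        print('chown %s back' % path)


def restore_to_device(path, secure_enabled, write_backup):
    if secure_enabled:
        with open(path, 'wb') as f:
            write_backup(f)
    else:
        with temporary_chown(path):
            with open(path, 'wb') as f:
                write_backup(f)


if __name__ == '__main__':
    target = tempfile.NamedTemporaryFile(delete=False).name
    restore_to_device(target, secure_enabled=False,
                      write_backup=lambda f: f.write(b'restored bytes'))
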
    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    @mock.patch('%s.%s' % (CONF.volume_driver, 'restore_backup'))
    def test_restore_backup_with_notify(self, _mock_volume_restore, notify):
    def test_restore_backup_with_notify(self, notify):
        """Test normal backup restoration with notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING, volume_id=vol_id)
        self.backup_mgr._run_restore = mock.Mock()

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        self.assertEqual(2, notify.call_count)

    def test_require_driver_initialized_in_restore_backup(self):
        """Test backup restoration.

        Test require_driver_initialized with _get_driver
        in a normal backup restoration.
        """
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING, volume_id=vol_id)

        self.backup_mgr._get_driver = mock.MagicMock()
        self.backup_mgr._get_volume_backend = mock.MagicMock()
        self.backup_mgr._get_volume_backend.return_value = 'mybackend'

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        self.assertEqual(2, self.backup_mgr._get_driver.call_count)
        self.assertEqual(self.backup_mgr._get_driver.call_args_list[0],
                         mock.call('mybackend'))
        self.assertEqual(self.backup_mgr._get_driver.call_args_list[1],
                         mock.call('mybackend'))

    def test_delete_backup_with_bad_backup_status(self):
        """Test error handling.

@ -1237,13 +1234,13 @@ class BackupAPITestCase(BaseBackupTest):
            ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None,
            None)

    @mock.patch.object(api.API, '_is_backup_service_enabled',
                       return_value=True)
    @mock.patch.object(api.API, '_get_available_backup_service_host',
                       return_value='fake_host')
    @mock.patch.object(db, 'backup_create',
                       side_effect=db_exc.DBError())
    def test_create_when_failed_to_create_backup_object(
            self, mock_create,
            mock_service_enabled):
            mock_get_service):
        volume_id = utils.create_volume(self.ctxt)['id']
        self.ctxt.user_id = 'user_id'
        self.ctxt.project_id = 'project_id'
@ -1261,13 +1258,13 @@ class BackupAPITestCase(BaseBackupTest):
                          volume_id=volume_id,
                          container='volumebackups')

    @mock.patch.object(api.API, '_is_backup_service_enabled',
                       return_value=True)
    @mock.patch.object(api.API, '_get_available_backup_service_host',
                       return_value='fake_host')
    @mock.patch.object(objects.Backup, '__init__',
                       side_effect=exception.InvalidInput(
                           reason='Failed to new'))
    def test_create_when_failed_to_new_backup_object(self, mock_new,
                                                     mock_service_enabled):
                                                     mock_get_service):
        volume_id = utils.create_volume(self.ctxt)['id']
        self.ctxt.user_id = 'user_id'
        self.ctxt.project_id = 'project_id'
@ -1284,14 +1281,17 @@ class BackupAPITestCase(BaseBackupTest):
                          volume_id=volume_id,
                          container='volumebackups')

    @mock.patch('cinder.backup.api.API._is_backup_service_enabled')
    @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup')
    def test_restore_volume(self,
                            mock_rpcapi_restore):
                            mock_rpcapi_restore,
                            mock_is_service_enabled):
        ctxt = context.RequestContext('fake', 'fake')
        volume_id = self._create_volume_db_entry(status='available',
                                                 size=1)
        backup = self._create_backup_db_entry(size=1,
                                              status='available')
        mock_is_service_enabled.return_value = True
        self.api.restore(ctxt, backup.id, volume_id)
        backup = objects.Backup.get_by_id(ctxt, backup.id)
        self.assertEqual(volume_id, backup.restore_volume_id)
@ -178,9 +178,10 @@ class QuotaIntegrationTestCase(test.TestCase):
        self.flags(**flag_args)
        vol_ref = self._create_volume()
        backup_ref = self._create_backup(vol_ref)
        with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
                mock__is_backup_service_enabled:
            mock__is_backup_service_enabled.return_value = True
        with mock.patch.object(backup.API,
                               '_get_available_backup_service_host') as \
                mock__get_available_backup_service:
            mock__get_available_backup_service.return_value = 'host'
            self.assertRaises(exception.BackupLimitExceeded,
                              backup.API().create,
                              self.context,
@ -221,9 +222,10 @@ class QuotaIntegrationTestCase(test.TestCase):
    def test_too_many_combined_backup_gigabytes(self):
        vol_ref = self._create_volume(size=10000)
        backup_ref = self._create_backup(vol_ref)
        with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
                mock__is_backup_service_enabled:
            mock__is_backup_service_enabled.return_value = True
        with mock.patch.object(backup.API,
                               '_get_available_backup_service_host') as \
                mock__get_available_backup_service:
            mock__get_available_backup_service.return_value = 'host'
            self.assertRaises(
                exception.VolumeBackupSizeExceedsAvailableQuota,
                backup.API().create,
@ -266,9 +268,10 @@ class QuotaIntegrationTestCase(test.TestCase):
        )
        vol_ref = self._create_volume(size=10)
        backup_ref = self._create_backup(vol_ref)
        with mock.patch.object(backup.API, '_is_backup_service_enabled') as \
                mock__is_backup_service_enabled:
            mock__is_backup_service_enabled.return_value = True
        with mock.patch.object(backup.API,
                               '_get_available_backup_service_host') as \
                mock_mock__get_available_backup_service:
            mock_mock__get_available_backup_service.return_value = 'host'
            backup_ref2 = backup.API().create(self.context,
                                              'name',
                                              'description',
@ -5832,6 +5832,32 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
                          vol_api.delete,
                          self.context, volume)

    @mock.patch.object(driver.BaseVD, 'get_backup_device')
    @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled')
    def test_get_backup_device(self, mock_secure, mock_get_backup):
        vol = tests_utils.create_volume(self.context)
        backup = tests_utils.create_backup(self.context, vol['id'])
        mock_secure.return_value = False
        mock_get_backup.return_value = (vol, False)
        result = self.volume.get_backup_device(self.context,
                                               backup)

        mock_get_backup.assert_called_once_with(self.context, backup)
        mock_secure.assert_called_once_with()
        expected_result = {'backup_device': vol,
                           'secure_enabled': False,
                           'is_snapshot': False}
        self.assertEqual(expected_result, result)

    @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled')
    def test_secure_file_operations_enabled(self, mock_secure):
        mock_secure.return_value = True
        vol = tests_utils.create_volume(self.context)
        result = self.volume.secure_file_operations_enabled(self.context,
                                                            vol)
        mock_secure.assert_called_once_with()
        self.assertTrue(result)


class CopyVolumeToImageTestCase(BaseVolumeTestCase):
    def fake_local_path(self, volume):
@ -6456,6 +6482,43 @@ class GenericVolumeDriverTestCase(DriverTestCase):
                                     volume_file)
            self.assertEqual(i, backup_service.restore.call_count)

    def test_get_backup_device_available(self):
        vol = tests_utils.create_volume(self.context)
        self.context.user_id = 'fake'
        self.context.project_id = 'fake'
        backup = tests_utils.create_backup(self.context,
                                           vol['id'])
        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
        (backup_device, is_snapshot) = self.volume.driver.get_backup_device(
            self.context, backup_obj)
        volume = objects.Volume.get_by_id(self.context, vol.id)
        self.assertEqual(volume, backup_device)
        self.assertFalse(is_snapshot)
        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
        self.assertIsNone(backup.temp_volume_id)

    def test_get_backup_device_in_use(self):
        vol = tests_utils.create_volume(self.context,
                                        status='backing-up',
                                        previous_status='in-use')
        temp_vol = tests_utils.create_volume(self.context)
        self.context.user_id = 'fake'
        self.context.project_id = 'fake'
        backup = tests_utils.create_backup(self.context,
                                           vol['id'])
        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
        with mock.patch.object(
                self.volume.driver,
                '_create_temp_cloned_volume') as mock_create_temp:
            mock_create_temp.return_value = temp_vol
            (backup_device, is_snapshot) = (
                self.volume.driver.get_backup_device(self.context,
                                                     backup_obj))
            self.assertEqual(temp_vol, backup_device)
            self.assertFalse(is_snapshot)
            backup_obj = objects.Backup.get_by_id(self.context, backup.id)
            self.assertEqual(temp_vol.id, backup_obj.temp_volume_id)

    def test_enable_replication_invalid_state(self):
        volume_api = cinder.volume.api.API()
        ctxt = context.get_admin_context()
@ -26,6 +26,7 @@ from cinder import context
from cinder import db
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_backup
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
@ -94,6 +95,7 @@ class VolumeRpcAPITestCase(test.TestCase):
        self.fake_cg2 = group2
        self.fake_src_cg = jsonutils.to_primitive(source_group)
        self.fake_cgsnap = cgsnapshot
        self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)

    def test_serialized_volume_has_id(self):
        self.assertIn('id', self.fake_volume)
@ -137,6 +139,12 @@ class VolumeRpcAPITestCase(test.TestCase):
        if cgsnapshot:
            cgsnapshot.consistencygroup
            kwargs['cgsnapshot'].consistencygroup
        if 'backup' in expected_msg:
            backup = expected_msg['backup']
            del expected_msg['backup']
            expected_msg['backup_id'] = backup.id
            expected_msg['backup'] = backup

        if 'host' in expected_msg:
            del expected_msg['host']
        if 'dest_host' in expected_msg:
@ -205,6 +213,10 @@ class VolumeRpcAPITestCase(test.TestCase):
                expected_volume = expected_msg[kwarg].obj_to_primitive()
                volume = value.obj_to_primitive()
                self.assertEqual(expected_volume, volume)
            elif isinstance(value, objects.Backup):
                expected_backup = expected_msg[kwarg].obj_to_primitive()
                backup = value.obj_to_primitive()
                self.assertEqual(expected_backup, backup)
            else:
                self.assertEqual(expected_msg[kwarg], value)

@ -580,3 +592,16 @@ class VolumeRpcAPITestCase(test.TestCase):
                              rpc_method='cast',
                              volume=self.fake_volume,
                              version='1.30')

    def test_get_backup_device(self):
        self._test_volume_api('get_backup_device',
                              rpc_method='call',
                              backup=self.fake_backup_obj,
                              volume=self.fake_volume_obj,
                              version='1.38')

    def test_secure_file_operations_enabled(self):
        self._test_volume_api('secure_file_operations_enabled',
                              rpc_method='call',
                              volume=self.fake_volume_obj,
                              version='1.38')
@ -1081,6 +1081,113 @@ class BaseVD(object):
    def backup_use_temp_snapshot(self):
        return False

    def snapshot_remote_attachable(self):
        # TODO(lixiaoy1): the method will be deleted later when remote
        # attach snapshot is implemented.
        return False

    def get_backup_device(self, context, backup):
        """Get a backup device from an existing volume.

        The function returns a volume or snapshot to backup service,
        and then backup service attaches the device and does backup.
        """
        backup_device = None
        is_snapshot = False
        if (self.backup_use_temp_snapshot() and
                self.snapshot_remote_attachable()):
            (backup_device, is_snapshot) = (
                self._get_backup_volume_temp_snapshot(context, backup))
        else:
            backup_device = self._get_backup_volume_temp_volume(
                context, backup)
            is_snapshot = False
        return (backup_device, is_snapshot)

    def _get_backup_volume_temp_volume(self, context, backup):
        """Return a volume to do backup.

        To backup a snapshot, create a temp volume from the snapshot and
        back it up.

        Otherwise to backup an in-use volume, create a temp volume and
        back it up.
        """
        volume = objects.Volume.get_by_id(context, backup.volume_id)
        snapshot = None
        if backup.snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)

        LOG.debug('Creating a new backup for volume %s.', volume['name'])

        temp_vol_ref = None
        device_to_backup = volume

        # NOTE(xyang): If it is to backup from snapshot, create a temp
        # volume from the source snapshot, backup the temp volume, and
        # then clean up the temp volume.
        if snapshot:
            temp_vol_ref = self._create_temp_volume_from_snapshot(
                context, volume, snapshot)
            backup.temp_volume_id = temp_vol_ref['id']
            backup.save()
            device_to_backup = temp_vol_ref

        else:
            # NOTE(xyang): Check volume status if it is not to backup from
            # snapshot; if 'in-use', create a temp volume from the source
            # volume, backup the temp volume, and then clean up the temp
            # volume; if 'available', just backup the volume.
            previous_status = volume.get('previous_status')
            if previous_status == "in-use":
                temp_vol_ref = self._create_temp_cloned_volume(
                    context, volume)
                backup.temp_volume_id = temp_vol_ref['id']
                backup.save()
                device_to_backup = temp_vol_ref

        return device_to_backup

    def _get_backup_volume_temp_snapshot(self, context, backup):
        """Return a device to backup.

        If it is to backup from snapshot, back it up directly.

        Otherwise for in-use volume, create a temp snapshot and back it up.
        """
        volume = self.db.volume_get(context, backup.volume_id)
        snapshot = None
        if backup.snapshot_id:
            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)

        LOG.debug('Creating a new backup for volume %s.', volume['name'])

        device_to_backup = volume
        is_snapshot = False
        temp_snapshot = None

        # NOTE(xyang): If it is to backup from snapshot, back it up
        # directly. No need to clean it up.
        if snapshot:
            device_to_backup = snapshot
            is_snapshot = True

        else:
            # NOTE(xyang): If it is not to backup from snapshot, check volume
            # status. If the volume status is 'in-use', create a temp snapshot
            # from the source volume, backup the temp snapshot, and then clean
            # up the temp snapshot; if the volume status is 'available', just
            # backup the volume.
            previous_status = volume.get('previous_status')
            if previous_status == "in-use":
                temp_snapshot = self._create_temp_snapshot(context, volume)
                backup.temp_snapshot_id = temp_snapshot.id
                backup.save()
                device_to_backup = temp_snapshot
                is_snapshot = True

        return (device_to_backup, is_snapshot)

    def backup_volume(self, context, backup, backup_service):
        """Create a new backup from an existing volume."""
        # NOTE(xyang): _backup_volume_temp_snapshot and
@ -1294,6 +1401,8 @@ class BaseVD(object):
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': 'detached',
            'availability_zone': volume.availability_zone,
        }
        temp_vol_ref = self.db.volume_create(context, temp_volume)
        try:
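
The device-selection logic get_backup_device adds to BaseVD reduces to a small decision table over the backup source and the volume's previous status. A standalone sketch of those rules (plain strings stand in for the volume/snapshot objects; can_use_temp_snap corresponds to backup_use_temp_snapshot() and snapshot_remote_attachable() both returning True):

# Illustrative sketch of BaseVD.get_backup_device()'s rules; not cinder
# code. Returns which kind of device is handed to the backup service.
def choose_backup_device(previous_status, from_snapshot, can_use_temp_snap):
    if can_use_temp_snap:
        if from_snapshot:
            return 'source snapshot'        # backed up directly
        if previous_status == 'in-use':
            return 'temp snapshot'          # cleaned up afterwards
        return 'volume'
    if from_snapshot:
        return 'temp volume from snapshot'  # cleaned up afterwards
    if previous_status == 'in-use':
        return 'temp cloned volume'         # cleaned up afterwards
    return 'volume'


assert choose_backup_device('available', False, False) == 'volume'
assert choose_backup_device('in-use', False, True) == 'temp snapshot'
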
@ -204,7 +204,7 @@ def locked_snapshot_operation(f):
class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    RPC_API_VERSION = '1.37'
    RPC_API_VERSION = '1.38'

    target = messaging.Target(version=RPC_API_VERSION)

@ -3494,3 +3494,16 @@ class VolumeManager(manager.SchedulerDependentManager):
        capabilities = self.driver.capabilities
        LOG.debug("Obtained capabilities list: %s.", capabilities)
        return capabilities

    def get_backup_device(self, ctxt, backup):
        (backup_device, is_snapshot) = (
            self.driver.get_backup_device(ctxt, backup))
        secure_enabled = self.driver.secure_file_operations_enabled()
        backup_device_dict = {'backup_device': backup_device,
                              'secure_enabled': secure_enabled,
                              'is_snapshot': is_snapshot, }
        return backup_device_dict

    def secure_file_operations_enabled(self, ctxt, volume):
        secure_enabled = self.driver.secure_file_operations_enabled()
        return secure_enabled
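
The manager wraps the driver's (device, is_snapshot) tuple in a plain dict so the result serializes cleanly across the RPC boundary. A minimal caller-side sketch of unpacking it (the dict shape matches the code above; the function name is illustrative):

# Minimal consumer sketch for the dict VolumeManager.get_backup_device()
# returns; not cinder code.
def unpack_backup_device(backup_device_dict):
    return (backup_device_dict['backup_device'],
            backup_device_dict['is_snapshot'],
            backup_device_dict['secure_enabled'])


device, is_snapshot, secure = unpack_backup_device(
    {'backup_device': 'vol-1', 'secure_enabled': False,
     'is_snapshot': False})
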
@ -85,9 +85,11 @@ class VolumeAPI(rpc.RPCAPI):
        migrate_volume_completion(), and update_migrated_volume().
        1.37 - Adds old_reservations parameter to retype to support quota
               checks in the API.
        1.38 - Scaling backup service, add get_backup_device() and
               secure_file_operations_enabled()
    """

    RPC_API_VERSION = '1.37'
    RPC_API_VERSION = '1.38'
    TOPIC = CONF.volume_topic
    BINARY = 'cinder-volume'

@ -326,3 +328,15 @@ class VolumeAPI(rpc.RPCAPI):
    def get_capabilities(self, ctxt, host, discover):
        cctxt = self._get_cctxt(host, '1.29')
        return cctxt.call(ctxt, 'get_capabilities', discover=discover)

    def get_backup_device(self, ctxt, backup, volume):
        new_host = utils.extract_host(volume.host)
        cctxt = self.client.prepare(server=new_host, version='1.38')
        return cctxt.call(ctxt, 'get_backup_device',
                          backup=backup)

    def secure_file_operations_enabled(self, ctxt, volume):
        new_host = utils.extract_host(volume.host)
        cctxt = self.client.prepare(server=new_host, version='1.38')
        return cctxt.call(ctxt, 'secure_file_operations_enabled',
                          volume=volume)
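
Taken together, the 1.38 additions let any cinder-backup process serve volumes that live on other backends: it resolves the owning cinder-volume host from volume.host and issues a synchronous call there. A hedged caller-side sketch of the intended use (stub names are illustrative, not cinder's):

# Illustrative caller-side sketch; not cinder code.
def prepare_backup(volume_rpcapi, ctxt, backup, volume):
    info = volume_rpcapi.get_backup_device(ctxt, backup, volume)
    # The owning volume service decides what to expose: the volume
    # itself, a temp clone, or a temp snapshot.
    return (info['backup_device'], info['is_snapshot'],
            info['secure_enabled'])


if __name__ == '__main__':
    class StubRPC(object):
        def get_backup_device(self, ctxt, backup, volume):
            return {'backup_device': volume, 'secure_enabled': True,
                    'is_snapshot': False}

    print(prepare_backup(StubRPC(), None, 'backup-1', 'vol-1'))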