Create backups via scheduler

Now all backup-create operations will be processed via scheduler as we do
for volume. Backup-specific filters will be added in a follow-up patch.

Related blueprint: backup-host-selection-algorithm

Change-Id: Ie2afb57c4861c41982612e6054767cef43fdb867
This commit is contained in:
Ivan Kolodyazhny 2019-01-11 18:09:00 +02:00 committed by Jay Bryant
parent 3a73123472
commit f0211b53b8
12 changed files with 184 additions and 121 deletions

View File

@ -40,8 +40,8 @@ from cinder.policies import backups as policy
import cinder.policy import cinder.policy
from cinder import quota from cinder import quota
from cinder import quota_utils from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
import cinder.volume import cinder.volume
from cinder.volume import volume_utils
backup_opts = [ backup_opts = [
cfg.BoolOpt('backup_use_same_host', cfg.BoolOpt('backup_use_same_host',
@ -61,6 +61,7 @@ class API(base.Base):
def __init__(self, db=None): def __init__(self, db=None):
self.backup_rpcapi = backup_rpcapi.BackupAPI() self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_api = cinder.volume.API() self.volume_api = cinder.volume.API()
super(API, self).__init__(db) super(API, self).__init__(db)
@ -227,10 +228,6 @@ class API(base.Base):
raise exception.InvalidVolume(reason=msg) raise exception.InvalidVolume(reason=msg)
previous_status = volume['status'] previous_status = volume['status']
volume_host = volume_utils.extract_host(volume.host, 'host')
availability_zone = availability_zone or volume.availability_zone
host = self._get_available_backup_service_host(volume_host,
availability_zone)
# Reserve a quota before setting volume status and backup status # Reserve a quota before setting volume status and backup status
try: try:
@ -313,8 +310,6 @@ class API(base.Base):
'container': container, 'container': container,
'parent_id': parent_id, 'parent_id': parent_id,
'size': volume['size'], 'size': volume['size'],
'host': host,
'availability_zone': availability_zone,
'snapshot_id': snapshot_id, 'snapshot_id': snapshot_id,
'data_timestamp': data_timestamp, 'data_timestamp': data_timestamp,
'parent': parent, 'parent': parent,
@ -334,10 +329,7 @@ class API(base.Base):
finally: finally:
QUOTAS.rollback(context, reservations) QUOTAS.rollback(context, reservations)
# TODO(DuncanT): In future, when we have a generic local attach, self.scheduler_rpcapi.create_backup(context, backup)
# this can go via the scheduler, which enables
# better load balancing and isolation of services
self.backup_rpcapi.create_backup(context, backup)
return backup return backup

View File

@ -136,12 +136,6 @@ class BackupManager(manager.SchedulerDependentManager):
self.driver_name = new_name self.driver_name = new_name
self.service = importutils.import_class(self.driver_name) self.service = importutils.import_class(self.driver_name)
def _update_backup_error(self, backup, err,
status=fields.BackupStatus.ERROR):
backup.status = status
backup.fail_reason = err
backup.save()
def init_host(self, **kwargs): def init_host(self, **kwargs):
"""Run initialization needed for a standalone service.""" """Run initialization needed for a standalone service."""
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
@ -239,7 +233,7 @@ class BackupManager(manager.SchedulerDependentManager):
self._cleanup_one_volume(ctxt, volume) self._cleanup_one_volume(ctxt, volume)
err = 'incomplete backup reset on manager restart' err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
elif backup['status'] == fields.BackupStatus.RESTORING: elif backup['status'] == fields.BackupStatus.RESTORING:
LOG.info('Resetting backup %s to ' LOG.info('Resetting backup %s to '
'available (was restoring).', 'available (was restoring).',
@ -369,7 +363,7 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status, 'expected_status': expected_status,
'actual_status': actual_status, 'actual_status': actual_status,
} }
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.InvalidSnapshot(reason=err) raise exception.InvalidSnapshot(reason=err)
else: else:
actual_status = volume['status'] actual_status = volume['status']
@ -379,7 +373,7 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status, 'expected_status': expected_status,
'actual_status': actual_status, 'actual_status': actual_status,
} }
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.InvalidVolume(reason=err) raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.CREATING expected_status = fields.BackupStatus.CREATING
@ -390,13 +384,13 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status, 'expected_status': expected_status,
'actual_status': actual_status, 'actual_status': actual_status,
} }
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err) raise exception.InvalidBackup(reason=err)
try: try:
if not self.is_working(): if not self.is_working():
err = _('Create backup aborted due to backup service is down.') err = _('Create backup aborted due to backup service is down.')
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err) raise exception.InvalidBackup(reason=err)
backup.service = self.driver_name backup.service = self.driver_name
@ -412,7 +406,7 @@ class BackupManager(manager.SchedulerDependentManager):
context, volume_id, context, volume_id,
{'status': previous_status, {'status': previous_status,
'previous_status': 'error_backing-up'}) 'previous_status': 'error_backing-up'})
self._update_backup_error(backup, six.text_type(err)) volume_utils.update_backup_error(backup, six.text_type(err))
# Restore the original status. # Restore the original status.
if snapshot_id: if snapshot_id:
@ -559,7 +553,7 @@ class BackupManager(manager.SchedulerDependentManager):
'%(expected_status)s but got %(actual_status)s.') % '%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status, {'expected_status': expected_status,
'actual_status': actual_status}) 'actual_status': actual_status})
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
self.db.volume_update(context, volume_id, self.db.volume_update(context, volume_id,
{'status': fields.VolumeStatus.ERROR}) {'status': fields.VolumeStatus.ERROR})
raise exception.InvalidBackup(reason=err) raise exception.InvalidBackup(reason=err)
@ -718,13 +712,13 @@ class BackupManager(manager.SchedulerDependentManager):
'%(expected_status)s but got %(actual_status)s.') \ '%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status, % {'expected_status': expected_status,
'actual_status': actual_status} 'actual_status': actual_status}
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err) raise exception.InvalidBackup(reason=err)
if backup.service and not self.is_working(): if backup.service and not self.is_working():
err = _('Delete backup is aborted due to backup service is down.') err = _('Delete backup is aborted due to backup service is down.')
status = fields.BackupStatus.ERROR_DELETING status = fields.BackupStatus.ERROR_DELETING
self._update_backup_error(backup, err, status) volume_utils.update_backup_error(backup, err, status)
raise exception.InvalidBackup(reason=err) raise exception.InvalidBackup(reason=err)
if not self._is_our_backup(backup): if not self._is_our_backup(backup):
@ -734,7 +728,7 @@ class BackupManager(manager.SchedulerDependentManager):
' backup [%(backup_service)s].')\ ' backup [%(backup_service)s].')\
% {'configured_service': self.driver_name, % {'configured_service': self.driver_name,
'backup_service': backup.service} 'backup_service': backup.service}
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err) raise exception.InvalidBackup(reason=err)
if backup.service: if backup.service:
@ -743,7 +737,8 @@ class BackupManager(manager.SchedulerDependentManager):
backup_service.delete_backup(backup) backup_service.delete_backup(backup)
except Exception as err: except Exception as err:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
self._update_backup_error(backup, six.text_type(err)) volume_utils.update_backup_error(backup,
six.text_type(err))
# Get reservations # Get reservations
try: try:
@ -874,7 +869,7 @@ class BackupManager(manager.SchedulerDependentManager):
err = _('Import record failed, cannot find backup ' err = _('Import record failed, cannot find backup '
'service to perform the import. Request service ' 'service to perform the import. Request service '
'%(service)s.') % {'service': backup_service} '%(service)s.') % {'service': backup_service}
self._update_backup_error(backup, err) volume_utils.update_backup_error(backup, err)
raise exception.ServiceNotFound(service_id=backup_service) raise exception.ServiceNotFound(service_id=backup_service)
else: else:
# Yes... # Yes...
@ -888,7 +883,7 @@ class BackupManager(manager.SchedulerDependentManager):
backup_service.import_record(backup, driver_options) backup_service.import_record(backup, driver_options)
except Exception as err: except Exception as err:
msg = six.text_type(err) msg = six.text_type(err)
self._update_backup_error(backup, msg) volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg) raise exception.InvalidBackup(reason=msg)
required_import_options = { required_import_options = {
@ -907,7 +902,7 @@ class BackupManager(manager.SchedulerDependentManager):
msg = (_('Driver successfully decoded imported backup data, ' msg = (_('Driver successfully decoded imported backup data, '
'but there are missing fields (%s).') % 'but there are missing fields (%s).') %
', '.join(missing_opts)) ', '.join(missing_opts))
self._update_backup_error(backup, msg) volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg) raise exception.InvalidBackup(reason=msg)
# Confirm the ID from the record in the DB is the right one # Confirm the ID from the record in the DB is the right one
@ -916,7 +911,7 @@ class BackupManager(manager.SchedulerDependentManager):
msg = (_('Trying to import backup metadata from id %(meta_id)s' msg = (_('Trying to import backup metadata from id %(meta_id)s'
' into backup %(id)s.') % ' into backup %(id)s.') %
{'meta_id': backup_id, 'id': backup.id}) {'meta_id': backup_id, 'id': backup.id})
self._update_backup_error(backup, msg) volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg) raise exception.InvalidBackup(reason=msg)
# Overwrite some fields # Overwrite some fields

View File

@ -154,3 +154,8 @@ class Scheduler(object):
"""Must override schedule method for scheduler to work.""" """Must override schedule method for scheduler to work."""
raise NotImplementedError(_( raise NotImplementedError(_(
"Must implement schedule_get_pools")) "Must implement schedule_get_pools"))
def get_backup_host(self, volume, driver=None):
    """Must override get_backup_host method for scheduler to work."""
    raise NotImplementedError(_(
        "Must implement get_backup_host"))

View File

@ -576,3 +576,6 @@ class FilterScheduler(driver.Scheduler):
backend_state = top_backend.obj backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id) LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend return top_backend
def get_backup_host(self, volume, driver=None):
    """Delegate backup-host selection to the host manager."""
    return self.host_manager.get_backup_host(volume, driver=driver)

View File

@ -15,12 +15,8 @@
"""Manage backends in the current zone.""" """Manage backends in the current zone."""
# TODO(smcginnis) update this once six has support for collections.abc import collections
# (https://github.com/benjaminp/six/pull/241) or clean up once we drop py2.7. import random
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
@ -71,7 +67,7 @@ CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver')
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class ReadOnlyDict(Mapping): class ReadOnlyDict(collections.Mapping):
"""A read-only dict.""" """A read-only dict."""
def __init__(self, source=None): def __init__(self, source=None):
if source is not None: if source is not None:
@ -901,3 +897,74 @@ class HostManager(object):
# If the capability and value are not in the same type, # If the capability and value are not in the same type,
# we just convert them into string to compare them. # we just convert them into string to compare them.
return str(value) == str(capability) return str(value) == str(capability)
def get_backup_host(self, volume, driver=None):
    """Pick a backup service host, preferring the volume's own locality.

    When *volume* is None any enabled backup host may be chosen.
    """
    volume_host = None
    az = None
    if volume:
        volume_host = volume_utils.extract_host(volume.host, 'host')
        az = volume.availability_zone
    return self._get_available_backup_service_host(volume_host, az, driver)
def _get_any_available_backup_service(self, availability_zone,
                                      driver=None):
    """Get an available backup service host.

    Get an available backup service host in the specified
    availability zone; returns ``None`` when no service matches.
    """
    # Materialize into a concrete list (the redundant list
    # comprehension was dropped) so shuffle() can work in place;
    # shuffling gives crude load balancing across matching hosts.
    services = list(self._list_backup_services(availability_zone, driver))
    random.shuffle(services)
    return services[0] if services else None
def _get_available_backup_service_host(self, host, az, driver=None):
    """Return an appropriate backup service host.

    :raises: ServiceNotFound when no suitable backup host is enabled.
    """
    if host and CONF.backup_use_same_host:
        # The operator asked to keep the backup on the volume's host;
        # honor that only if a backup service actually runs there.
        if self._is_backup_service_enabled(az, host):
            return host
    else:
        candidate = self._get_any_available_backup_service(az, driver)
        if candidate:
            return candidate
    raise exception.ServiceNotFound(service_id='cinder-backup')
def _list_backup_services(self, availability_zone, driver=None):
    """List all enabled backup services.

    :returns: list -- hosts for services that are enabled for backup.
    """
    def _matches(cap):
        # No filters at all: every enabled backend qualifies.
        if driver is None and availability_zone is None:
            return True
        if driver and cap['driver_name'] != driver:
            return False
        return (not availability_zone or
                cap['availability_zone'] == availability_zone)

    return [backend
            for backend, cap in self.backup_service_states.items()
            if cap['backend_state'] and _matches(cap)]
def _az_matched(self, service, availability_zone):
    """Return True if *service* is in the given AZ, or no AZ was requested."""
    if not availability_zone:
        return True
    return service.availability_zone == availability_zone
def _is_backup_service_enabled(self, availability_zone, host):
    """Check if there is a backup service available on *host*."""
    ctxt = cinder_context.get_admin_context()
    # Only services that are not administratively disabled are listed.
    services = objects.ServiceList.get_all_by_topic(
        ctxt, constants.BACKUP_TOPIC, disabled=False)
    return any(self._az_matched(srv, availability_zone) and
               srv.host == host and srv.is_up
               for srv in services)

View File

@ -49,6 +49,7 @@ from cinder import rpc
from cinder.scheduler.flows import create_volume from cinder.scheduler.flows import create_volume
from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils as vol_utils
scheduler_manager_opts = [ scheduler_manager_opts = [
@ -626,3 +627,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
LOG.info('Cleanup requests completed.') LOG.info('Cleanup requests completed.')
return requested, not_requested return requested, not_requested
def create_backup(self, context, backup):
    """Select a backup host for *backup* and hand it to the backup service.

    On scheduling failure the backup object is flagged as errored
    instead of raising back through the RPC layer.
    """
    volume = self.db.volume_get(context, backup.volume_id)
    try:
        # Only the host lookup legitimately raises ServiceNotFound; keep
        # the try body minimal so unrelated failures propagate loudly.
        host = self.driver.get_backup_host(volume)
    except exception.ServiceNotFound:
        msg = "Service not found for creating backup."
        LOG.error(msg)
        vol_utils.update_backup_error(backup, msg)
        return
    backup.host = host
    backup.save()
    self.backup_api.create_backup(context, backup)

View File

@ -72,9 +72,10 @@ class SchedulerAPI(rpc.RPCAPI):
3.9 - Adds create_snapshot method 3.9 - Adds create_snapshot method
3.10 - Adds backup_id to create_volume method. 3.10 - Adds backup_id to create_volume method.
3.11 - Adds manage_existing_snapshot method. 3.11 - Adds manage_existing_snapshot method.
3.12 - Adds create_backup method.
""" """
RPC_API_VERSION = '3.11' RPC_API_VERSION = '3.12'
RPC_DEFAULT_VERSION = '3.0' RPC_DEFAULT_VERSION = '3.0'
TOPIC = constants.SCHEDULER_TOPIC TOPIC = constants.SCHEDULER_TOPIC
BINARY = 'cinder-scheduler' BINARY = 'cinder-scheduler'
@ -260,3 +261,9 @@ class SchedulerAPI(rpc.RPCAPI):
def get_log_levels(self, context, service, log_request): def get_log_levels(self, context, service, log_request):
cctxt = self._get_cctxt(server=service.host, version='3.7') cctxt = self._get_cctxt(server=service.host, version='3.7')
return cctxt.call(context, 'get_log_levels', log_request=log_request) return cctxt.call(context, 'get_log_levels', log_request=log_request)
@rpc.assert_min_rpc_version('3.12')
def create_backup(self, ctxt, backup):
    """Cast a create_backup request to the scheduler service."""
    cctxt = self._get_cctxt()
    return cctxt.cast(ctxt, 'create_backup', backup=backup)

View File

@ -521,13 +521,7 @@ class BackupsAPITestCase(test.TestCase):
fake_auth_context=self.user_context)) fake_auth_context=self.user_context))
self.assertEqual(http_client.BAD_REQUEST, res.status_int) self.assertEqual(http_client.BAD_REQUEST, res.status_int)
@mock.patch('cinder.db.service_get_all') def test_create_backup_json(self):
def test_create_backup_json(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
volume = utils.create_volume(self.context, size=5) volume = utils.create_volume(self.context, size=5)
body = {"backup": {"name": "nightly001", body = {"backup": {"name": "nightly001",
@ -548,9 +542,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup']) self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy() volume.destroy()
@ -661,11 +652,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(202, res.status_code) self.assertEqual(202, res.status_code)
res_dict = jsonutils.loads(res.body)
backup = objects.Backup.get_by_id(self.context,
res_dict['backup']['id'])
self.assertEqual(backup_svc_az, backup.availability_zone)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_create_backup_inuse_no_force(self, def test_create_backup_inuse_no_force(self,
_mock_service_get_all): _mock_service_get_all):
@ -699,13 +685,7 @@ class BackupsAPITestCase(test.TestCase):
volume.destroy() volume.destroy()
@mock.patch('cinder.db.service_get_all') def test_create_backup_inuse_force(self):
def test_create_backup_inuse_force(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
volume = utils.create_volume(self.context, size=5, status='in-use') volume = utils.create_volume(self.context, size=5, status='in-use')
backup = utils.create_backup(self.context, volume.id, backup = utils.create_backup(self.context, volume.id,
status=fields.BackupStatus.AVAILABLE, status=fields.BackupStatus.AVAILABLE,
@ -730,20 +710,11 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup']) self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
backup.destroy() backup.destroy()
volume.destroy() volume.destroy()
@mock.patch('cinder.db.service_get_all') def test_create_backup_snapshot_json(self):
def test_create_backup_snapshot_json(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
volume = utils.create_volume(self.context, size=5, status='available') volume = utils.create_volume(self.context, size=5, status='available')
body = {"backup": {"name": "nightly001", body = {"backup": {"name": "nightly001",
@ -763,9 +734,6 @@ class BackupsAPITestCase(test.TestCase):
res_dict = jsonutils.loads(res.body) res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.ACCEPTED, res.status_int) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup']) self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy() volume.destroy()
@ -868,15 +836,8 @@ class BackupsAPITestCase(test.TestCase):
req, req,
body=body) body=body)
@mock.patch('cinder.db.service_get_all')
@ddt.data(False, True) @ddt.data(False, True)
def test_create_backup_delta(self, backup_from_snapshot, def test_create_backup_delta(self, backup_from_snapshot):
_mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
volume = utils.create_volume(self.context, size=5) volume = utils.create_volume(self.context, size=5)
snapshot = None snapshot = None
if backup_from_snapshot: if backup_from_snapshot:
@ -918,9 +879,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup']) self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
backup.destroy() backup.destroy()
if snapshot: if snapshot:
@ -1091,14 +1049,7 @@ class BackupsAPITestCase(test.TestCase):
res = req.get_response(fakes.wsgi_app( res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context)) fake_auth_context=self.user_context))
res_dict = jsonutils.loads(res.body) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
self.assertEqual(http_client.SERVICE_UNAVAILABLE,
res_dict['serviceUnavailable']['code'])
self.assertEqual('Service cinder-backup could not be found.',
res_dict['serviceUnavailable']['message'])
volume.refresh()
self.assertEqual('available', volume.status) self.assertEqual('available', volume.status)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@ -1135,13 +1086,7 @@ class BackupsAPITestCase(test.TestCase):
volume.destroy() volume.destroy()
@mock.patch('cinder.db.service_get_all') def test_create_backup_with_null_validate(self):
def test_create_backup_with_null_validate(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
volume = utils.create_volume(self.context, size=5) volume = utils.create_volume(self.context, size=5)
body = {"backup": {"name": None, body = {"backup": {"name": None,
@ -1162,9 +1107,7 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup']) self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy() volume.destroy()
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@ -1194,9 +1137,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int) self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup']) self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy() volume.destroy()
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@ -1471,6 +1411,7 @@ class BackupsAPITestCase(test.TestCase):
req = webob.Request.blank('/v2/%s/backups/%s' % ( req = webob.Request.blank('/v2/%s/backups/%s' % (
fake.PROJECT_ID, backup.id)) fake.PROJECT_ID, backup.id))
req.method = 'DELETE' req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json' req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app( res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context)) fake_auth_context=self.user_context))

View File

@ -1905,20 +1905,6 @@ class BackupAPITestCase(BaseBackupTest):
volume_id=volume_id, volume_id=volume_id,
container='volumebackups') container='volumebackups')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch('cinder.backup.api.API._is_backup_service_enabled')
def test_create_backup_in_same_host(self, mock_is_enable,
mock_create):
self.override_config('backup_use_same_host', True)
mock_is_enable.return_value = True
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='available',
host='testhost#lvm',
size=1)
backup = self.api.create(self.ctxt, None, None, volume_id, None)
self.assertEqual('testhost', backup.host)
@mock.patch.object(api.API, '_get_available_backup_service_host', @mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host') return_value='fake_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')

View File

@ -24,6 +24,7 @@ from cinder import exception
from cinder import objects from cinder import objects
from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import test from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume from cinder.tests.unit import fake_volume
@ -44,6 +45,7 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
self.fake_rs_obj = objects.RequestSpec.from_primitives({}) self.fake_rs_obj = objects.RequestSpec.from_primitives({})
self.fake_rs_dict = {'volume_id': self.volume_id} self.fake_rs_dict = {'volume_id': self.volume_id}
self.fake_fp_dict = {'availability_zone': 'fake_az'} self.fake_fp_dict = {'availability_zone': 'fake_az'}
self.fake_backup_dict = fake_backup.fake_backup_obj(self.context)
@ddt.data('3.0', '3.3') @ddt.data('3.0', '3.3')
@mock.patch('oslo_messaging.RPCClient.can_send_version') @mock.patch('oslo_messaging.RPCClient.can_send_version')
@ -299,3 +301,11 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
service=service, service=service,
log_request='log_request', log_request='log_request',
version='3.7') version='3.7')
@mock.patch('oslo_messaging.RPCClient.can_send_version')
def test_create_backup(self, can_send_version):
    # create_backup was added in scheduler RPC API 3.12, so the cast
    # must be gated on the server supporting at least that version.
    self._test_rpc_api('create_backup',
                       rpc_method='cast',
                       backup=self.fake_backup_dict)
    can_send_version.assert_called_once_with('3.12')

View File

@ -31,6 +31,7 @@ from cinder import objects
from cinder.scheduler import driver from cinder.scheduler import driver
from cinder.scheduler import manager from cinder.scheduler import manager
from cinder import test from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume from cinder.tests.unit import fake_volume
from cinder.tests.unit.scheduler import fakes as fake_scheduler from cinder.tests.unit.scheduler import fakes as fake_scheduler
@ -583,6 +584,42 @@ class SchedulerManagerTestCase(test.TestCase):
self.assertEqual(1, vol_clean_mock.call_count) self.assertEqual(1, vol_clean_mock.call_count)
self.assertEqual(1, sch_clean_mock.call_count) self.assertEqual(1, sch_clean_mock.call_count)
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch('cinder.objects.backup.Backup.save')
@mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host')
@mock.patch('cinder.db.volume_get')
def test_create_backup(self, mock_volume_get, mock_host, mock_save,
                       mock_create):
    """Happy path: a host is selected, saved, and the backup is cast."""
    fake_volume_ref = fake_volume.fake_db_volume()
    mock_volume_get.return_value = fake_volume_ref
    mock_host.return_value = 'cinder-backup'
    backup = fake_backup.fake_backup_obj(self.context)

    self.manager.create_backup(self.context, backup=backup)

    mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
    mock_host.assert_called_once_with(fake_volume_ref)
    mock_save.assert_called_once()
    mock_create.assert_called_once_with(self.context, backup)
@mock.patch('cinder.volume.volume_utils.update_backup_error')
@mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host')
@mock.patch('cinder.db.volume_get')
def test_create_backup_no_service(self, mock_volume_get, mock_host,
                                  mock_error):
    """When no backup host is found the backup is flagged as errored."""
    fake_volume_ref = fake_volume.fake_db_volume()
    mock_volume_get.return_value = fake_volume_ref
    mock_host.side_effect = exception.ServiceNotFound(
        service_id='cinder-volume')
    backup = fake_backup.fake_backup_obj(self.context)

    self.manager.create_backup(self.context, backup=backup)

    mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
    mock_host.assert_called_once_with(fake_volume_ref)
    mock_error.assert_called_once_with(
        backup, 'Service not found for creating backup.')
class SchedulerTestCase(test.TestCase): class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class.""" """Test case for base scheduler driver class."""

View File

@ -52,6 +52,7 @@ from cinder import db
from cinder import exception from cinder import exception
from cinder.i18n import _ from cinder.i18n import _
from cinder import objects from cinder import objects
from cinder.objects import fields
from cinder import rpc from cinder import rpc
from cinder import utils from cinder import utils
from cinder.volume import group_types from cinder.volume import group_types
@ -1245,3 +1246,9 @@ def resolve_hostname(hostname):
LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.', LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
{'host': hostname, 'ip': ip}) {'host': hostname, 'ip': ip})
return ip return ip
def update_backup_error(backup, err, status=fields.BackupStatus.ERROR):
    """Mark *backup* as failed and persist the failure reason.

    :param backup: backup object to update (saved in place)
    :param err: human-readable failure reason
    :param status: target status, defaults to BackupStatus.ERROR
    """
    backup.fail_reason = err
    backup.status = status
    backup.save()