Browse Source

Create backups via scheduler

Now all backup-create operations will be processed via the scheduler,
as we already do for volumes. Backup-specific filters will be added in a
follow-up patch.

Related blueprint: backup-host-selection-algorigthm

Change-Id: Ie2afb57c4861c41982612e6054767cef43fdb867
changes/05/630305/14
Ivan Kolodyazhny 3 years ago
committed by Jay Bryant
parent
commit
f0211b53b8
  1. 14
      cinder/backup/api.py
  2. 37
      cinder/backup/manager.py
  3. 5
      cinder/scheduler/driver.py
  4. 3
      cinder/scheduler/filter_scheduler.py
  5. 81
      cinder/scheduler/host_manager.py
  6. 13
      cinder/scheduler/manager.py
  7. 9
      cinder/scheduler/rpcapi.py
  8. 75
      cinder/tests/unit/api/contrib/test_backups.py
  9. 14
      cinder/tests/unit/backup/test_backup.py
  10. 10
      cinder/tests/unit/scheduler/test_rpcapi.py
  11. 37
      cinder/tests/unit/scheduler/test_scheduler.py
  12. 7
      cinder/volume/volume_utils.py

14
cinder/backup/api.py

@ -40,8 +40,8 @@ from cinder.policies import backups as policy
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
import cinder.volume
from cinder.volume import volume_utils
backup_opts = [
cfg.BoolOpt('backup_use_same_host',
@ -61,6 +61,7 @@ class API(base.Base):
def __init__(self, db=None):
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_api = cinder.volume.API()
super(API, self).__init__(db)
@ -227,10 +228,6 @@ class API(base.Base):
raise exception.InvalidVolume(reason=msg)
previous_status = volume['status']
volume_host = volume_utils.extract_host(volume.host, 'host')
availability_zone = availability_zone or volume.availability_zone
host = self._get_available_backup_service_host(volume_host,
availability_zone)
# Reserve a quota before setting volume status and backup status
try:
@ -313,8 +310,6 @@ class API(base.Base):
'container': container,
'parent_id': parent_id,
'size': volume['size'],
'host': host,
'availability_zone': availability_zone,
'snapshot_id': snapshot_id,
'data_timestamp': data_timestamp,
'parent': parent,
@ -334,10 +329,7 @@ class API(base.Base):
finally:
QUOTAS.rollback(context, reservations)
# TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
# better load balancing and isolation of services
self.backup_rpcapi.create_backup(context, backup)
self.scheduler_rpcapi.create_backup(context, backup)
return backup

37
cinder/backup/manager.py

@ -136,12 +136,6 @@ class BackupManager(manager.SchedulerDependentManager):
self.driver_name = new_name
self.service = importutils.import_class(self.driver_name)
def _update_backup_error(self, backup, err,
status=fields.BackupStatus.ERROR):
backup.status = status
backup.fail_reason = err
backup.save()
def init_host(self, **kwargs):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
@ -239,7 +233,7 @@ class BackupManager(manager.SchedulerDependentManager):
self._cleanup_one_volume(ctxt, volume)
err = 'incomplete backup reset on manager restart'
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
elif backup['status'] == fields.BackupStatus.RESTORING:
LOG.info('Resetting backup %s to '
'available (was restoring).',
@ -369,7 +363,7 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.InvalidSnapshot(reason=err)
else:
actual_status = volume['status']
@ -379,7 +373,7 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.InvalidVolume(reason=err)
expected_status = fields.BackupStatus.CREATING
@ -390,13 +384,13 @@ class BackupManager(manager.SchedulerDependentManager):
'expected_status': expected_status,
'actual_status': actual_status,
}
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
try:
if not self.is_working():
err = _('Create backup aborted due to backup service is down.')
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
backup.service = self.driver_name
@ -412,7 +406,7 @@ class BackupManager(manager.SchedulerDependentManager):
context, volume_id,
{'status': previous_status,
'previous_status': 'error_backing-up'})
self._update_backup_error(backup, six.text_type(err))
volume_utils.update_backup_error(backup, six.text_type(err))
# Restore the original status.
if snapshot_id:
@ -559,7 +553,7 @@ class BackupManager(manager.SchedulerDependentManager):
'%(expected_status)s but got %(actual_status)s.') %
{'expected_status': expected_status,
'actual_status': actual_status})
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
self.db.volume_update(context, volume_id,
{'status': fields.VolumeStatus.ERROR})
raise exception.InvalidBackup(reason=err)
@ -718,13 +712,13 @@ class BackupManager(manager.SchedulerDependentManager):
'%(expected_status)s but got %(actual_status)s.') \
% {'expected_status': expected_status,
'actual_status': actual_status}
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
if backup.service and not self.is_working():
err = _('Delete backup is aborted due to backup service is down.')
status = fields.BackupStatus.ERROR_DELETING
self._update_backup_error(backup, err, status)
volume_utils.update_backup_error(backup, err, status)
raise exception.InvalidBackup(reason=err)
if not self._is_our_backup(backup):
@ -734,7 +728,7 @@ class BackupManager(manager.SchedulerDependentManager):
' backup [%(backup_service)s].')\
% {'configured_service': self.driver_name,
'backup_service': backup.service}
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
if backup.service:
@ -743,7 +737,8 @@ class BackupManager(manager.SchedulerDependentManager):
backup_service.delete_backup(backup)
except Exception as err:
with excutils.save_and_reraise_exception():
self._update_backup_error(backup, six.text_type(err))
volume_utils.update_backup_error(backup,
six.text_type(err))
# Get reservations
try:
@ -874,7 +869,7 @@ class BackupManager(manager.SchedulerDependentManager):
err = _('Import record failed, cannot find backup '
'service to perform the import. Request service '
'%(service)s.') % {'service': backup_service}
self._update_backup_error(backup, err)
volume_utils.update_backup_error(backup, err)
raise exception.ServiceNotFound(service_id=backup_service)
else:
# Yes...
@ -888,7 +883,7 @@ class BackupManager(manager.SchedulerDependentManager):
backup_service.import_record(backup, driver_options)
except Exception as err:
msg = six.text_type(err)
self._update_backup_error(backup, msg)
volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg)
required_import_options = {
@ -907,7 +902,7 @@ class BackupManager(manager.SchedulerDependentManager):
msg = (_('Driver successfully decoded imported backup data, '
'but there are missing fields (%s).') %
', '.join(missing_opts))
self._update_backup_error(backup, msg)
volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg)
# Confirm the ID from the record in the DB is the right one
@ -916,7 +911,7 @@ class BackupManager(manager.SchedulerDependentManager):
msg = (_('Trying to import backup metadata from id %(meta_id)s'
' into backup %(id)s.') %
{'meta_id': backup_id, 'id': backup.id})
self._update_backup_error(backup, msg)
volume_utils.update_backup_error(backup, msg)
raise exception.InvalidBackup(reason=msg)
# Overwrite some fields

5
cinder/scheduler/driver.py

@ -154,3 +154,8 @@ class Scheduler(object):
"""Must override schedule method for scheduler to work."""
raise NotImplementedError(_(
"Must implement schedule_get_pools"))
def get_backup_host(self, volume, driver=None):
    """Must override get_backup_host for the scheduler to work.

    :param volume: volume being backed up (may be None)
    :param driver: optional backup driver name to filter services by
    :raises NotImplementedError: always, in this base class
    """
    raise NotImplementedError(_(
        "Must implement get_backup_host"))

3
cinder/scheduler/filter_scheduler.py

@ -576,3 +576,6 @@ class FilterScheduler(driver.Scheduler):
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend
def get_backup_host(self, volume, driver=None):
    """Return a host eligible to run the backup of *volume*.

    Delegates the actual selection to the host manager.

    :param volume: volume being backed up (may be None)
    :param driver: optional backup driver name to filter services by
    :returns: hostname of an available cinder-backup service
    """
    return self.host_manager.get_backup_host(volume, driver)

81
cinder/scheduler/host_manager.py

@ -15,12 +15,8 @@
"""Manage backends in the current zone."""
# TODO(smcginnis) update this once six has support for collections.abc
# (https://github.com/benjaminp/six/pull/241) or clean up once we drop py2.7.
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import collections
import random
from oslo_config import cfg
from oslo_log import log as logging
@ -71,7 +67,7 @@ CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver')
LOG = logging.getLogger(__name__)
class ReadOnlyDict(Mapping):
class ReadOnlyDict(collections.Mapping):
"""A read-only dict."""
def __init__(self, source=None):
if source is not None:
@ -901,3 +897,74 @@ class HostManager(object):
# If the capability and value are not in the same type,
# we just convert them into string to compare them.
return str(value) == str(capability)
def get_backup_host(self, volume, driver=None):
    """Return a backup service host appropriate for *volume*.

    When a volume is given, its own host and availability zone are
    used as selection hints; otherwise any available backup service
    may be chosen.

    :param volume: volume being backed up (may be None)
    :param driver: optional backup driver name to filter services by
    :returns: hostname of an available cinder-backup service
    """
    volume_host = None
    az = None
    if volume:
        volume_host = volume_utils.extract_host(volume.host, 'host')
        az = volume.availability_zone
    return self._get_available_backup_service_host(volume_host, az, driver)
def _get_any_available_backup_service(self, availability_zone,
                                      driver=None):
    """Pick one enabled backup service host at random.

    Selects among the backup services enabled in the given
    availability zone (and, optionally, running the given driver).

    :param availability_zone: AZ to restrict the choice to (or None)
    :param driver: optional backup driver name to filter services by
    :returns: a randomly chosen service host, or None if none exist
    """
    # _list_backup_services already returns a fresh list, so no copy
    # is needed; random.choice gives the same uniform selection as
    # shuffling and taking the first element.
    services = self._list_backup_services(availability_zone, driver)
    return random.choice(services) if services else None
def _get_available_backup_service_host(self, host, az, driver=None):
    """Return an appropriate backup service host.

    Prefers *host* itself when ``backup_use_same_host`` is set and a
    backup service is enabled there; otherwise picks any available
    backup service in the zone.

    :param host: host of the source volume (may be None)
    :param az: availability zone to search in (may be None)
    :param driver: optional backup driver name to filter services by
    :returns: hostname of an available cinder-backup service
    :raises ServiceNotFound: if no eligible backup service exists
    """
    if host and CONF.backup_use_same_host:
        candidate = host if self._is_backup_service_enabled(az, host) else None
    else:
        candidate = self._get_any_available_backup_service(az, driver)
    if not candidate:
        raise exception.ServiceNotFound(service_id='cinder-backup')
    return candidate
def _list_backup_services(self, availability_zone, driver=None):
    """List all enabled backup services.

    :param availability_zone: AZ to filter on, or None for all zones
    :param driver: backup driver name to filter on, or None for all
    :returns: list -- hosts for services that are enabled for backup.
    """
    def _matches(cap):
        # No filters at all: every enabled service qualifies.
        if driver is None and availability_zone is None:
            return True
        if driver and cap['driver_name'] != driver:
            return False
        if availability_zone:
            return cap['availability_zone'] == availability_zone
        return True

    return [backend
            for backend, cap in self.backup_service_states.items()
            if cap['backend_state'] and _matches(cap)]
def _az_matched(self, service, availability_zone):
    """Return True if *service* is in *availability_zone* (or no AZ given)."""
    if not availability_zone:
        return True
    return service.availability_zone == availability_zone
def _is_backup_service_enabled(self, availability_zone, host):
    """Check if there is a backup service available.

    :param availability_zone: AZ the service must match (or None)
    :param host: exact host the service must run on
    :returns: True if an enabled, up backup service matches
    """
    ctxt = cinder_context.get_admin_context()
    services = objects.ServiceList.get_all_by_topic(
        ctxt, constants.BACKUP_TOPIC, disabled=False)
    return any(self._az_matched(srv, availability_zone) and
               srv.host == host and srv.is_up
               for srv in services)

13
cinder/scheduler/manager.py

@ -49,6 +49,7 @@ from cinder import rpc
from cinder.scheduler.flows import create_volume
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils as vol_utils
scheduler_manager_opts = [
@ -626,3 +627,15 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
LOG.info('Cleanup requests completed.')
return requested, not_requested
def create_backup(self, context, backup):
    """Schedule a backup creation on an appropriate backup host.

    Looks up the source volume, asks the scheduler driver for a
    backup host, persists the host on the backup object and then
    casts the request to the backup service.

    :param context: request context
    :param backup: Backup object to schedule
    """
    volume = self.db.volume_get(context, backup.volume_id)
    try:
        host = self.driver.get_backup_host(volume)
        # The chosen host must be saved before the RPC cast so the
        # backup service finds it on the object.
        backup.host = host
        backup.save()
        self.backup_api.create_backup(context, backup)
    except exception.ServiceNotFound:
        # NOTE(review): the backup is flagged as errored here, but
        # the source volume's status (changed by the API before the
        # cast to the scheduler) is not restored -- confirm a
        # follow-up handles that cleanup.
        msg = "Service not found for creating backup."
        LOG.error(msg)
        vol_utils.update_backup_error(backup, msg)

9
cinder/scheduler/rpcapi.py

@ -72,9 +72,10 @@ class SchedulerAPI(rpc.RPCAPI):
3.9 - Adds create_snapshot method
3.10 - Adds backup_id to create_volume method.
3.11 - Adds manage_existing_snapshot method.
3.12 - Adds create_backup method.
"""
RPC_API_VERSION = '3.11'
RPC_API_VERSION = '3.12'
RPC_DEFAULT_VERSION = '3.0'
TOPIC = constants.SCHEDULER_TOPIC
BINARY = 'cinder-scheduler'
@ -260,3 +261,9 @@ class SchedulerAPI(rpc.RPCAPI):
def get_log_levels(self, context, service, log_request):
cctxt = self._get_cctxt(server=service.host, version='3.7')
return cctxt.call(context, 'get_log_levels', log_request=log_request)
@rpc.assert_min_rpc_version('3.12')
def create_backup(self, ctxt, backup):
    """Cast a create_backup request to the scheduler service."""
    cctxt = self._get_cctxt()
    return cctxt.cast(ctxt, 'create_backup', backup=backup)

75
cinder/tests/unit/api/contrib/test_backups.py

@ -521,13 +521,7 @@ class BackupsAPITestCase(test.TestCase):
fake_auth_context=self.user_context))
self.assertEqual(http_client.BAD_REQUEST, res.status_int)
@mock.patch('cinder.db.service_get_all')
def test_create_backup_json(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
def test_create_backup_json(self):
volume = utils.create_volume(self.context, size=5)
body = {"backup": {"name": "nightly001",
@ -548,9 +542,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy()
@ -661,11 +652,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(202, res.status_code)
res_dict = jsonutils.loads(res.body)
backup = objects.Backup.get_by_id(self.context,
res_dict['backup']['id'])
self.assertEqual(backup_svc_az, backup.availability_zone)
@mock.patch('cinder.db.service_get_all')
def test_create_backup_inuse_no_force(self,
_mock_service_get_all):
@ -699,13 +685,7 @@ class BackupsAPITestCase(test.TestCase):
volume.destroy()
@mock.patch('cinder.db.service_get_all')
def test_create_backup_inuse_force(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
def test_create_backup_inuse_force(self):
volume = utils.create_volume(self.context, size=5, status='in-use')
backup = utils.create_backup(self.context, volume.id,
status=fields.BackupStatus.AVAILABLE,
@ -730,20 +710,11 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
backup.destroy()
volume.destroy()
@mock.patch('cinder.db.service_get_all')
def test_create_backup_snapshot_json(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
def test_create_backup_snapshot_json(self):
volume = utils.create_volume(self.context, size=5, status='available')
body = {"backup": {"name": "nightly001",
@ -763,9 +734,6 @@ class BackupsAPITestCase(test.TestCase):
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy()
@ -868,15 +836,8 @@ class BackupsAPITestCase(test.TestCase):
req,
body=body)
@mock.patch('cinder.db.service_get_all')
@ddt.data(False, True)
def test_create_backup_delta(self, backup_from_snapshot,
_mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
def test_create_backup_delta(self, backup_from_snapshot):
volume = utils.create_volume(self.context, size=5)
snapshot = None
if backup_from_snapshot:
@ -918,9 +879,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
backup.destroy()
if snapshot:
@ -1091,14 +1049,7 @@ class BackupsAPITestCase(test.TestCase):
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
self.assertEqual(http_client.SERVICE_UNAVAILABLE,
res_dict['serviceUnavailable']['code'])
self.assertEqual('Service cinder-backup could not be found.',
res_dict['serviceUnavailable']['message'])
volume.refresh()
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertEqual('available', volume.status)
@mock.patch('cinder.db.service_get_all')
@ -1135,13 +1086,7 @@ class BackupsAPITestCase(test.TestCase):
volume.destroy()
@mock.patch('cinder.db.service_get_all')
def test_create_backup_with_null_validate(self, _mock_service_get_all):
_mock_service_get_all.return_value = [
{'availability_zone': 'fake_az', 'host': 'testhost',
'disabled': 0, 'updated_at': timeutils.utcnow(),
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}]
def test_create_backup_with_null_validate(self):
volume = utils.create_volume(self.context, size=5)
body = {"backup": {"name": None,
@ -1162,9 +1107,7 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy()
@mock.patch('cinder.db.service_get_all')
@ -1194,9 +1137,6 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(http_client.ACCEPTED, res.status_int)
self.assertIn('id', res_dict['backup'])
_mock_service_get_all.assert_called_once_with(mock.ANY,
disabled=False,
topic='cinder-backup')
volume.destroy()
@mock.patch('cinder.db.service_get_all')
@ -1471,6 +1411,7 @@ class BackupsAPITestCase(test.TestCase):
req = webob.Request.blank('/v2/%s/backups/%s' % (
fake.PROJECT_ID, backup.id))
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context))

14
cinder/tests/unit/backup/test_backup.py

@ -1905,20 +1905,6 @@ class BackupAPITestCase(BaseBackupTest):
volume_id=volume_id,
container='volumebackups')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch('cinder.backup.api.API._is_backup_service_enabled')
def test_create_backup_in_same_host(self, mock_is_enable,
mock_create):
self.override_config('backup_use_same_host', True)
mock_is_enable.return_value = True
self.ctxt.user_id = 'fake_user'
self.ctxt.project_id = 'fake_project'
volume_id = self._create_volume_db_entry(status='available',
host='testhost#lvm',
size=1)
backup = self.api.create(self.ctxt, None, None, volume_id, None)
self.assertEqual('testhost', backup.host)
@mock.patch.object(api.API, '_get_available_backup_service_host',
return_value='fake_host')
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')

10
cinder/tests/unit/scheduler/test_rpcapi.py

@ -24,6 +24,7 @@ from cinder import exception
from cinder import objects
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
@ -44,6 +45,7 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
self.fake_rs_obj = objects.RequestSpec.from_primitives({})
self.fake_rs_dict = {'volume_id': self.volume_id}
self.fake_fp_dict = {'availability_zone': 'fake_az'}
self.fake_backup_dict = fake_backup.fake_backup_obj(self.context)
@ddt.data('3.0', '3.3')
@mock.patch('oslo_messaging.RPCClient.can_send_version')
@ -299,3 +301,11 @@ class SchedulerRPCAPITestCase(test.RPCAPITestCase):
service=service,
log_request='log_request',
version='3.7')
@mock.patch('oslo_messaging.RPCClient.can_send_version')
def test_create_backup(self, can_send_version):
    """create_backup is cast to the scheduler at RPC version 3.12."""
    self._test_rpc_api('create_backup',
                       rpc_method='cast',
                       backup=self.fake_backup_dict)
    # The rpcapi method is gated on the 3.12 pin being negotiable.
    can_send_version.assert_called_once_with('3.12')

37
cinder/tests/unit/scheduler/test_scheduler.py

@ -31,6 +31,7 @@ from cinder import objects
from cinder.scheduler import driver
from cinder.scheduler import manager
from cinder import test
from cinder.tests.unit.backup import fake_backup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.scheduler import fakes as fake_scheduler
@ -583,6 +584,42 @@ class SchedulerManagerTestCase(test.TestCase):
self.assertEqual(1, vol_clean_mock.call_count)
self.assertEqual(1, sch_clean_mock.call_count)
@mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup')
@mock.patch('cinder.objects.backup.Backup.save')
@mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host')
@mock.patch('cinder.db.volume_get')
def test_create_backup(self, mock_volume_get, mock_host, mock_save,
                       mock_create):
    """Happy path: scheduler picks a host and casts to the backup RPC."""
    volume = fake_volume.fake_db_volume()
    mock_volume_get.return_value = volume
    mock_host.return_value = 'cinder-backup'
    backup = fake_backup.fake_backup_obj(self.context)
    self.manager.create_backup(self.context, backup=backup)
    # The chosen host must be saved on the backup object before the
    # request is handed over to the backup service.
    mock_save.assert_called_once()
    mock_host.assert_called_once_with(volume)
    mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
    mock_create.assert_called_once_with(self.context, backup)
@mock.patch('cinder.volume.volume_utils.update_backup_error')
@mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host')
@mock.patch('cinder.db.volume_get')
def test_create_backup_no_service(self, mock_volume_get, mock_host,
                                  mock_error):
    """When no backup service is found the backup is marked errored."""
    volume = fake_volume.fake_db_volume()
    mock_volume_get.return_value = volume
    # Simulate "no eligible cinder-backup service" from the driver.
    mock_host.side_effect = exception.ServiceNotFound(
        service_id='cinder-volume')
    backup = fake_backup.fake_backup_obj(self.context)
    self.manager.create_backup(self.context, backup=backup)
    mock_host.assert_called_once_with(volume)
    mock_volume_get.assert_called_once_with(self.context, backup.volume_id)
    mock_error.assert_called_once_with(
        backup, 'Service not found for creating backup.')
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""

7
cinder/volume/volume_utils.py

@ -52,6 +52,7 @@ from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
@ -1245,3 +1246,9 @@ def resolve_hostname(hostname):
LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
{'host': hostname, 'ip': ip})
return ip
def update_backup_error(backup, err, status=fields.BackupStatus.ERROR):
    """Mark a backup as failed and persist the change.

    :param backup: Backup object to update
    :param err: human-readable failure reason stored on the backup
    :param status: status to set; defaults to ``BackupStatus.ERROR``
    """
    backup.status = status
    backup.fail_reason = err
    backup.save()
Loading…
Cancel
Save