Merge "Add cinder backup service initialize check"

Jenkins 2017-07-25 22:42:57 +00:00 committed by Gerrit Code Review
commit 2242b9ea8f
14 changed files with 164 additions and 61 deletions


@@ -165,7 +165,7 @@ class BackupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=error.msg)
         # Other not found exceptions will be handled at the wsgi level
         except exception.ServiceNotFound as error:
-            raise exc.HTTPInternalServerError(explanation=error.msg)
+            raise exc.HTTPServiceUnavailable(explanation=error.msg)

         retval = self._view_builder.summary(req, dict(new_backup))
         return retval

@@ -246,7 +246,7 @@ class BackupsController(wsgi.Controller):
             raise exc.HTTPBadRequest(explanation=error.msg)
         # Other Not found exceptions will be handled at the wsgi level
         except exception.ServiceNotFound as error:
-            raise exc.HTTPInternalServerError(explanation=error.msg)
+            raise exc.HTTPServiceUnavailable(explanation=error.msg)

         retval = self._view_builder.summary(req, dict(new_backup))
         LOG.debug('Import record output: %s.', retval)
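For context, this status-code change means a missing cinder-backup service now surfaces to API clients as 503 rather than 500. A minimal standalone sketch of the difference, assuming only webob is installed (the explanation string below is illustrative):

    from webob import exc

    try:
        raise exc.HTTPServiceUnavailable(explanation='cinder-backup is not running')
    except exc.HTTPServiceUnavailable as error:
        # HTTPServiceUnavailable renders as 503; the previously raised
        # HTTPInternalServerError rendered as 500.
        assert error.status_int == 503
        assert exc.HTTPInternalServerError.code == 500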


@@ -410,6 +410,10 @@ class BackupDriver(base.Base):
         """
         return

+    def check_for_setup_error(self):
+        """Check if the backup backend is successfully configured."""
+        return
+

 @six.add_metaclass(abc.ABCMeta)
 class BackupDriverWithVerify(BackupDriver):


@@ -267,6 +267,24 @@ class CephBackupDriver(driver.BackupDriver):
         return (old_format, features)

+    def check_for_setup_error(self):
+        """Raises an error if prerequisites aren't met."""
+        if rados is None or rbd is None:
+            msg = _('rados and rbd python libraries not found')
+            raise exception.BackupDriverException(message=msg)
+
+        for attr in ['backup_ceph_user', 'backup_ceph_pool',
+                     'backup_ceph_conf']:
+            val = getattr(CONF, attr)
+            if not val:
+                raise exception.InvalidConfigurationValue(option=attr,
+                                                          value=val)
+        # NOTE: Check the connection to ceph. The RADOSClient __init__
+        # invokes _connect_to_rados, so there is no need to check for
+        # self.rados.Error here.
+        with rbd_driver.RADOSClient(self, self._ceph_backup_pool):
+            pass
+
     def _connect_to_rados(self, pool=None):
         """Establish connection to the backup Ceph cluster."""
         client = self.rados.Rados(rados_id=self._ceph_backup_user,


@@ -47,7 +47,6 @@ class GlusterfsBackupDriver(posix.PosixBackupDriver):
     """Provides backup, restore and delete using GlusterFS repository."""

     def __init__(self, context, db=None):
-        self._check_configuration()
         self.backup_mount_point_base = CONF.glusterfs_backup_mount_point
         self.backup_share = CONF.glusterfs_backup_share
         self._execute = putils.execute

@@ -56,13 +55,14 @@ class GlusterfsBackupDriver(posix.PosixBackupDriver):
         super(GlusterfsBackupDriver, self).__init__(context,
                                                     backup_path=backup_path)

-    @staticmethod
-    def _check_configuration():
+    def check_for_setup_error(self):
         """Raises error if any required configuration flag is missing."""
         required_flags = ['glusterfs_backup_share']
         for flag in required_flags:
-            if not getattr(CONF, flag, None):
-                raise exception.ConfigNotFound(path=flag)
+            val = getattr(CONF, flag, None)
+            if not val:
+                raise exception.InvalidConfigurationValue(option=flag,
+                                                          value=val)

     def _init_backup_repo_path(self):
         remotefsclient = remotefs_brick.RemoteFsClient(


@@ -120,7 +120,6 @@ class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver):
     """Provides backup, restore and delete of backup objects within GCS."""

     def __init__(self, context, db=None):
-        self.check_gcs_options()
         backup_bucket = CONF.backup_gcs_bucket
         backup_credential = CONF.backup_gcs_credential_file
         self.gcs_project_id = CONF.backup_gcs_project_id

@@ -153,15 +152,14 @@ class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver):
         else:
             return httplib2.proxy_info_from_environment()

-    def check_gcs_options(self):
+    def check_for_setup_error(self):
         required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file',
                             'backup_gcs_project_id')
-        unset_options = [opt for opt in required_options
-                         if not getattr(CONF, opt, None)]
-        if unset_options:
-            msg = _('Unset gcs options: %s') % unset_options
-            LOG.error(msg)
-            raise exception.InvalidInput(reason=msg)
+        for opt in required_options:
+            val = getattr(CONF, opt, None)
+            if not val:
+                raise exception.InvalidConfigurationValue(option=opt,
+                                                          value=val)

     @gcs_logger
     def put_container(self, bucket):


@@ -22,7 +22,6 @@ from oslo_log import log as logging
 from cinder.backup.drivers import posix
 from cinder import exception
-from cinder.i18n import _
 from cinder import interface
 from cinder import utils

@@ -50,7 +49,6 @@ class NFSBackupDriver(posix.PosixBackupDriver):
     """Provides backup, restore and delete using NFS supplied repository."""

     def __init__(self, context, db=None):
-        self._check_configuration()
         self.backup_mount_point_base = CONF.backup_mount_point_base
         self.backup_share = CONF.backup_share
         self.mount_options = CONF.backup_mount_options

@@ -59,14 +57,14 @@ class NFSBackupDriver(posix.PosixBackupDriver):
         super(NFSBackupDriver, self).__init__(context,
                                               backup_path=backup_path)

-    @staticmethod
-    def _check_configuration():
+    def check_for_setup_error(self):
         """Raises error if any required configuration flag is missing."""
         required_flags = ['backup_share']
         for flag in required_flags:
-            if not getattr(CONF, flag, None):
-                raise exception.ConfigNotFound(_(
-                    'Required flag %s is not set') % flag)
+            val = getattr(CONF, flag, None)
+            if not val:
+                raise exception.InvalidConfigurationValue(option=flag,
+                                                          value=val)

     def _init_backup_repo_path(self):
         remotefsclient = remotefs_brick.RemoteFsClient(


@@ -153,6 +153,10 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
                                                 backup_default_container,
                                                 enable_progress_timer,
                                                 db)
+        if context:
+            self.initialize()
+
+    def initialize(self):
         self.swift_attempts = CONF.backup_swift_retry_attempts
         self.swift_backoff = CONF.backup_swift_retry_backoff
         self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure

@@ -173,7 +177,7 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
                     "Failed to parse the configuration option "
                     "'keystone_catalog_info', must be in the form "
                     "<service_type>:<service_name>:<endpoint_type>"))
-            for entry in context.service_catalog:
+            for entry in self.context.service_catalog:
                 if entry.get('type') == service_type:
                     # It is assumed that service_types are unique within
                     # the service catalog, so once the correct one is found

@@ -223,7 +227,7 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
                     "Failed to parse the configuration option "
                     "'swift_catalog_info', must be in the form "
                     "<service_type>:<service_name>:<endpoint_type>"))
-            for entry in context.service_catalog:
+            for entry in self.context.service_catalog:
                 if entry.get('type') == service_type:
                     # It is assumed that service_types are unique within
                     # the service catalog, so once the correct one is found

@@ -233,7 +237,7 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
                         break
             else:
                 self.swift_url = '%s%s' % (CONF.backup_swift_url,
-                                           context.project_id)
+                                           self.context.project_id)
         if self.swift_url is None:
             raise exception.BackupDriverException(_(
                 "Could not determine which Swift endpoint to use. This "
@@ -367,6 +371,32 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
         """Swift driver does not use any extra metadata."""
         return None

+    def check_for_setup_error(self):
+        # Try to connect to the swift backend without any additional
+        # parameters: at this point there is no user data yet, so the
+        # simplest request is enough to show that the swift backup driver
+        # is configured correctly.
+        if not CONF.backup_swift_url:
+            LOG.warning("backup_swift_url is not set; endpoints from "
+                        "keystone will be used, which may cause problems.")
+            return
+        conn = swift.Connection(retries=CONF.backup_swift_retry_attempts,
+                                preauthurl=CONF.backup_swift_url)
+        try:
+            conn.get_capabilities()
+        # TODO(e0ne): catch a less general exception
+        except Exception:
+            LOG.exception("Cannot get Swift capabilities during backup "
+                          "driver initialization.")
+            raise
+

 def get_backup_driver(context):
+    # NOTE(mdovgal): when the backup service starts we need a driver
+    #                instance, but at that point the swift driver cannot
+    #                get all the information it needs (such as endpoints)
+    #                from the context, so initialization would raise.
+    if context.user is None:
+        return SwiftBackupDriver(None)
+
     return SwiftBackupDriver(context)


@@ -271,6 +271,14 @@ class TSMBackupDriver(driver.BackupDriver):
         self.tsm_password = CONF.backup_tsm_password
         self.volume_prefix = CONF.backup_tsm_volume_prefix

+    def check_for_setup_error(self):
+        required_flags = ['backup_share']
+        for flag in required_flags:
+            val = getattr(CONF, flag, None)
+            if not val:
+                raise exception.InvalidConfigurationValue(option=flag,
+                                                          value=val)
+
     def _do_backup(self, backup_path, vol_id, backup_mode):
         """Perform the actual backup operation.


@@ -35,6 +35,8 @@ import os
 from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging as messaging
+from oslo_service import loopingcall
+from oslo_service import periodic_task
 from oslo_utils import excutils
 from oslo_utils import importutils
 import six

@@ -89,10 +91,10 @@ class BackupManager(manager.ThreadPoolManager):
     def __init__(self, *args, **kwargs):
         self.service = importutils.import_module(self.driver_name)
         self.az = CONF.storage_availability_zone
-        self.volume_managers = {}
         self.backup_rpcapi = backup_rpcapi.BackupAPI()
         self.volume_rpcapi = volume_rpcapi.VolumeAPI()
         super(BackupManager, self).__init__(*args, **kwargs)
+        self.is_initialized = False

     @property
     def driver_name(self):

@@ -107,14 +109,16 @@ class BackupManager(manager.ThreadPoolManager):
             return mapper[service]
         return service

-    def _update_backup_error(self, backup, err):
-        backup.status = fields.BackupStatus.ERROR
+    def _update_backup_error(self, backup, err,
+                             status=fields.BackupStatus.ERROR):
+        backup.status = status
         backup.fail_reason = err
         backup.save()

     def init_host(self, **kwargs):
         """Run initialization needed for a standalone service."""
         ctxt = context.get_admin_context()
+        self.setup_backup_backend(ctxt)

         try:
             self._cleanup_incomplete_backup_operations(ctxt)
@@ -122,6 +126,24 @@ class BackupManager(manager.ThreadPoolManager):
             # Don't block startup of the backup service.
             LOG.exception("Problem cleaning incomplete backup operations.")

+    def _setup_backup_driver(self, ctxt):
+        backup_service = self.service.get_backup_driver(ctxt)
+        backup_service.check_for_setup_error()
+        self.is_initialized = True
+        raise loopingcall.LoopingCallDone()
+
+    def setup_backup_backend(self, ctxt):
+        try:
+            init_loop = loopingcall.FixedIntervalLoopingCall(
+                self._setup_backup_driver, ctxt)
+            init_loop.start(interval=CONF.periodic_interval)
+        except loopingcall.LoopingCallDone:
+            LOG.info("Backup driver was successfully initialized.")
+        except Exception:
+            LOG.exception("Failed to initialize driver.",
+                          resource={'type': 'driver',
+                                    'id': self.__class__.__name__})
+
     def reset(self):
         super(BackupManager, self).reset()
         self.backup_rpcapi = backup_rpcapi.BackupAPI()
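setup_backup_backend() in the hunk above relies on oslo.service's FixedIntervalLoopingCall: the wrapped function is retried on a fixed interval until it raises LoopingCallDone. A small standalone sketch of that pattern, assuming oslo.service is installed (the function, counter, and interval below are illustrative, not part of this change):

    from oslo_service import loopingcall

    attempts = []

    def _try_init():
        attempts.append(1)
        if len(attempts) < 3:
            # Returning normally means "not done yet"; the loop calls the
            # function again after the configured interval.
            return
        # Raising LoopingCallDone stops the loop; retvalue becomes the
        # result returned by wait().
        raise loopingcall.LoopingCallDone(retvalue=len(attempts))

    loop = loopingcall.FixedIntervalLoopingCall(_try_init)
    result = loop.start(interval=0.01).wait()
    print(result)  # 3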
@@ -317,6 +339,10 @@ class BackupManager(manager.ThreadPoolManager):
             raise exception.InvalidBackup(reason=err)

         try:
+            if not self.is_working():
+                err = _('Create backup aborted: the backup service is down')
+                self._update_backup_error(backup, err)
+                raise exception.InvalidBackup(reason=err)
             self._run_backup(context, backup, volume)
         except Exception as err:
             with excutils.save_and_reraise_exception():
@@ -507,6 +533,12 @@ class BackupManager(manager.ThreadPoolManager):
             self._update_backup_error(backup, err)
             raise exception.InvalidBackup(reason=err)

+        if not self.is_working():
+            err = _('Delete backup aborted: the backup service is down')
+            status = fields.BackupStatus.ERROR_DELETING
+            self._update_backup_error(backup, err, status)
+            raise exception.InvalidBackup(reason=err)
+
         backup_service = self._map_service_to_driver(backup['service'])
         if backup_service is not None:
             configured_service = self.driver_name
@@ -908,3 +940,11 @@ class BackupManager(manager.ThreadPoolManager):
                 rpcapi.terminate_connection_snapshot(ctxt, device,
                                                      properties, force=force)
                 rpcapi.remove_export_snapshot(ctxt, device)
+
+    def is_working(self):
+        return self.is_initialized
+
+    @periodic_task.periodic_task(spacing=CONF.periodic_interval)
+    def _report_driver_status(self, context):
+        if not self.is_working():
+            self.setup_backup_backend(context)
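_report_driver_status() above is driven by oslo.service periodic tasks. A hedged sketch of that mechanism, assuming oslo.service and oslo.config are available (the class name, task name, and 60-second spacing are illustrative, not part of this change):

    from oslo_config import cfg
    from oslo_service import periodic_task

    class ExamplePeriodicTasks(periodic_task.PeriodicTasks):
        def __init__(self, conf):
            super(ExamplePeriodicTasks, self).__init__(conf)

        @periodic_task.periodic_task(spacing=60)
        def _retry_setup(self, context):
            # Invoked roughly every 60 seconds by the service's periodic
            # task runner; the backup manager uses the same hook to re-run
            # setup_backup_backend() until the driver initializes.
            pass

    tasks = ExamplePeriodicTasks(cfg.CONF)
    # Normally the service framework drives this loop; calling it directly
    # runs any tasks whose spacing has elapsed.
    tasks.run_periodic_tasks(context=None)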


@@ -131,3 +131,14 @@ class BackupDriver(base.CinderInterface):
         information
         :returns: None
         """
+
+    def check_for_setup_error(self):
+        """Check if the backup backend is successfully configured.
+
+        Depending on storage backend limitations and the driver
+        implementation, this method may verify that all required
+        configuration options are set, or try to connect to the storage
+        backend to confirm the driver can reach it without issues.
+
+        :returns: None
+        """


@@ -913,11 +913,11 @@ class BackupsAPITestCase(test.TestCase):
         res = req.get_response(fakes.wsgi_app(
             fake_auth_context=self.user_context))
         res_dict = jsonutils.loads(res.body)
-        self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int)
-        self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
-                         res_dict['computeFault']['code'])
+        self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
+        self.assertEqual(http_client.SERVICE_UNAVAILABLE,
+                         res_dict['serviceUnavailable']['code'])
         self.assertEqual('Service cinder-backup could not be found.',
-                         res_dict['computeFault']['message'])
+                         res_dict['serviceUnavailable']['message'])

         volume = self.volume_api.get(context.get_admin_context(), volume_id)
         self.assertEqual('available', volume['status'])

@@ -1885,12 +1885,12 @@ class BackupsAPITestCase(test.TestCase):
         res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
         res_dict = jsonutils.loads(res.body)

-        self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int)
-        self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
-                         res_dict['computeFault']['code'])
+        self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
+        self.assertEqual(http_client.SERVICE_UNAVAILABLE,
+                         res_dict['serviceUnavailable']['code'])
         self.assertEqual('Service %s could not be found.'
                          % backup_service,
-                         res_dict['computeFault']['message'])
+                         res_dict['serviceUnavailable']['message'])

     @mock.patch('cinder.backup.api.API._list_backup_hosts')
     def test_import_backup_with_wrong_backup_url(self, _mock_list_services):

@@ -1968,11 +1968,11 @@ class BackupsAPITestCase(test.TestCase):
         res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
         res_dict = jsonutils.loads(res.body)

-        self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int)
-        self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
-                         res_dict['computeFault']['code'])
+        self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
+        self.assertEqual(http_client.SERVICE_UNAVAILABLE,
+                         res_dict['serviceUnavailable']['code'])
         self.assertEqual('Service %s could not be found.' % backup_service,
-                         res_dict['computeFault']['message'])
+                         res_dict['serviceUnavailable']['message'])

         db.backup_destroy(context.get_admin_context(), backup_id)


@@ -47,10 +47,8 @@ class BackupGlusterfsShareTestCase(test.TestCase):
                          '_init_backup_repo_path',
                          return_value=FAKE_BACKUP_PATH)
-        with mock.patch.object(glusterfs.GlusterfsBackupDriver,
-                               '_check_configuration'):
-            driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
-            driver._check_configuration()
+        driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
+        driver.check_for_setup_error()

     def test_check_configuration_no_backup_share(self):
         self.override_config('glusterfs_backup_share', None)

@@ -58,11 +56,9 @@ class BackupGlusterfsShareTestCase(test.TestCase):
                          '_init_backup_repo_path',
                          return_value=FAKE_BACKUP_PATH)
-        with mock.patch.object(glusterfs.GlusterfsBackupDriver,
-                               '_check_configuration'):
-            driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
-        self.assertRaises(exception.ConfigNotFound,
-                          driver._check_configuration)
+        driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          driver.check_for_setup_error)

     def test_init_backup_repo_path(self):
         self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE)

@@ -72,7 +68,7 @@ class BackupGlusterfsShareTestCase(test.TestCase):
         mock_remotefsclient.get_mount_point = mock.Mock(
             return_value=FAKE_BACKUP_PATH)
         self.mock_object(glusterfs.GlusterfsBackupDriver,
-                         '_check_configuration')
+                         'check_for_setup_error')
         self.mock_object(remotefs_brick, 'RemoteFsClient',
                          return_value=mock_remotefsclient)
         self.mock_object(os, 'getegid',


@@ -68,10 +68,9 @@ class BackupNFSShareTestCase(test.TestCase):
         self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
                          return_value=FAKE_BACKUP_PATH)
-        with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'):
-            driver = nfs.NFSBackupDriver(self.ctxt)
-        self.assertRaises(exception.ConfigNotFound,
-                          driver._check_configuration)
+        driver = nfs.NFSBackupDriver(self.ctxt)
+        self.assertRaises(exception.InvalidConfigurationValue,
+                          driver.check_for_setup_error)

     @mock.patch.object(remotefs_brick, 'RemoteFsClient')
     def test_init_backup_repo_path(self, mock_remotefs_client_class):

@@ -81,7 +80,7 @@ class BackupNFSShareTestCase(test.TestCase):
         mock_remotefsclient = mock.Mock()
         mock_remotefsclient.get_mount_point = mock.Mock(
             return_value=FAKE_BACKUP_PATH)
-        self.mock_object(nfs.NFSBackupDriver, '_check_configuration')
+        self.mock_object(nfs.NFSBackupDriver, 'check_for_setup_error')
         mock_remotefs_client_class.return_value = mock_remotefsclient
         self.mock_object(utils, 'get_root_helper')
         with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):


@@ -53,6 +53,7 @@ class BaseBackupTest(test.TestCase):
         super(BaseBackupTest, self).setUp()
         self.backup_mgr = importutils.import_object(CONF.backup_manager)
         self.backup_mgr.host = 'testhost'
+        self.backup_mgr.is_initialized = True
         self.ctxt = context.get_admin_context()

         paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot',

@@ -222,6 +223,7 @@ class BackupTestCase(BaseBackupTest):
             return self.ctxt

         self.override_config('backup_service_inithost_offload', False)
+        self.override_config('periodic_interval', 0)

         vol1_id = self._create_volume_db_entry()
         self._create_volume_attach(vol1_id)

@@ -259,8 +261,6 @@ class BackupTestCase(BaseBackupTest):
         self.volume = importutils.import_object(CONF.volume_manager)
         self.backup_mgr.init_host()

-        self.assertEqual({}, self.backup_mgr.volume_managers)
-
         vol1 = db.volume_get(self.ctxt, vol1_id)
         self.assertEqual('available', vol1['status'])
         vol2 = db.volume_get(self.ctxt, vol2_id)

@@ -341,8 +341,10 @@ class BackupTestCase(BaseBackupTest):
                          volume_rpcapi.client.serializer._base.version_cap)
         self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)

-    def test_is_working(self):
-        self.assertTrue(self.backup_mgr.is_working())
+    @ddt.data(True, False)
+    def test_is_working(self, initialized):
+        self.backup_mgr.is_initialized = initialized
+        self.assertEqual(initialized, self.backup_mgr.is_working())

     def test_cleanup_incomplete_backup_operations_with_exceptions(self):
         """Test cleanup resilience in the face of exceptions."""

@@ -427,7 +429,6 @@ class BackupTestCase(BaseBackupTest):
     def test_cleanup_one_deleting_backup(self):
         """Test cleanup_one_backup for volume status 'deleting'."""
         self.override_config('backup_service_inithost_offload', False)
-
         backup = self._create_backup_db_entry(

@@ -1257,7 +1258,7 @@ class BackupTestCase(BaseBackupTest):
     @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
                 '_init_backup_repo_path', return_value=None)
     @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
-                '_check_configuration', return_value=None)
+                'check_for_setup_error', return_value=None)
     def test_check_support_to_force_delete(self, mock_check_configuration,
                                            mock_init_backup_repo_path):
         """Test force delete check method for supported drivers."""