Browse Source

Merge "Add cinder backup service initialize check"

changes/89/478789/5
Jenkins 4 years ago
committed by Gerrit Code Review
parent
commit
2242b9ea8f
  1. 4
      cinder/api/contrib/backups.py
  2. 4
      cinder/backup/driver.py
  3. 18
      cinder/backup/drivers/ceph.py
  4. 10
      cinder/backup/drivers/glusterfs.py
  5. 14
      cinder/backup/drivers/google.py
  6. 12
      cinder/backup/drivers/nfs.py
  7. 36
      cinder/backup/drivers/swift.py
  8. 8
      cinder/backup/drivers/tsm.py
  9. 46
      cinder/backup/manager.py
  10. 11
      cinder/interface/backup_driver.py
  11. 24
      cinder/tests/unit/api/contrib/test_backups.py
  12. 16
      cinder/tests/unit/backup/drivers/test_backup_glusterfs.py
  13. 9
      cinder/tests/unit/backup/drivers/test_backup_nfs.py
  14. 13
      cinder/tests/unit/backup/test_backup.py

4
cinder/api/contrib/backups.py

@@ -165,7 +165,7 @@ class BackupsController(wsgi.Controller):
raise exc.HTTPBadRequest(explanation=error.msg)
# Other not found exceptions will be handled at the wsgi level
except exception.ServiceNotFound as error:
raise exc.HTTPInternalServerError(explanation=error.msg)
raise exc.HTTPServiceUnavailable(explanation=error.msg)
retval = self._view_builder.summary(req, dict(new_backup))
return retval
@@ -246,7 +246,7 @@ class BackupsController(wsgi.Controller):
raise exc.HTTPBadRequest(explanation=error.msg)
# Other Not found exceptions will be handled at the wsgi level
except exception.ServiceNotFound as error:
raise exc.HTTPInternalServerError(explanation=error.msg)
raise exc.HTTPServiceUnavailable(explanation=error.msg)
retval = self._view_builder.summary(req, dict(new_backup))
LOG.debug('Import record output: %s.', retval)

4
cinder/backup/driver.py

@@ -410,6 +410,10 @@ class BackupDriver(base.Base):
"""
return
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed."""
return
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):

18
cinder/backup/drivers/ceph.py

@@ -267,6 +267,24 @@ class CephBackupDriver(driver.BackupDriver):
return (old_format, features)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
if rados is None or rbd is None:
msg = _('rados and rbd python libraries not found')
raise exception.BackupDriverException(message=msg)
for attr in ['backup_ceph_user', 'backup_ceph_pool',
'backup_ceph_conf']:
val = getattr(CONF, attr)
if not val:
raise exception.InvalidConfigurationValue(option=attr,
value=val)
# NOTE: Checking connection to ceph
# RADOSClient __init__ method invokes _connect_to_rados
# so no need to check for self.rados.Error here.
with rbd_driver.RADOSClient(self, self._ceph_backup_pool):
pass
def _connect_to_rados(self, pool=None):
"""Establish connection to the backup Ceph cluster."""
client = self.rados.Rados(rados_id=self._ceph_backup_user,

10
cinder/backup/drivers/glusterfs.py

@@ -47,7 +47,6 @@ class GlusterfsBackupDriver(posix.PosixBackupDriver):
"""Provides backup, restore and delete using GlusterFS repository."""
def __init__(self, context, db=None):
self._check_configuration()
self.backup_mount_point_base = CONF.glusterfs_backup_mount_point
self.backup_share = CONF.glusterfs_backup_share
self._execute = putils.execute
@@ -56,13 +55,14 @@ class GlusterfsBackupDriver(posix.PosixBackupDriver):
super(GlusterfsBackupDriver, self).__init__(context,
backup_path=backup_path)
@staticmethod
def _check_configuration():
def check_for_setup_error(self):
"""Raises error if any required configuration flag is missing."""
required_flags = ['glusterfs_backup_share']
for flag in required_flags:
if not getattr(CONF, flag, None):
raise exception.ConfigNotFound(path=flag)
val = getattr(CONF, flag, None)
if not val:
raise exception.InvalidConfigurationValue(option=flag,
value=val)
def _init_backup_repo_path(self):
remotefsclient = remotefs_brick.RemoteFsClient(

14
cinder/backup/drivers/google.py

@@ -120,7 +120,6 @@ class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver):
"""Provides backup, restore and delete of backup objects within GCS."""
def __init__(self, context, db=None):
self.check_gcs_options()
backup_bucket = CONF.backup_gcs_bucket
backup_credential = CONF.backup_gcs_credential_file
self.gcs_project_id = CONF.backup_gcs_project_id
@@ -153,15 +152,14 @@ class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver):
else:
return httplib2.proxy_info_from_environment()
def check_gcs_options(self):
def check_for_setup_error(self):
required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file',
'backup_gcs_project_id')
unset_options = [opt for opt in required_options
if not getattr(CONF, opt, None)]
if unset_options:
msg = _('Unset gcs options: %s') % unset_options
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
for opt in required_options:
val = getattr(CONF, opt, None)
if not val:
raise exception.InvalidConfigurationValue(option=opt,
value=val)
@gcs_logger
def put_container(self, bucket):

12
cinder/backup/drivers/nfs.py

@@ -22,7 +22,6 @@ from oslo_log import log as logging
from cinder.backup.drivers import posix
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
@@ -50,7 +49,6 @@ class NFSBackupDriver(posix.PosixBackupDriver):
"""Provides backup, restore and delete using NFS supplied repository."""
def __init__(self, context, db=None):
self._check_configuration()
self.backup_mount_point_base = CONF.backup_mount_point_base
self.backup_share = CONF.backup_share
self.mount_options = CONF.backup_mount_options
@@ -59,14 +57,14 @@ class NFSBackupDriver(posix.PosixBackupDriver):
super(NFSBackupDriver, self).__init__(context,
backup_path=backup_path)
@staticmethod
def _check_configuration():
def check_for_setup_error(self):
"""Raises error if any required configuration flag is missing."""
required_flags = ['backup_share']
for flag in required_flags:
if not getattr(CONF, flag, None):
raise exception.ConfigNotFound(_(
'Required flag %s is not set') % flag)
val = getattr(CONF, flag, None)
if not val:
raise exception.InvalidConfigurationValue(option=flag,
value=val)
def _init_backup_repo_path(self):
remotefsclient = remotefs_brick.RemoteFsClient(

36
cinder/backup/drivers/swift.py

@@ -153,6 +153,10 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
backup_default_container,
enable_progress_timer,
db)
if context:
self.initialize()
def initialize(self):
self.swift_attempts = CONF.backup_swift_retry_attempts
self.swift_backoff = CONF.backup_swift_retry_backoff
self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure
@@ -173,7 +177,7 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
"Failed to parse the configuration option "
"'keystone_catalog_info', must be in the form "
"<service_type>:<service_name>:<endpoint_type>"))
for entry in context.service_catalog:
for entry in self.context.service_catalog:
if entry.get('type') == service_type:
# It is assumed that service_types are unique within
# the service catalog, so once the correct one is found
@@ -223,7 +227,7 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
"Failed to parse the configuration option "
"'swift_catalog_info', must be in the form "
"<service_type>:<service_name>:<endpoint_type>"))
for entry in context.service_catalog:
for entry in self.context.service_catalog:
if entry.get('type') == service_type:
# It is assumed that service_types are unique within
# the service catalog, so once the correct one is found
@@ -233,7 +237,7 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
break
else:
self.swift_url = '%s%s' % (CONF.backup_swift_url,
context.project_id)
self.context.project_id)
if self.swift_url is None:
raise exception.BackupDriverException(_(
"Could not determine which Swift endpoint to use. This "
@@ -367,6 +371,32 @@ class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver):
"""Swift driver does not use any extra metadata."""
return None
def check_for_setup_error(self):
# Here we are trying to connect to swift backend service
# without any additional parameters.
# At the moment of execution we don't have any user data
# After just trying to do easiest operations, that will show
# that we've configured swift backup driver in right way
if not CONF.backup_swift_url:
LOG.warning("We will use endpoints from keystone. It is "
"possible we could have problems because of it.")
return
conn = swift.Connection(retries=CONF.backup_swift_retry_attempts,
preauthurl=CONF.backup_swift_url)
try:
conn.get_capabilities()
# TODO(e0ne) catch less general exception
except Exception:
LOG.exception("Can not get Swift capabilities during backup "
"driver initialization.")
raise
def get_backup_driver(context):
# NOTE(mdovgal): at the moment of backup service start we need to
# get driver class instance and for swift at that moment
# we can't get all necessary information like endpoints
# from context, so we have exception as a result.
if context.user is None:
return SwiftBackupDriver(None)
return SwiftBackupDriver(context)

8
cinder/backup/drivers/tsm.py

@@ -271,6 +271,14 @@ class TSMBackupDriver(driver.BackupDriver):
self.tsm_password = CONF.backup_tsm_password
self.volume_prefix = CONF.backup_tsm_volume_prefix
def check_for_setup_error(self):
required_flags = ['backup_share']
for flag in required_flags:
val = getattr(CONF, flag, None)
if not val:
raise exception.InvalidConfigurationValue(option=flag,
value=val)
def _do_backup(self, backup_path, vol_id, backup_mode):
"""Perform the actual backup operation.

46
cinder/backup/manager.py

@@ -35,6 +35,8 @@ import os
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
import six
@@ -89,10 +91,10 @@ class BackupManager(manager.ThreadPoolManager):
def __init__(self, *args, **kwargs):
self.service = importutils.import_module(self.driver_name)
self.az = CONF.storage_availability_zone
self.volume_managers = {}
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
super(BackupManager, self).__init__(*args, **kwargs)
self.is_initialized = False
@property
def driver_name(self):
@@ -107,14 +109,16 @@ class BackupManager(manager.ThreadPoolManager):
return mapper[service]
return service
def _update_backup_error(self, backup, err):
backup.status = fields.BackupStatus.ERROR
def _update_backup_error(self, backup, err,
status=fields.BackupStatus.ERROR):
backup.status = status
backup.fail_reason = err
backup.save()
def init_host(self, **kwargs):
"""Run initialization needed for a standalone service."""
ctxt = context.get_admin_context()
self.setup_backup_backend(ctxt)
try:
self._cleanup_incomplete_backup_operations(ctxt)
@@ -122,6 +126,24 @@
# Don't block startup of the backup service.
LOG.exception("Problem cleaning incomplete backup operations.")
def _setup_backup_driver(self, ctxt):
backup_service = self.service.get_backup_driver(ctxt)
backup_service.check_for_setup_error()
self.is_initialized = True
raise loopingcall.LoopingCallDone()
def setup_backup_backend(self, ctxt):
try:
init_loop = loopingcall.FixedIntervalLoopingCall(
self._setup_backup_driver, ctxt)
init_loop.start(interval=CONF.periodic_interval)
except loopingcall.LoopingCallDone:
LOG.info("Backup driver was successfully initialized.")
except Exception:
LOG.exception("Failed to initialize driver.",
resource={'type': 'driver',
'id': self.__class__.__name__})
def reset(self):
super(BackupManager, self).reset()
self.backup_rpcapi = backup_rpcapi.BackupAPI()
@@ -317,6 +339,10 @@
raise exception.InvalidBackup(reason=err)
try:
if not self.is_working():
err = _('Create backup aborted due to backup service is down')
self._update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
self._run_backup(context, backup, volume)
except Exception as err:
with excutils.save_and_reraise_exception():
@@ -507,6 +533,12 @@
self._update_backup_error(backup, err)
raise exception.InvalidBackup(reason=err)
if not self.is_working():
err = _('Delete backup is aborted due to backup service is down')
status = fields.BackupStatus.ERROR_DELETING
self._update_backup_error(backup, err, status)
raise exception.InvalidBackup(reason=err)
backup_service = self._map_service_to_driver(backup['service'])
if backup_service is not None:
configured_service = self.driver_name
@@ -908,3 +940,11 @@
rpcapi.terminate_connection_snapshot(ctxt, device,
properties, force=force)
rpcapi.remove_export_snapshot(ctxt, device)
def is_working(self):
return self.is_initialized
@periodic_task.periodic_task(spacing=CONF.periodic_interval)
def _report_driver_status(self, context):
if not self.is_working():
self.setup_backup_backend(context)

11
cinder/interface/backup_driver.py

@@ -131,3 +131,14 @@ class BackupDriver(base.CinderInterface):
information
:returns: None
"""
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed.
Depends on storage backend limitations and driver implementation this
method could check if all needed config options are configured well
or try to connect to the storage to verify driver can do it without
any issues.
:returns: None
"""

24
cinder/tests/unit/api/contrib/test_backups.py

@@ -913,11 +913,11 @@ class BackupsAPITestCase(test.TestCase):
res = req.get_response(fakes.wsgi_app(
fake_auth_context=self.user_context))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int)
self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
res_dict['computeFault']['code'])
self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
self.assertEqual(http_client.SERVICE_UNAVAILABLE,
res_dict['serviceUnavailable']['code'])
self.assertEqual('Service cinder-backup could not be found.',
res_dict['computeFault']['message'])
res_dict['serviceUnavailable']['message'])
volume = self.volume_api.get(context.get_admin_context(), volume_id)
self.assertEqual('available', volume['status'])
@@ -1885,12 +1885,12 @@ class BackupsAPITestCase(test.TestCase):
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int)
self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
res_dict['computeFault']['code'])
self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
self.assertEqual(http_client.SERVICE_UNAVAILABLE,
res_dict['serviceUnavailable']['code'])
self.assertEqual('Service %s could not be found.'
% backup_service,
res_dict['computeFault']['message'])
res_dict['serviceUnavailable']['message'])
@mock.patch('cinder.backup.api.API._list_backup_hosts')
def test_import_backup_with_wrong_backup_url(self, _mock_list_services):
@@ -1968,11 +1968,11 @@ class BackupsAPITestCase(test.TestCase):
res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx))
res_dict = jsonutils.loads(res.body)
self.assertEqual(http_client.INTERNAL_SERVER_ERROR, res.status_int)
self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
res_dict['computeFault']['code'])
self.assertEqual(http_client.SERVICE_UNAVAILABLE, res.status_int)
self.assertEqual(http_client.SERVICE_UNAVAILABLE,
res_dict['serviceUnavailable']['code'])
self.assertEqual('Service %s could not be found.' % backup_service,
res_dict['computeFault']['message'])
res_dict['serviceUnavailable']['message'])
db.backup_destroy(context.get_admin_context(), backup_id)

16
cinder/tests/unit/backup/drivers/test_backup_glusterfs.py

@@ -47,10 +47,8 @@ class BackupGlusterfsShareTestCase(test.TestCase):
'_init_backup_repo_path',
return_value=FAKE_BACKUP_PATH)
with mock.patch.object(glusterfs.GlusterfsBackupDriver,
'_check_configuration'):
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
driver._check_configuration()
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
driver.check_for_setup_error()
def test_check_configuration_no_backup_share(self):
self.override_config('glusterfs_backup_share', None)
@@ -58,11 +56,9 @@ class BackupGlusterfsShareTestCase(test.TestCase):
'_init_backup_repo_path',
return_value=FAKE_BACKUP_PATH)
with mock.patch.object(glusterfs.GlusterfsBackupDriver,
'_check_configuration'):
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
self.assertRaises(exception.ConfigNotFound,
driver._check_configuration)
driver = glusterfs.GlusterfsBackupDriver(self.ctxt)
self.assertRaises(exception.InvalidConfigurationValue,
driver.check_for_setup_error)
def test_init_backup_repo_path(self):
self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE)
@@ -72,7 +68,7 @@ class BackupGlusterfsShareTestCase(test.TestCase):
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=FAKE_BACKUP_PATH)
self.mock_object(glusterfs.GlusterfsBackupDriver,
'_check_configuration')
'check_for_setup_error')
self.mock_object(remotefs_brick, 'RemoteFsClient',
return_value=mock_remotefsclient)
self.mock_object(os, 'getegid',

9
cinder/tests/unit/backup/drivers/test_backup_nfs.py

@@ -68,10 +68,9 @@ class BackupNFSShareTestCase(test.TestCase):
self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
return_value=FAKE_BACKUP_PATH)
with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'):
driver = nfs.NFSBackupDriver(self.ctxt)
self.assertRaises(exception.ConfigNotFound,
driver._check_configuration)
driver = nfs.NFSBackupDriver(self.ctxt)
self.assertRaises(exception.InvalidConfigurationValue,
driver.check_for_setup_error)
@mock.patch.object(remotefs_brick, 'RemoteFsClient')
def test_init_backup_repo_path(self, mock_remotefs_client_class):
@@ -81,7 +80,7 @@ class BackupNFSShareTestCase(test.TestCase):
mock_remotefsclient = mock.Mock()
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=FAKE_BACKUP_PATH)
self.mock_object(nfs.NFSBackupDriver, '_check_configuration')
self.mock_object(nfs.NFSBackupDriver, 'check_for_setup_error')
mock_remotefs_client_class.return_value = mock_remotefsclient
self.mock_object(utils, 'get_root_helper')
with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):

13
cinder/tests/unit/backup/test_backup.py

@@ -53,6 +53,7 @@ class BaseBackupTest(test.TestCase):
super(BaseBackupTest, self).setUp()
self.backup_mgr = importutils.import_object(CONF.backup_manager)
self.backup_mgr.host = 'testhost'
self.backup_mgr.is_initialized = True
self.ctxt = context.get_admin_context()
paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot',
@@ -222,6 +223,7 @@ class BackupTestCase(BaseBackupTest):
return self.ctxt
self.override_config('backup_service_inithost_offload', False)
self.override_config('periodic_interval', 0)
vol1_id = self._create_volume_db_entry()
self._create_volume_attach(vol1_id)
@@ -259,8 +261,6 @@
self.volume = importutils.import_object(CONF.volume_manager)
self.backup_mgr.init_host()
self.assertEqual({}, self.backup_mgr.volume_managers)
vol1 = db.volume_get(self.ctxt, vol1_id)
self.assertEqual('available', vol1['status'])
vol2 = db.volume_get(self.ctxt, vol2_id)
@@ -341,8 +341,10 @@
volume_rpcapi.client.serializer._base.version_cap)
self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)
def test_is_working(self):
self.assertTrue(self.backup_mgr.is_working())
@ddt.data(True, False)
def test_is_working(self, initialized):
self.backup_mgr.is_initialized = initialized
self.assertEqual(initialized, self.backup_mgr.is_working())
def test_cleanup_incomplete_backup_operations_with_exceptions(self):
"""Test cleanup resilience in the face of exceptions."""
@@ -427,7 +429,6 @@
def test_cleanup_one_deleting_backup(self):
"""Test cleanup_one_backup for volume status 'deleting'."""
self.override_config('backup_service_inithost_offload', False)
backup = self._create_backup_db_entry(
@@ -1257,7 +1258,7 @@
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_init_backup_repo_path', return_value=None)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_check_configuration', return_value=None)
'check_for_setup_error', return_value=None)
def test_check_support_to_force_delete(self, mock_check_configuration,
mock_init_backup_repo_path):
"""Test force delete check method for supported drivers."""

Loading…
Cancel
Save