Add reset-state function for backups

Volumes and snapshots already have reset-state functions, so backups
need one as well. A failure while creating or restoring a backup (for
example, the database or RabbitMQ going down) can leave the backup
stuck in the 'creating' or 'restoring' status. Currently the only way
to recover is to restart the cinder-backup service.

This blueprint gives administrators another means of recovery: calling
the backup reset-state API instead of restarting the cinder-backup
service directly.
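
For example (the endpoint and request body mirror the new tests below;
the IDs are placeholders):

  POST /v2/{project_id}/backups/{backup_id}/action
  {"os-reset_status": {"status": "error"}}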

DocImpact: Support reset backup status

blueprint support-reset-state-for-backup

Change-Id: Icd677a0b48083894bcd969d5199fa91b307080de
ling-yun 2014-08-26 18:39:30 +08:00
parent 69b4cfd09c
commit cef4a9b230
10 changed files with 317 additions and 2 deletions

cinder/api/contrib/admin_actions.py

@@ -17,6 +17,7 @@ from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import backup
from cinder import db
from cinder import exception
from cinder.i18n import _
@@ -47,6 +48,7 @@ class AdminController(wsgi.Controller):
# singular name of the resource
self.resource_name = self.collection.rstrip('s')
self.volume_api = volume.API()
self.backup_api = backup.API()
def _update(self, *args, **kwargs):
raise NotImplementedError()
@@ -254,6 +256,38 @@ class SnapshotAdminController(AdminController):
return self.volume_api.delete_snapshot(*args, **kwargs)
class BackupAdminController(AdminController):
"""AdminController for Backups."""
collection = 'backups'
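# Only settled states may be forced onto a backup; transient states
# such as 'creating' or 'restoring' are rejected with HTTP 400 by the
# inherited validate_update() check against this set.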
valid_status = set(['available',
'error'
])
@wsgi.action('os-reset_status')
def _reset_status(self, req, id, body):
"""Reset status on the resource."""
context = req.environ['cinder.context']
self.authorize(context, 'reset_status')
update = self.validate_update(body['os-reset_status'])
msg = "Updating %(resource)s '%(id)s' with '%(update)r'"
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
notifier_info = {'id': id, 'update': update}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
try:
self.backup_api.reset_status(context=context, backup_id=id,
status=update['status'])
except exception.NotFound as e:
raise exc.HTTPNotFound(explanation=e.msg)
return webob.Response(status_int=202)
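For reference, a minimal sketch of the AdminController.validate_update()
contract that BackupAdminController inherits (simplified; the real method
is defined on AdminController earlier in this file):

    def validate_update(self, body):
        update = {}
        try:
            update['status'] = body['status']
        except (TypeError, KeyError):
            raise exc.HTTPBadRequest(explanation=_("Must specify 'status'"))
        if update['status'] not in self.valid_status:
            raise exc.HTTPBadRequest(
                explanation=_("Must specify a valid status"))
        return update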
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin actions."""
@@ -264,7 +298,8 @@ class Admin_actions(extensions.ExtensionDescriptor):
def get_controller_extensions(self):
exts = []
-for class_ in (VolumeAdminController, SnapshotAdminController):
+for class_ in (VolumeAdminController, SnapshotAdminController,
+               BackupAdminController):
controller = class_()
extension = extensions.ControllerExtension(
self, class_.collection, controller)

cinder/api/contrib/backups.py

@@ -378,6 +378,7 @@ class Backups(extensions.ExtensionDescriptor):
res = extensions.ResourceExtension(
Backups.alias, BackupsController(),
collection_actions={'detail': 'GET', 'import_record': 'POST'},
-member_actions={'restore': 'POST', 'export_record': 'GET'})
+member_actions={'restore': 'POST', 'export_record': 'GET',
+                'action': 'POST'})
resources.append(res)
return resources

cinder/backup/api.py

@@ -258,6 +258,21 @@ class API(base.Base):
return d
def reset_status(self, context, backup_id, status):
"""Make the RPC call to reset a volume backup's status.
Call backup manager to execute backup status reset operation.
:param context: running context
:param backup_id: which backup's status to be reset
:parma status: backup's status to be reset
:raises: InvalidBackup
"""
# get backup info
backup = self.get(context, backup_id)
# send to manager to do reset operation
self.backup_rpcapi.reset_status(ctxt=context, host=backup['host'],
backup_id=backup_id, status=status)
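A hypothetical caller sketch (not part of this patch; admin_ctxt and
stuck_backup_id are assumed to exist, and an admin context is required
because the API extension authorizes 'reset_status'):

    from cinder import backup

    backup_api = backup.API()
    # Force a backup stuck in 'creating' back to 'error' so it can be
    # deleted and retried.
    backup_api.reset_status(context=admin_ctxt,
                            backup_id=stuck_backup_id,
                            status='error')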
def export_record(self, context, backup_id):
"""Make the RPC call to export a volume backup.

cinder/backup/manager.py

@@ -46,6 +46,7 @@ from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import quota
from cinder import rpc
from cinder import utils
from cinder.volume import utils as volume_utils
@@ -601,3 +602,97 @@ class BackupManager(manager.SchedulerDependentManager):
LOG.info(_('Import record id %s metadata from driver '
'finished.') % backup_id)
def reset_status(self, context, backup_id, status):
"""Reset volume backup status.
:param context: running context
:param backup_id: The backup id for reset status operation
:param status: The status to be set
:raises: InvalidBackup
:raises: BackupVerifyUnsupportedDriver
:raises: AttributeError
"""
LOG.info(_('Reset backup status started, backup_id: '
'%(backup_id)s, status: %(status)s.'),
{'backup_id': backup_id,
'status': status})
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the backup status updated. Fail early since there
is no other status to change but the backup's
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_("Backup driver has not been initialized"))
backup = self.db.backup_get(context, backup_id)
backup_service = self._map_service_to_driver(backup['service'])
LOG.info(_('Backup service: %s.'), backup_service)
if backup_service is not None:
configured_service = self.driver_name
if backup_service != configured_service:
err = _('Reset backup status aborted, the backup service'
' currently configured [%(configured_service)s] '
'is not the backup service that was used to create'
' this backup [%(backup_service)s].') % \
{'configured_service': configured_service,
'backup_service': backup_service}
raise exception.InvalidBackup(reason=err)
# Verify backup
try:
# transitioning to 'available' (other than from 'restoring')
if status == 'available' and backup['status'] != 'restoring':
# verify the backup first if the driver supports verification
if isinstance(backup_service,
driver.BackupDriverWithVerify):
backup_service.verify(backup_id)
self.db.backup_update(context, backup_id,
{'status': status})
# driver does not support verify function
else:
msg = (_('Backup service %(configured_service)s '
'does not support verify. Backup id'
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup_id})
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# reset status to error or from restoring to available
else:
if (status == 'error' or
(status == 'available' and
backup['status'] == 'restoring')):
self.db.backup_update(context, backup_id,
{'status': status})
except exception.InvalidBackup:
with excutils.save_and_reraise_exception():
msg = (_("Backup id %(id)s is not invalid. "
"Skipping reset.") % {'id': backup_id})
LOG.error(msg)
except exception.BackupVerifyUnsupportedDriver:
with excutils.save_and_reraise_exception():
msg = (_('Backup service %(configured_service)s '
'does not support verify. Backup id'
' %(id)s is not verified. '
'Skipping verify.') %
{'configured_service': self.driver_name,
'id': backup_id})
LOG.error(msg)
except AttributeError:
msg = (_('Backup service %(service)s does not support '
'verify. Backup id %(id)s is not verified. '
'Skipping reset.') %
{'service': self.driver_name,
'id': backup_id})
LOG.error(msg)
raise exception.BackupVerifyUnsupportedDriver(
reason=msg)
# send notification to ceilometer
notifier_info = {'id': backup_id, 'update': {'status': status}}
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups" + '.reset_status.end',
notifier_info)
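The branching above reduces to a small set of transition rules; a hedged
restatement as a predicate (the helper name is mine, not part of the
patch):

    def reset_transition_needs_verify(current_status, new_status):
        # Forcing 'error' is always allowed, and 'restoring' ->
        # 'available' merely unwinds a stuck restore, so neither is
        # verified. Any other transition to 'available' must be checked
        # by a driver implementing BackupDriverWithVerify, otherwise
        # BackupVerifyUnsupportedDriver is raised.
        return new_status == 'available' and current_status != 'restoring'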

cinder/backup/rpcapi.py

@@ -88,3 +88,12 @@ class BackupAPI(object):
backup_service=backup_service,
backup_url=backup_url,
backup_hosts=backup_hosts)
def reset_status(self, ctxt, host, backup_id, status):
LOG.debug("reset_status in rpcapi backup_id %(id)s "
"on host %(host)s.",
{'id': backup_id,
'host': host})
cctxt = self.client.prepare(server=host)
return cctxt.cast(ctxt, 'reset_status', backup_id=backup_id,
status=status)
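Note that cast() is one-way: the API layer returns HTTP 202 before the
manager runs, so the new status becomes visible asynchronously. A
hypothetical polling helper for callers that need to observe the result
(names are assumptions, not part of the patch):

    import time

    def wait_for_backup_status(db_api, ctxt, backup_id, expected,
                               timeout=30):
        # Poll the backup record until the asynchronous reset lands.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if db_api.backup_get(ctxt, backup_id)['status'] == expected:
                return True
            time.sleep(1)
        return False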

cinder/exception.py

@@ -514,6 +514,10 @@ class BackupMetadataUnsupportedVersion(BackupDriverException):
message = _("Unsupported backup metadata version requested")
class BackupVerifyUnsupportedDriver(BackupDriverException):
message = _("Unsupported backup verify driver")
class VolumeMetadataBackupExists(BackupDriverException):
message = _("Metadata backup already exists for this volume")

cinder/tests/api/contrib/test_admin_actions.py

@@ -15,6 +15,7 @@ import tempfile
from oslo.config import cfg
import webob
from webob import exc
from cinder.api.contrib import admin_actions
from cinder.brick.local_dev import lvm as brick_lvm
@@ -75,6 +76,16 @@ class AdminActionsTest(test.TestCase):
resp = req.get_response(app())
return resp
def _issue_backup_reset(self, ctx, backup, updated_status):
req = webob.Request.blank('/v2/fake/backups/%s/action' % backup['id'])
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = \
jsonutils.dumps({'os-reset_status': updated_status})
req.environ['cinder.context'] = ctx
resp = req.get_response(app())
return resp
def test_valid_updates(self):
vac = admin_actions.VolumeAdminController()
@@ -168,6 +179,54 @@ class AdminActionsTest(test.TestCase):
# status is still 'error'
self.assertEqual(volume['status'], 'error')
def test_backup_reset_status_as_admin(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available'})
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
'volume_id': volume['id']})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
self.assertEqual(resp.status_int, 202)
def test_backup_reset_status_as_non_admin(self):
ctx = context.RequestContext('fake', 'fake')
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
'volume_id': "fakeid"})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
# request is not authorized
self.assertEqual(resp.status_int, 403)
def test_backup_reset_status(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id']})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'error'})
self.assertEqual(resp.status_int, 202)
def test_invalid_status_for_backup(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
backup = db.backup_create(ctx, {'status': 'available',
'volume_id': volume['id']})
resp = self._issue_backup_reset(ctx,
backup,
{'status': 'restoring'})
self.assertEqual(resp.status_int, 400)
def test_malformed_reset_status_body(self):
ctx = context.RequestContext('admin', 'fake', True)
volume = db.volume_create(ctx, {'status': 'available', 'size': 1})
@@ -761,3 +820,14 @@ class AdminActionsTest(test.TestCase):
ctx = context.RequestContext('admin', 'fake', True)
volume = self._migrate_volume_comp_exec(ctx, volume, new_volume, False,
expected_status, expected_id)
def test_backup_reset_valid_updates(self):
vac = admin_actions.BackupAdminController()
vac.validate_update({'status': 'available'})
vac.validate_update({'status': 'error'})
self.assertRaises(exc.HTTPBadRequest,
vac.validate_update,
{'status': 'restoring'})
self.assertRaises(exc.HTTPBadRequest,
vac.validate_update,
{'status': 'creating'})

cinder/tests/policy.json

@@ -36,6 +36,7 @@
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",

cinder/tests/test_backup.py

@@ -22,6 +22,7 @@ import tempfile
import mock
from oslo.config import cfg
from cinder.backup import manager
from cinder import context
from cinder import db
from cinder import exception
@@ -29,6 +30,8 @@ from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
from cinder.tests.backup.fake_service_with_verify import\
get_backup_driver
CONF = cfg.CONF
@@ -599,3 +602,84 @@ class BackupTestCaseWithVerify(BaseBackupTest):
self.assertTrue(_mock_record_verify.called)
backup = db.backup_get(self.ctxt, imported_record)
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_nonrestoring_to_available(self):
vol_id = self._create_volume_db_entry(status='available',
size=1)
backup_id = self._create_backup_db_entry(status='error',
volume_id=vol_id)
with mock.patch.object(manager.BackupManager,
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup_id,
'available')
backup = db.backup_get(self.ctxt, backup_id)
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_available_invalid_backup(self):
volume = db.volume_create(self.ctxt, {'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'error',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt)
_mock_backup_verify_class = ('%s.%s.%s' %
(backup_driver.__module__,
backup_driver.__class__.__name__,
'verify'))
with mock.patch(_mock_backup_verify_class) as \
_mock_record_verify:
_mock_record_verify.side_effect = \
exception.BackupVerifyUnsupportedDriver(reason='fake')
self.assertRaises(exception.BackupVerifyUnsupportedDriver,
self.backup_mgr.reset_status,
self.ctxt,
backup['id'],
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error')
def test_backup_reset_status_from_restoring_to_available(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'restoring',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'available')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'available')
def test_backup_reset_status_to_error(self):
volume = db.volume_create(self.ctxt,
{'status': 'available',
'host': 'test',
'provider_location': '',
'size': 1})
backup = db.backup_create(self.ctxt,
{'status': 'creating',
'service':
CONF.backup_driver,
'volume_id': volume['id']})
self.backup_mgr.reset_status(self.ctxt,
backup['id'],
'error')
backup = db.backup_get(self.ctxt, backup['id'])
self.assertEqual(backup['status'], 'error')

etc/cinder/policy.json

@@ -30,6 +30,7 @@
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",