Use OVOs to set errors in manage volume/snapshot
Currently, when we are managing volumes and snapshots and we run into an error that requires changing the status to error, we don't make use of the OVO instances we already have and instead pass just the id to a helper function that in some cases retrieves the resource again from the DB before doing the update. This patch leverages the OVO instance we already have to consolidate both helper methods -error_out_volume and error_out_snapshot- into a single error_out method. Doing this not only simplifies the code but also removes one extra DB query. Change-Id: I8d3b47dd71ee616bf00ba98ad1da0935356ebe74
This commit is contained in:
parent
f5634ea3c1
commit
7c8983d5d9
|
@ -89,11 +89,9 @@ class ManageVolumeFlowTestCase(test.TestCase):
|
|||
mock_driver = mock.MagicMock()
|
||||
mock_result = mock.MagicMock()
|
||||
mock_flow_failures = mock.MagicMock()
|
||||
mock_error_out_volumes = self.mock_object(
|
||||
flow_common, 'error_out_volume')
|
||||
mock_error_out = self.mock_object(flow_common, 'error_out')
|
||||
volume_ref = self._stub_volume_object_get(self)
|
||||
task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver)
|
||||
|
||||
task.revert(self.ctxt, mock_result, mock_flow_failures, volume_ref)
|
||||
mock_error_out_volumes.assert_called_once_with(
|
||||
self.ctxt, mock_db, volume_ref.id, reason=mock.ANY)
|
||||
mock_error_out.assert_called_once_with(volume_ref, reason=mock.ANY)
|
||||
|
|
|
@ -103,8 +103,7 @@ class ManageCastTask(flow_utils.CinderTask):
|
|||
self.scheduler_rpcapi = scheduler_rpcapi
|
||||
self.db = db
|
||||
|
||||
def execute(self, context, **kwargs):
|
||||
volume = kwargs.pop('volume')
|
||||
def execute(self, context, volume, **kwargs):
|
||||
request_spec = kwargs.copy()
|
||||
request_spec['volume_id'] = volume.id
|
||||
|
||||
|
@ -115,11 +114,10 @@ class ManageCastTask(flow_utils.CinderTask):
|
|||
request_spec=request_spec,
|
||||
volume=volume)
|
||||
|
||||
def revert(self, context, result, flow_failures, **kwargs):
|
||||
def revert(self, context, result, flow_failures, volume, **kwargs):
|
||||
# Restore the source volume status and set the volume to error status.
|
||||
volume_id = kwargs['volume_id']
|
||||
common.error_out_volume(context, self.db, volume_id)
|
||||
LOG.error(_LE("Volume %s: manage failed."), volume_id)
|
||||
common.error_out(volume)
|
||||
LOG.error(_LE("Volume %s: manage failed."), volume.id)
|
||||
exc_info = False
|
||||
if all(flow_failures[-1].exc_info):
|
||||
exc_info = flow_failures[-1].exc_info
|
||||
|
|
|
@ -21,7 +21,7 @@ import six
|
|||
|
||||
from cinder import exception
|
||||
from cinder.i18n import _LE
|
||||
from cinder import objects
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -75,40 +75,6 @@ def _clean_reason(reason):
|
|||
return reason[0:REASON_LENGTH] + '...'
|
||||
|
||||
|
||||
def _update_object(context, db, status, reason, object_type, object_id):
|
||||
update = {
|
||||
'status': status,
|
||||
}
|
||||
try:
|
||||
LOG.debug('Updating %(object_type)s: %(object_id)s with %(update)s'
|
||||
' due to: %(reason)s', {'object_type': object_type,
|
||||
'object_id': object_id,
|
||||
'reason': reason,
|
||||
'update': update})
|
||||
if object_type == 'volume':
|
||||
db.volume_update(context, object_id, update)
|
||||
elif object_type == 'snapshot':
|
||||
snapshot = objects.Snapshot.get_by_id(context, object_id)
|
||||
snapshot.update(update)
|
||||
snapshot.save()
|
||||
except exception.CinderException:
|
||||
# Don't let this cause further exceptions.
|
||||
LOG.exception(_LE("Failed updating %(object_type)s %(object_id)s with"
|
||||
" %(update)s"), {'object_type': object_type,
|
||||
'object_id': object_id,
|
||||
'update': update})
|
||||
|
||||
|
||||
def error_out_volume(context, db, volume_id, reason=None):
|
||||
reason = _clean_reason(reason)
|
||||
_update_object(context, db, 'error', reason, 'volume', volume_id)
|
||||
|
||||
|
||||
def error_out_snapshot(context, db, snapshot_id, reason=None):
|
||||
reason = _clean_reason(reason)
|
||||
_update_object(context, db, 'error', reason, 'snapshot', snapshot_id)
|
||||
|
||||
|
||||
def error_out(resource, reason=None):
|
||||
"""Sets status to error for any persistent OVO."""
|
||||
reason = _clean_reason(reason)
|
||||
|
|
|
@ -215,7 +215,7 @@ class ExtractVolumeRefTask(flow_utils.CinderTask):
|
|||
return
|
||||
|
||||
reason = _('Volume create failed while extracting volume ref.')
|
||||
common.error_out_volume(context, self.db, volume.id, reason=reason)
|
||||
common.error_out(volume, reason)
|
||||
LOG.error(_LE("Volume %s: create failed"), volume.id)
|
||||
|
||||
|
||||
|
|
|
@ -45,10 +45,8 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask):
|
|||
driver_name = self.driver.__class__.__name__
|
||||
LOG.error(_LE("Unable to manage existing volume. "
|
||||
"Volume driver %s not initialized.") % driver_name)
|
||||
flow_common.error_out_volume(context, self.db, volume_id,
|
||||
reason=_("Volume driver %s "
|
||||
"not initialized.") %
|
||||
driver_name)
|
||||
flow_common.error_out(volume_ref, _("Volume driver %s not "
|
||||
"initialized.") % driver_name)
|
||||
raise exception.DriverNotInitialized()
|
||||
|
||||
size = self.driver.manage_existing_get_size(volume_ref,
|
||||
|
@ -64,8 +62,7 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask):
|
|||
def revert(self, context, result, flow_failures, volume_ref, **kwargs):
|
||||
volume_id = volume_ref.id
|
||||
reason = _('Volume manage failed.')
|
||||
flow_common.error_out_volume(context, self.db,
|
||||
volume_id, reason=reason)
|
||||
flow_common.error_out(volume_ref, reason=reason)
|
||||
LOG.error(_LE("Volume %s: manage failed."), volume_id)
|
||||
|
||||
|
||||
|
|
|
@ -60,8 +60,8 @@ class ExtractSnapshotRefTask(flow_utils.CinderTask):
|
|||
if isinstance(result, ft.Failure):
|
||||
return
|
||||
|
||||
flow_common.error_out_snapshot(context, self.db, snapshot_id)
|
||||
LOG.error(_LE("Snapshot %s: create failed"), snapshot_id)
|
||||
flow_common.error_out(result)
|
||||
LOG.error(_LE("Snapshot %s: create failed"), result.id)
|
||||
|
||||
|
||||
class NotifySnapshotActionTask(flow_utils.CinderTask):
|
||||
|
@ -104,16 +104,14 @@ class PrepareForQuotaReservationTask(flow_utils.CinderTask):
|
|||
self.driver = driver
|
||||
|
||||
def execute(self, context, snapshot_ref, manage_existing_ref):
|
||||
snapshot_id = snapshot_ref['id']
|
||||
if not self.driver.initialized:
|
||||
driver_name = (self.driver.configuration.
|
||||
safe_get('volume_backend_name'))
|
||||
LOG.error(_LE("Unable to manage existing snapshot. "
|
||||
"Volume driver %s not initialized."), driver_name)
|
||||
flow_common.error_out_snapshot(context, self.db, snapshot_id,
|
||||
reason=_("Volume driver %s "
|
||||
"not initialized.") %
|
||||
driver_name)
|
||||
flow_common.error_out(snapshot_ref, reason=_("Volume driver %s "
|
||||
"not initialized.") %
|
||||
driver_name)
|
||||
raise exception.DriverNotInitialized()
|
||||
|
||||
size = self.driver.manage_existing_snapshot_get_size(
|
||||
|
|
Loading…
Reference in New Issue