Fix notifications of migration temp volume

Volume usage notifications issued by Cinder during volume migrations are
incorrect.

Part of a volume migration is creating a new volume on the destination
backend, and during this process Cinder issues the "create.start" and
"create.end" volume usage notifications even though this is not a
user-requested volume creation, which is inconsistent with other temporary
volume and snapshot creation cases.
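
For reference, these usage notifications go through Cinder's
notify_about_volume_usage helper (the same helper the new tests below mock).
The following is only a minimal sketch of that pattern, with the surrounding
create logic reduced to a hypothetical wrapper and a driver_create placeholder,
not the actual flow code:

    # Illustrative sketch only; the real logic lives in the create volume taskflow.
    from cinder.volume import volume_utils


    def create_destination_volume(context, volume, host, driver_create):
        # Before this patch both notifications fire even when the volume being
        # created is just a migration's temporary destination volume.
        volume_utils.notify_about_volume_usage(context, volume, 'create.start',
                                               host=host)
        driver_create(volume)  # placeholder for the actual driver create call
        volume_utils.notify_about_volume_usage(context, volume, 'create.end',
                                               host=host)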

Also, one of the last steps during the migration is to delete one of the
two volumes (the source or the destination), and in that case Cinder only
issues a "delete.end" notification without its corresponding
"delete.start".

Since temporary volumes (for backups or migrations) are not counted
towards quota usage, they should not issue volume usage notifications
either.

This patch makes sure that we don't issue notifications when creating or
deleting temporary migration volumes.

In both cases the code checks whether the migration_status field starts
with 'target:'.  For creation, migration_status is set in the
_migrate_volume_generic method before making the RPC call, so the value is
already present before the manager flow starts.
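
A self-contained sketch of that convention, using a stand-in FakeVolume class
instead of the real versioned object (the 'target:' prefix and the
is_migration_target() check mirror the patch below; the example id is made up):

    # Stand-in object for illustration; the real check is Volume.is_migration_target().
    class FakeVolume:
        def __init__(self, migration_status=None):
            self.migration_status = migration_status

        def is_migration_target(self):
            # Destination volumes of a generic migration carry a status of the
            # form 'target:<source-volume-id>', set before the create RPC call.
            return (self.migration_status or '').startswith('target:')


    source_id = 'c81c-example'  # hypothetical source volume id
    dest = FakeVolume(migration_status='target:%s' % source_id)

    assert dest.is_migration_target()                # temp volume: skip notifications
    assert not FakeVolume('success').is_migration_target()
    assert not FakeVolume().is_migration_target()    # regular user volume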

Closes-Bug: #1922920
Change-Id: I7164d700ef56a29e5d4f707fd2340e621bd6f351
Author: Gorka Eguileor
Date: 2021-04-07 11:51:30 +02:00
Commit: 28d9bca7d6 (parent d29f12117e)
7 changed files with 169 additions and 28 deletions


@@ -580,6 +580,11 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
     def is_multiattach(self):
         return self.volume_type and self.volume_type.is_multiattach()
 
+    # Don't add it as a property to avoid having to add it to obj_extra_fields,
+    # to manager's _VOLUME_CLONE_SKIP_PROPERTIES, etc.
+    def is_migration_target(self):
+        return (self.migration_status or '').startswith('target:')
+
 
 @base.CinderObjectRegistry.register
 class VolumeList(base.ObjectListBase, base.CinderObject):


@@ -578,6 +578,14 @@ class TestVolume(test_objects.BaseObjectsTestCase):
             self.context, volume_type_id=None, volume_type=None)
         self.assertFalse(bool(volume.is_replicated()))
 
+    @ddt.data((None, False), ('error', False), ('success', False),
+              ('target:123456', True))
+    @ddt.unpack
+    def test_is_migration_target(self, migration_status, expected):
+        volume = fake_volume.fake_volume_obj(self.context,
+                                             migration_status=migration_status)
+        self.assertIs(expected, volume.is_migration_target())
+
 
 @ddt.ddt
 class TestVolumeList(test_objects.BaseObjectsTestCase):


@@ -1305,6 +1305,104 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
             detail=message_field.Detail.DRIVER_FAILED_CREATE,
             exception=err)
 
+    @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage')
+    def test_notify_volume_action_do_nothing(self, notify_mock):
+        task = create_volume_manager.NotifyVolumeActionTask(mock.sentinel.db,
+                                                            None)
+
+        task.execute(mock.sentinel.context, mock.sentinel.volume)
+
+        notify_mock.assert_not_called()
+
+    @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage')
+    def test_notify_volume_action_send_notification(self, notify_mock):
+        event_suffix = 'create.start'
+        volume = mock.Mock()
+        task = create_volume_manager.NotifyVolumeActionTask(mock.sentinel.db,
+                                                            event_suffix)
+
+        task.execute(mock.sentinel.context, volume)
+
+        notify_mock.assert_called_once_with(mock.sentinel.context,
+                                            volume,
+                                            event_suffix,
+                                            host=volume.host)
+
+    @ddt.data(False, True)
+    @mock.patch('taskflow.engines.load')
+    @mock.patch.object(create_volume_manager, 'CreateVolumeOnFinishTask')
+    @mock.patch.object(create_volume_manager, 'CreateVolumeFromSpecTask')
+    @mock.patch.object(create_volume_manager, 'NotifyVolumeActionTask')
+    @mock.patch.object(create_volume_manager, 'ExtractVolumeSpecTask')
+    @mock.patch.object(create_volume_manager, 'OnFailureRescheduleTask')
+    @mock.patch.object(create_volume_manager, 'ExtractVolumeRefTask')
+    @mock.patch.object(create_volume_manager.linear_flow, 'Flow')
+    def test_get_flow(self, is_migration_target, flow_mock, extract_ref_mock,
+                      onfailure_mock, extract_spec_mock, notify_mock,
+                      create_mock, onfinish_mock, load_mock):
+        assert(isinstance(is_migration_target, bool))
+        filter_properties = {'retry': mock.sentinel.retry}
+        tasks = [mock.call(extract_ref_mock.return_value),
+                 mock.call(onfailure_mock.return_value),
+                 mock.call(extract_spec_mock.return_value),
+                 mock.call(notify_mock.return_value),
+                 mock.call(create_mock.return_value,
+                           onfinish_mock.return_value)]
+
+        volume = mock.Mock()
+        volume.is_migration_target.return_value = is_migration_target
+
+        result = create_volume_manager.get_flow(
+            mock.sentinel.context,
+            mock.sentinel.manager,
+            mock.sentinel.db,
+            mock.sentinel.driver,
+            mock.sentinel.scheduler_rpcapi,
+            mock.sentinel.host,
+            volume,
+            mock.sentinel.allow_reschedule,
+            mock.sentinel.reschedule_context,
+            mock.sentinel.request_spec,
+            filter_properties,
+            mock.sentinel.image_volume_cache)
+
+        volume.is_migration_target.assert_called_once_with()
+        if is_migration_target:
+            tasks.pop(3)
+            notify_mock.assert_not_called()
+            end_notify_suffix = None
+        else:
+            notify_mock.assert_called_once_with(mock.sentinel.db,
+                                                'create.start')
+            end_notify_suffix = 'create.end'
+        flow_mock.assert_called_once_with('volume_create_manager')
+        extract_ref_mock.assert_called_once_with(mock.sentinel.db,
+                                                 mock.sentinel.host,
+                                                 set_error=False)
+        onfailure_mock.assert_called_once_with(
+            mock.sentinel.reschedule_context, mock.sentinel.db,
+            mock.sentinel.manager, mock.sentinel.scheduler_rpcapi, mock.ANY)
+        extract_spec_mock.assert_called_once_with(mock.sentinel.db)
+        create_mock.assert_called_once_with(mock.sentinel.manager,
+                                            mock.sentinel.db,
+                                            mock.sentinel.driver,
+                                            mock.sentinel.image_volume_cache)
+        onfinish_mock.assert_called_once_with(mock.sentinel.db,
+                                              end_notify_suffix)
+
+        volume_flow = flow_mock.return_value
+        self.assertEqual(len(tasks), volume_flow.add.call_count)
+        volume_flow.add.assert_has_calls(tasks)
+
+        load_mock.assert_called_once_with(
+            volume_flow,
+            store={'context': mock.sentinel.context,
+                   'filter_properties': filter_properties,
+                   'request_spec': mock.sentinel.request_spec,
+                   'volume': volume})
+        self.assertEqual(result, load_mock.return_value)
+
 
 @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY)
 class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):


@@ -391,6 +391,32 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                                                         volume_id)
         mock_clean.assert_called_once_with(volume_id, self.volume.driver)
 
+    @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
+    @mock.patch('cinder.quota.QUOTAS.rollback')
+    @mock.patch('cinder.quota.QUOTAS.commit')
+    @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION'])
+    def test_delete_migrating_volume(self, reserve_mock, commit_mock,
+                                     rollback_mock, notify_mock):
+        """Test volume can be created and deleted."""
+        volume = tests_utils.create_volume(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            migration_status='target:123',
+            **self.volume_params)
+        volume_id = volume['id']
+
+        self.volume.delete_volume(self.context, volume)
+        vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
+                            volume_id)
+        self.assertEqual(vol['status'], 'deleted')
+
+        # For migration's temp volume we don't notify or do any quota
+        notify_mock.assert_not_called()
+        rollback_mock.assert_not_called()
+        commit_mock.assert_not_called()
+        reserve_mock.assert_not_called()
+
     def test_create_delete_volume_with_metadata(self):
         """Test volume can be created with metadata and deleted."""
         test_meta = {'fake_key': 'fake_value'}


@@ -364,6 +364,9 @@ class NotifyVolumeActionTask(flow_utils.CinderTask):
         self.event_suffix = event_suffix
 
     def execute(self, context, volume):
+        if not self.event_suffix:
+            return
+
         try:
             volume_utils.notify_about_volume_usage(context, volume,
                                                    self.event_suffix,
@@ -1346,13 +1349,17 @@ def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
         LOG.debug("Volume reschedule parameters: %(allow)s "
                   "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})
 
-    volume_flow.add(ExtractVolumeSpecTask(db),
-                    NotifyVolumeActionTask(db, "create.start"),
-                    CreateVolumeFromSpecTask(manager,
+    volume_flow.add(ExtractVolumeSpecTask(db))
+    # Temporary volumes created during migration should not be notified
+    end_notify_suffix = None
+    if not volume.is_migration_target():
+        volume_flow.add(NotifyVolumeActionTask(db, 'create.start'))
+        end_notify_suffix = 'create.end'
+    volume_flow.add(CreateVolumeFromSpecTask(manager,
                                              db,
                                              driver,
                                              image_volume_cache),
-                    CreateVolumeOnFinishTask(db, "create.end"))
+                    CreateVolumeOnFinishTask(db, end_notify_suffix))
 
     # Now load (but do not run) the flow using the provided initial data.
     return taskflow.engines.load(volume_flow, store=create_what)


@@ -942,14 +942,13 @@ class VolumeManager(manager.CleanableManager,
         # needs to be handled for it.
         is_migrating = volume.migration_status not in (None, 'error',
                                                        'success')
-        is_migrating_dest = (is_migrating and
-                             volume.migration_status.startswith(
-                                 'target:'))
-        notification = "delete.start"
-        if unmanage_only:
-            notification = "unmanage.start"
-        if not is_temp_vol:
-            self._notify_about_volume_usage(context, volume, notification)
+        # If deleting source/destination volume in a migration or a temp
+        # volume for backup, we should skip quotas.
+        do_quota = not (is_migrating or is_temp_vol)
+        if do_quota:
+            notification = 'unmanage.' if unmanage_only else 'delete.'
+            self._notify_about_volume_usage(context, volume,
+                                            notification + 'start')
 
         try:
             volume_utils.require_driver_initialized(self.driver)
@@ -962,8 +961,7 @@ class VolumeManager(manager.CleanableManager,
                                                                  volume.id)
             for s in snapshots:
                 if s.status != fields.SnapshotStatus.DELETING:
-                    self._clear_db(is_migrating_dest, volume,
-                                   'error_deleting')
+                    self._clear_db(volume, 'error_deleting')
 
                     msg = (_("Snapshot %(id)s was found in state "
                              "%(state)s rather than 'deleting' during "
@@ -982,7 +980,7 @@ class VolumeManager(manager.CleanableManager,
                           resource=volume)
                 # If this is a destination volume, we have to clear the database
                 # record to avoid user confusion.
-                self._clear_db(is_migrating_dest, volume, 'available')
+                self._clear_db(volume, 'available')
                 return True  # Let caller know we skipped deletion
         except Exception:
             with excutils.save_and_reraise_exception():
@@ -992,12 +990,9 @@ class VolumeManager(manager.CleanableManager,
                 if unmanage_only is True:
                     new_status = 'error_unmanaging'
 
-                self._clear_db(is_migrating_dest, volume, new_status)
+                self._clear_db(volume, new_status)
 
-        # If deleting source/destination volume in a migration or a temp
-        # volume for backup, we should skip quotas.
-        skip_quota = is_migrating or is_temp_vol
-        if not skip_quota:
+        if do_quota:
             # Get reservations
             try:
                 reservations = None
@@ -1018,12 +1013,9 @@ class VolumeManager(manager.CleanableManager,
-        # If deleting source/destination volume in a migration or a temp
-        # volume for backup, we should skip quotas.
-        if not skip_quota:
-            notification = "delete.end"
-            if unmanage_only:
-                notification = "unmanage.end"
-            self._notify_about_volume_usage(context, volume, notification)
+        if do_quota:
+            self._notify_about_volume_usage(context, volume,
+                                            notification + 'end')
 
             # Commit the reservations
             if reservations:
                 QUOTAS.commit(context, reservations, project_id=project_id)
@@ -1037,11 +1029,11 @@ class VolumeManager(manager.CleanableManager,
             LOG.info(msg, resource=volume)
         return None
 
-    def _clear_db(self, is_migrating_dest, volume_ref, status) -> None:
+    def _clear_db(self, volume_ref, status) -> None:
         # This method is called when driver.unmanage() or
         # driver.delete_volume() fails in delete_volume(), so it is already
         # in the exception handling part.
-        if is_migrating_dest:
+        if volume_ref.is_migration_target():
             volume_ref.destroy()
             LOG.error("Unable to delete the destination volume "
                       "during volume migration, (NOTE: database "


@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    `Bug #1922920 <https://bugs.launchpad.net/cinder/+bug/1922920>`_: Don't do
+    volume usage notifications for migration temporary volumes.