Fix incremental backup handling in project cleanup

Cinder refuses to delete a backup while dependent (incremental)
backups exist. Introduce a second iteration over the backups so that
the incremental ones are dropped first and the remaining backups
afterwards.

Story: 2010217
Change-Id: Id4525bbbe11294b53e981e7654056eeb8969343d
Artem Goncharov 2022-08-12 15:07:49 +02:00
parent 0ded7ac398
commit d9b7beff43
2 changed files with 119 additions and 23 deletions
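To illustrate the approach described in the commit message, here is a minimal
sketch of the two-pass deletion. It is not the patch itself: the function name
delete_backups_in_two_passes and the overall flow are simplified for
illustration, while backups(), delete_backup(), wait_for_delete() and
has_dependent_backups mirror the SDK calls used in the diff below.

def delete_backups_in_two_passes(proxy):
    # Pass 1 deletes leaf backups (nothing depends on them); pass 2 deletes
    # whatever is left once the dependents are gone.
    for current_pass in (1, 2):
        deleted = []
        for backup in proxy.backups(details=False):
            if current_pass == 1 and backup.has_dependent_backups:
                # Cinder would refuse to delete this one while incremental
                # children still exist, so postpone it to the second pass.
                continue
            proxy.delete_backup(backup)
            deleted.append(backup)
        # Wait for the deletions to finish before the next pass; otherwise
        # the dependency information would still look stale.
        for backup in deleted:
            proxy.wait_for_delete(backup)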

@@ -1397,36 +1397,51 @@ class Proxy(_base_proxy.BaseBlockStorageProxy):
            }
        }

    def _service_cleanup(self, dry_run=True, client_status_queue=None,
                         identified_resources=None,
                         filters=None, resource_evaluation_fn=None):
        backups = []
        for obj in self.backups(details=False):
            need_delete = self._service_cleanup_del_res(
                self.delete_backup,
                obj,
                dry_run=dry_run,
                client_status_queue=client_status_queue,
                identified_resources=identified_resources,
                filters=filters,
                resource_evaluation_fn=resource_evaluation_fn)
            if not dry_run and need_delete:
                backups.append(obj)
    def _service_cleanup(
        self,
        dry_run=True,
        client_status_queue=None,
        identified_resources=None,
        filters=None,
        resource_evaluation_fn=None
    ):
        # It is not possible to delete a backup while dependent backups
        # exist. To be able to clean those up we need at least two
        # iterations: the first one removes backups that have no dependent
        # backups, and by the second iteration no backups with dependencies
        # should remain.
        for i in range(1, 3):
            backups = []
            for obj in self.backups(details=False):
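                # First pass: only backups without dependent backups are
                # deleted; the second pass removes whatever remains.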
                if (
                    (i == 1 and not obj.has_dependent_backups)
                    or i != 1
                ):
                    need_delete = self._service_cleanup_del_res(
                        self.delete_backup,
                        obj,
                        dry_run=dry_run,
                        client_status_queue=client_status_queue,
                        identified_resources=identified_resources,
                        filters=filters,
                        resource_evaluation_fn=resource_evaluation_fn)
                    if not dry_run and need_delete:
                        backups.append(obj)
        # Before deleting snapshots need to wait for backups to be deleted
        for obj in backups:
            try:
                self.wait_for_delete(obj)
            except exceptions.SDKException:
                # Well, did our best, still try further
                pass
            # Before proceeding need to wait for backups to be deleted
            for obj in backups:
                try:
                    self.wait_for_delete(obj)
                except exceptions.SDKException:
                    # Well, did our best, still try further
                    pass
        snapshots = []
        for obj in self.snapshots(details=False):
            need_delete = self._service_cleanup_del_res(
                self.delete_snapshot,
                obj,
                dry_run=dry_run,
                client_status_queue=client_status_queue,
                identified_resources=identified_resources,
                filters=filters,
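Note: the _service_cleanup_del_res helper used above is defined elsewhere in
the SDK's shared proxy code and is not part of this diff. Below is a rough
sketch of the contract the cleanup code relies on; it is an assumption-laden
simplification, and the filter handling (e.g. created_at) that the functional
test further down relies on is omitted here.

def _service_cleanup_del_res(del_fn, obj, dry_run=True,
                             client_status_queue=None,
                             identified_resources=None,
                             filters=None, resource_evaluation_fn=None):
    # Sketch only: decide whether the resource should be cleaned up, report
    # it, and delete it unless this is a dry run.
    need_delete = True
    if resource_evaluation_fn and callable(resource_evaluation_fn):
        # Custom callback decides whether the resource should be dropped.
        need_delete = resource_evaluation_fn(obj, filters,
                                             identified_resources)
    if need_delete:
        if client_status_queue:
            client_status_queue.put(obj)
        if identified_resources is not None:
            identified_resources[obj.id] = obj
        if not dry_run:
            # Only a real run actually deletes the resource.
            del_fn(obj)
    return need_delete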

@@ -119,3 +119,84 @@ class TestProjectCleanup(base.BaseFunctionalTest):
        # Since we might not have enough privs to drop all nets - ensure
        # we do not have our known one
        self.assertNotIn(self.network_name, net_names)

    def test_block_storage_cleanup(self):
        if not self.user_cloud.has_service('block-storage'):
            self.skipTest(
                'Block storage service is required, but not available')
        status_queue = queue.Queue()
        vol = self.conn.block_storage.create_volume(name='vol1', size=1)
        self.conn.block_storage.wait_for_status(vol)
        s1 = self.conn.block_storage.create_snapshot(volume_id=vol.id)
        self.conn.block_storage.wait_for_status(s1)
        b1 = self.conn.block_storage.create_backup(volume_id=vol.id)
        self.conn.block_storage.wait_for_status(b1)
        b2 = self.conn.block_storage.create_backup(
            volume_id=vol.id, is_incremental=True, snapshot_id=s1.id)
        self.conn.block_storage.wait_for_status(b2)
        b3 = self.conn.block_storage.create_backup(
            volume_id=vol.id, is_incremental=True, snapshot_id=s1.id)
        self.conn.block_storage.wait_for_status(b3)
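        # b1 is now a full backup with dependent incremental backups (b2 and
        # b3), so Cinder will refuse to delete it until those are gone -
        # exactly the situation the two-pass cleanup has to handle.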
        # First round - check no resources are old enough
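        # (the created_at filter selects resources created before the given
        # date, so a date in the past should match nothing)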
        self.conn.project_cleanup(
            dry_run=True,
            wait_timeout=120,
            status_queue=status_queue,
            filters={'created_at': '2000-01-01'})
        self.assertTrue(status_queue.empty())
        # Second round - the resource evaluation function returns False,
        # ensure nothing is identified
        self.conn.project_cleanup(
            dry_run=True,
            wait_timeout=120,
            status_queue=status_queue,
            filters={'created_at': '2200-01-01'},
            resource_evaluation_fn=lambda x, y, z: False)
        self.assertTrue(status_queue.empty())
        # Third round - filter date far in the future, so everything
        # created before it should be identified
        self.conn.project_cleanup(
            dry_run=True,
            wait_timeout=120,
            status_queue=status_queue,
            filters={'created_at': '2200-01-01'})
        objects = []
        while not status_queue.empty():
            objects.append(status_queue.get())
        # At least the known volume should be identified
        volumes = list(obj.id for obj in objects)
        self.assertIn(vol.id, volumes)
        # Fourth round - dry run with no filters, ensure everything identified
        self.conn.project_cleanup(
            dry_run=True,
            wait_timeout=120,
            status_queue=status_queue)
        objects = []
        while not status_queue.empty():
            objects.append(status_queue.get())
        vol_ids = list(obj.id for obj in objects)
        self.assertIn(vol.id, vol_ids)
        # Ensure the volume still exists after all the dry runs
        vol_check = self.conn.block_storage.get_volume(vol.id)
        self.assertEqual(vol.name, vol_check.name)
        # Last round - do a real cleanup
        self.conn.project_cleanup(
            dry_run=False,
            wait_timeout=600,
            status_queue=status_queue)
        objects = []
        while not status_queue.empty():
            objects.append(status_queue.get())