Make volume soft delete more thorough

When a volume record is soft-deleted in the database,
dependent records in other tables (for example,
Transfers, VolumeGlanceMetadata, etc.) must be soft
deleted as well.  Otherwise, we will get FK dependency
errors when the database is purged.

This patch adds that support for VolumeAttachment table.
(other tables were already covered, just refactored)

Also adds tests.

Co-authored-by: Rajat Dhasmana <rajatdhasmana@gmail.com>
Co-authored-by: Brian Rosmaita <rosmaita.fossdev@gmail.com>

Change-Id: Ibfa6c4ba2f162681756ec3203991351345b65346
Related-Bug: #1542169
(cherry picked from commit a5bb17bdfc)
(cherry picked from commit dd2ba49004)
This commit is contained in:
whoami-rajat 2019-11-29 09:43:57 +00:00 committed by Rajat Dhasmana
parent bffb7d5954
commit 3b3e0b2c58
6 changed files with 73 additions and 26 deletions

View File

@@ -1712,35 +1712,33 @@ def volume_data_get_for_project(context, project_id,
volume_type_id, host=host)
# Models with a foreign key on the Volume table.  When a volume is
# soft-deleted (see volume_destroy below), every row in these tables that
# references it must be soft-deleted too, or the eventual DB purge hits
# FK dependency errors (LP Bug #1542169).
VOLUME_DEPENDENT_MODELS = frozenset([models.VolumeMetadata,
models.VolumeAdminMetadata,
models.Transfer,
models.VolumeGlanceMetadata,
models.VolumeAttachment])
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def volume_destroy(context, volume_id):
    """Soft-delete a volume and all of its dependent records.

    Marks the volume row deleted and, in the same transaction, soft
    deletes every row in the VOLUME_DEPENDENT_MODELS tables that has a
    foreign key on this volume.  Doing both atomically avoids FK
    dependency errors when the database is later purged
    (LP Bug #1542169).

    :param context: admin request context
    :param volume_id: ID of the volume to soft-delete
    :returns: dict of the values written to the volume row (minus the
              ``updated_at`` literal_column marker, which is only
              meaningful inside the UPDATE statement)
    """
    session = get_session()
    now = timeutils.utcnow()
    with session.begin():
        updated_values = {'status': 'deleted',
                          'deleted': True,
                          'deleted_at': now,
                          'updated_at': literal_column('updated_at'),
                          'migration_status': None}
        model_query(context, models.Volume, session=session).\
            filter_by(id=volume_id).\
            update(updated_values)
        # Soft delete every dependent row in the same transaction so a
        # retry (wrap_db_retry) never leaves the volume half-deleted.
        for model in VOLUME_DEPENDENT_MODELS:
            model_query(context, model, session=session).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': now,
                        'updated_at': literal_column('updated_at')})
    del updated_values['updated_at']
    return updated_values

View File

@@ -0,0 +1,46 @@
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for code that makes assumptions about ORM relationships."""
from sqlalchemy_utils import functions as saf
from cinder.db.sqlalchemy import api as db_api
from cinder.db.sqlalchemy import models
from cinder import test
class VolumeRelationshipsTestCase(test.TestCase):
    """Test cases for Volume ORM model relationships."""

    def test_volume_dependent_models_list(self):
        """Make sure the volume dependent tables list is accurate."""
        # Addresses LP Bug #1542169
        declarative_base = saf.get_declarative_base(models.Volume)
        referencing_fks = saf.get_referencing_foreign_keys(models.Volume)
        # Every table holding a FK onto Volume, via sqlalchemy-utils
        # introspection, mapped back to its ORM model class.
        dependent_tables = [table for table, _
                            in saf.group_foreign_keys(referencing_fks)]
        discovered_models = [
            saf.get_class_by_table(declarative_base, table)
            for table in dependent_tables]
        # The hand-maintained set in the DB API must match exactly what
        # the schema actually declares.
        self.assertEqual(len(discovered_models),
                         len(db_api.VOLUME_DEPENDENT_MODELS))
        for model in discovered_models:
            self.assertIn(model, db_api.VOLUME_DEPENDENT_MODELS)

View File

@@ -768,6 +768,13 @@ class DBAPIVolumeTestCase(BaseTest):
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.ctxt, volume['id'])
@mock.patch('cinder.db.sqlalchemy.api.model_query')
def test_volume_destroy_deletes_dependent_data(self, mock_model_query):
    """Soft-deleting a volume also soft deletes its dependent rows.

    Addresses LP Bug #1542169.
    """
    db.volume_destroy(self.ctxt, fake.VOLUME_ID)
    # One query soft-deletes the volume row itself; one more is issued
    # per dependent model in VOLUME_DEPENDENT_MODELS.
    expected_call_count = 1 + len(sqlalchemy_api.VOLUME_DEPENDENT_MODELS)
    self.assertEqual(expected_call_count, mock_model_query.call_count)
def test_volume_get_all(self):
volumes = [db.volume_create(self.ctxt,
{'host': 'h%d' % i, 'size': i})

View File

@@ -936,9 +936,6 @@ class VolumeManager(manager.CleanableManager,
LOG.exception("Failed to update usages deleting volume.",
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
volume.destroy()
# If deleting source/destination volume in a migration or a temp
@@ -3534,9 +3531,6 @@ class VolumeManager(manager.CleanableManager,
resource={'type': 'group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, vol.id)
vol.destroy()
# Commit the reservations

View File

@@ -138,6 +138,7 @@ sphinx-feature-classification==0.1.0
sphinxcontrib-websupport==1.0.1
sqlalchemy-migrate==0.11.0
SQLAlchemy==1.0.10
SQLAlchemy-Utils==0.36.1
sqlparse==0.2.4
statsd==3.2.2
stestr==2.2.0

View File

@@ -13,6 +13,7 @@ os-api-ref>=1.4.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
PyMySQL>=0.7.6 # MIT License
psycopg2>=2.7 # LGPL/ZPL
SQLAlchemy-Utils>=0.36.1 # BSD License
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD