Include deleted volumes for online data migrations

The two online data migrations which moved volumes and snapshots to
the __DEFAULT__ volume type did not include soft-deleted rows, which
meant that the follow-up migration making volume_type_id non-nullable
could fail, because deleted volumes and snapshots would still have
`null` as their volume_type_id.

This patch fixes that by including deleted volumes and snapshots when
running the online database migrations.  Also adds tests.

(Patch is proposed directly to stable/ussuri because the online
migration code has already been removed from master by change
I78681ea89762790f544178725999903a41d75ad1.)

Co-authored-by: Mohammed Naser <mnaser@vexxhost.com>

Change-Id: I3dc5ab78266dd895829e827066f78c6bebf67d0d
Closes-Bug: #1893107
Author: Brian Rosmaita
Date:   2020-08-26 22:00:12 -04:00
Parent: 1afaac1ca3
Commit: 2440cb66e3
3 changed files with 133 additions and 4 deletions
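To make the mechanism concrete: oslo.db-style model queries hide soft-deleted rows by default, so a migration that relies on the default query never sees them. The sketch below is a minimal illustration of that behavior, assuming SQLAlchemy 1.4+; the simplified Volume model and the untyped_volumes() helper are invented for this example and are not cinder's actual model_query code.

# A minimal sketch of oslo.db-style soft-delete filtering; the Volume
# model and untyped_volumes() helper are simplified stand-ins, not
# cinder's actual code.
import sqlalchemy as sa
from sqlalchemy import orm

Base = orm.declarative_base()


class Volume(Base):
    __tablename__ = 'volumes'
    id = sa.Column(sa.String(36), primary_key=True)
    deleted = sa.Column(sa.Boolean, default=False)  # soft-delete flag
    volume_type_id = sa.Column(sa.String(36), nullable=True)


def untyped_volumes(session, read_deleted='no'):
    """Query volumes that still have no volume type."""
    query = session.query(Volume).filter(Volume.volume_type_id.is_(None))
    if read_deleted == 'no':
        # Default behavior: soft-deleted rows are invisible, which is
        # exactly why the original migration left them unmigrated.
        query = query.filter(Volume.deleted == sa.false())
    return query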


@@ -595,11 +595,11 @@ def untyped_volumes_online_data_migration(context, max_count):
     session = get_session()
     with session.begin():
         total = model_query(context,
-                            models.Volume,
+                            models.Volume, read_deleted='yes',
                             session=session).filter_by(
                                 volume_type_id=None).limit(max_count).count()
         volumes = model_query(context,
-                              models.Volume,
+                              models.Volume, read_deleted='yes',
                               session=session).filter_by(
                                   volume_type_id=None).limit(max_count).all()
         for volume in volumes:
@@ -620,11 +620,11 @@ def untyped_snapshots_online_data_migration(context, max_count):
     session = get_session()
     with session.begin():
         total = model_query(context,
-                            models.Snapshot,
+                            models.Snapshot, read_deleted='yes',
                             session=session).filter_by(
                                 volume_type_id=None).limit(max_count).count()
         snapshots = model_query(context,
-                                models.Snapshot,
+                                models.Snapshot, read_deleted='yes',
                                 session=session).filter_by(
                                     volume_type_id=None).limit(max_count).all()
         for snapshot in snapshots:
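For context on the (total, updated) pair these functions return: total is the number of rows (capped by max_count) that still need migrating, and updated is the number fixed by the call; the operator-facing runner (cinder-manage db online_data_migrations) keeps invoking each migration until nothing remains. The loop below is a simplified sketch of that contract, not the actual cinder-manage implementation; the drive_migration() name is invented here.

# A simplified sketch of how an online data migration is driven to
# completion; drive_migration() is a hypothetical stand-in for the
# real cinder-manage logic.
def drive_migration(migrate, context, batch_size=50):
    """Call migrate(context, max_count=batch_size) until no rows remain."""
    while True:
        total, updated = migrate(context, max_count=batch_size)
        if total == 0:
            return  # nothing left that needs migrating
        if updated == 0:
            # Rows remain but none were updated; bail out instead of
            # looping forever.
            raise RuntimeError('online data migration stalled: '
                               '%d rows remaining' % total)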


@@ -20,6 +20,7 @@ from unittest.mock import call
 
 import ddt
 from oslo_config import cfg
+from oslo_db import exception as oslo_exc
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 import six
@@ -1688,6 +1689,57 @@ class DBAPIVolumeTestCase(BaseTest):
                 new_cluster_name + vols[i].cluster_name[len(cluster_name):],
                 db_vols[i].cluster_name)
 
+    def test_untyped_volumes_online_data_migration(self):
+        # Bug #1893107: need to make sure we migrate even deleted
+        # volumes so that the DB doesn't need to be purged before
+        # making the volume_type_id column non-nullable in a subsequent
+        # release.
+
+        # need this or volume_create will fail when creating a deleted volume
+        self.ctxt.read_deleted = 'yes'
+        db_volumes = [
+            # non-deleted with volume_type
+            db.volume_create(self.ctxt,
+                             {'id': fake.VOLUME_ID,
+                              'volume_type_id': fake.VOLUME_TYPE_ID,
+                              'deleted': False}),
+            # deleted with volume_type
+            db.volume_create(self.ctxt,
+                             {'id': fake.VOLUME2_ID,
+                              'volume_type_id': fake.VOLUME_TYPE_ID,
+                              'deleted': True})]
+
+        expected_total = 0
+        expected_updated = 0
+
+        # if the volume_type_id column is nullable, add some that will
+        # have to be migrated
+        try:
+            # non-deleted with NO volume_type
+            v3 = db.volume_create(self.ctxt,
+                                  {'id': fake.VOLUME3_ID,
+                                   'volume_type_id': None,
+                                   'deleted': False})
+            # deleted with NO volume_type
+            v4 = db.volume_create(self.ctxt,
+                                  {'id': fake.VOLUME4_ID,
+                                   'volume_type_id': None,
+                                   'deleted': True})
+            db_volumes.append([v3, v4])
+            expected_total = 2
+            expected_updated = 2
+        except oslo_exc.DBError:
+            pass
+
+        # restore context to normal
+        self.ctxt.read_deleted = 'no'
+
+        total, updated = db.untyped_volumes_online_data_migration(
+            self.ctxt, max_count=10)
+
+        self.assertEqual(expected_total, total)
+        self.assertEqual(expected_updated, updated)
+
 
 @ddt.ddt
 class DBAPISnapshotTestCase(BaseTest):
@@ -2064,6 +2116,63 @@ class DBAPISnapshotTestCase(BaseTest):
 
         self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
 
+    def test_untyped_snapshots_online_data_migration(self):
+        # Bug #1893107: need to make sure we migrate even deleted
+        # snapshots so that the DB doesn't need to be purged before
+        # making the volume_type_id column non-nullable in a subsequent
+        # release.
+
+        # need this or snapshot_create will fail when creating a deleted
+        # snapshot
+        self.ctxt.read_deleted = 'yes'
+        db_snapshots = [
+            # non-deleted with volume_type
+            db.snapshot_create(self.ctxt,
+                               {'id': fake.SNAPSHOT_ID,
+                                'volume_id': fake.VOLUME_ID,
+                                'volume_type_id': fake.VOLUME_TYPE_ID,
+                                'deleted': False}),
+            # deleted with volume_type
+            db.snapshot_create(self.ctxt,
+                               {'id': fake.SNAPSHOT2_ID,
+                                'volume_id': fake.VOLUME2_ID,
+                                'volume_type_id': fake.VOLUME_TYPE_ID,
+                                'deleted': True})]
+
+        expected_total = 0
+        expected_updated = 0
+
+        # if the volume_type_id column is nullable, add some that will
+        # have to be migrated
+        try:
+            # non-deleted with NO volume_type
+            s3 = db.snapshot_create(self.ctxt,
+                                    {'id': fake.SNAPSHOT3_ID,
+                                     'volume_id': fake.VOLUME3_ID,
+                                     'volume_type_id': None,
+                                     'deleted': False})
+            # deleted with NO volume_type
+            s4 = db.snapshot_create(self.ctxt,
+                                    {'id': fake.UUID4,
+                                     'volume_id': fake.VOLUME4_ID,
+                                     'volume_type_id': None,
+                                     'deleted': True})
+            db_snapshots.append([s3, s4])
+            expected_total = 2
+            expected_updated = 2
+        except oslo_exc.DBError:
+            pass
+
+        # restore context to normal
+        self.ctxt.read_deleted = 'no'
+
+        total, updated = db.untyped_snapshots_online_data_migration(
+            self.ctxt, max_count=10)
+
+        self.assertEqual(expected_total, total)
+        self.assertEqual(expected_updated, updated)
+
 
 @ddt.ddt
 class DBAPIConsistencygroupTestCase(BaseTest):
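A brief note on the try/except oslo_exc.DBError pattern above: it lets the same test pass both before and after the schema change, because once volume_type_id is non-nullable, creating the null-typed fixtures raises a database error and the expected counts stay at zero. The standalone sketch below restates that probe technique; the FakeDB class is invented for illustration and stands in for the real database layer.

# Sketch of the schema probe used by the tests above; FakeDB is a
# hypothetical stand-in, not cinder's db API.
class FakeDB(object):
    """Pretend DB whose volume_type_id column may or may not be nullable."""

    def __init__(self, nullable):
        self.nullable = nullable

    def volume_create(self, volume_type_id):
        if volume_type_id is None and not self.nullable:
            raise ValueError('NOT NULL constraint failed')  # like DBError


def rows_needing_migration(fake_db):
    try:
        fake_db.volume_create(volume_type_id=None)
    except ValueError:
        return 0  # schema already migrated; no null rows can exist
    return 1


assert rows_needing_migration(FakeDB(nullable=True)) == 1
assert rows_needing_migration(FakeDB(nullable=False)) == 0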


@@ -0,0 +1,20 @@
+---
+upgrade:
+  - |
+    This release fixes an issue (`Bug #1893107
+    <https://bugs.launchpad.net/cinder/+bug/1893107>`_) that under some
+    circumstances could prevent a cinder database upgrade from Train to
+    Ussuri. The issue would occur if you had upgraded an unpurged Stein
+    database to Train, and then attempted to upgrade the still unpurged
+    database to Ussuri.
+fixes:
+  - |
+    `Bug #1893107 <https://bugs.launchpad.net/cinder/+bug/1893107>`_:
+    The Ussuri release changes the cinder database schema to make the
+    ``volume_type_id`` column in the ``volumes`` and ``snapshots`` tables
+    non-nullable because all volumes have been required to have a volume type
+    since the Train release. The online migration, however, did not process
+    soft-deleted rows, leaving the possibility that there could be a deleted
+    volume or snapshot with a null ``volume_type_id``, which in turn would
+    make the database upgrade fail when the non-nullability constraint could
+    not be applied. The issue is fixed in the current release.
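As a practical note (not part of the patch): an operator who wants to confirm whether a Train database is affected before upgrading can count the rows, including soft-deleted ones, that still have a null volume_type_id. The snippet below is a hedged example assuming SQLAlchemy 1.4+; the connection URL is a placeholder for your actual cinder database.

# Hypothetical pre-upgrade check, not part of this patch; replace the
# placeholder URL with your real cinder database connection string.
import sqlalchemy as sa

engine = sa.create_engine('mysql+pymysql://cinder:secret@localhost/cinder')
with engine.connect() as conn:
    for table in ('volumes', 'snapshots'):
        count = conn.execute(sa.text(
            'SELECT COUNT(*) FROM %s WHERE volume_type_id IS NULL' % table
        )).scalar()
        # Any non-zero count (deleted rows included) would block the
        # Ussuri schema migration before this fix.
        print('%s: %d rows with NULL volume_type_id' % (table, count))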