
Include deleted volumes for online data migrations

The two online data migrations which moved volumes to the __DEFAULT__
volume type did not include deleted volumes, which means that the
follow-up migration that makes volume_type_id non-nullable fails,
because the deleted volumes still have `null` as their
volume_type_id.

This patch fixes that by including deleted volumes when running the
online database migrations.  Also adds tests.

(The patch is proposed directly to stable/ussuri because the online
migration code was removed from master by change
I78681ea89762790f544178725999903a41d75ad1.)

Co-authored-by: Mohammed Naser <mnaser@vexxhost.com>

Change-Id: I3dc5ab78266dd895829e827066f78c6bebf67d0d
Closes-Bug: #1893107
(cherry picked from commit 2440cb66e3)
Conflicts:
- replaced the Ussuri release note with one specific to Train
Branch: changes/82/748482/3
Author: Brian Rosmaita (1 month ago)
Parent commit: 00ac80bbad

3 changed files with 191 additions and 4 deletions:
  1. cinder/db/sqlalchemy/api.py (+4, -4)
  2. cinder/tests/unit/test_db_api.py (+109, -0)
  3. releasenotes/notes/bug-1893107-train-45bb91952c3170e1.yaml (+78, -0)
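
The root cause is the soft-delete filtering done by the database query
helper: model_query() defaults to read_deleted='no' and silently excludes
soft-deleted rows, so the original migrations never saw them. A minimal
sketch of that behavior (a simplified stand-in for illustration, not the
actual cinder/oslo.db implementation):

    # Simplified stand-in for the query helper used in the diff below;
    # the real model_query() in cinder/db/sqlalchemy/api.py supports
    # more options.
    def model_query(context, model, session, read_deleted='no'):
        query = session.query(model)
        if read_deleted == 'no':
            # Default: soft-deleted rows are filtered out, which is why
            # the unfixed migrations skipped them.
            query = query.filter_by(deleted=False)
        elif read_deleted == 'only':
            query = query.filter_by(deleted=True)
        # read_deleted == 'yes': no deleted filter at all, which is what
        # the fix below relies on.
        return query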

cinder/db/sqlalchemy/api.py (+4, -4)

@@ -580,11 +580,11 @@ def untyped_volumes_online_data_migration(context, max_count):
     session = get_session()
     with session.begin():
         total = model_query(context,
-                            models.Volume,
+                            models.Volume, read_deleted='yes',
                             session=session).filter_by(
                                 volume_type_id=None).limit(max_count).count()
         volumes = model_query(context,
-                              models.Volume,
+                              models.Volume, read_deleted='yes',
                               session=session).filter_by(
                                   volume_type_id=None).limit(max_count).all()
         for volume in volumes:
@@ -605,11 +605,11 @@ def untyped_snapshots_online_data_migration(context, max_count):
     session = get_session()
     with session.begin():
         total = model_query(context,
-                            models.Snapshot,
+                            models.Snapshot, read_deleted='yes',
                             session=session).filter_by(
                                 volume_type_id=None).limit(max_count).count()
         snapshots = model_query(context,
-                                models.Snapshot,
+                                models.Snapshot, read_deleted='yes',
                                 session=session).filter_by(
                                     volume_type_id=None).limit(max_count).all()
         for snapshot in snapshots:
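
Each of these functions returns a (total, updated) pair: how many rows
still match the filter and how many were fixed in this batch. A
hypothetical driver loop, shown only to illustrate that contract (the
real cinder-manage online_data_migrations handling is more elaborate):

    def run_until_complete(migration, context, batch_size=50):
        """Drive one online data migration to completion in batches."""
        while True:
            total, updated = migration(context, max_count=batch_size)
            if total == 0:
                return  # no rows left to migrate
            if updated == 0:
                # Rows remain but none could be updated (for example,
                # the __DEFAULT__ volume type is missing); bail out
                # instead of looping forever.
                raise RuntimeError('%d rows still need migration' % total)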


cinder/tests/unit/test_db_api.py (+109, -0)

@@ -21,6 +21,7 @@ import enum
 import mock
 from mock import call
 from oslo_config import cfg
+from oslo_db import exception as oslo_exc
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
 import six
@@ -1689,6 +1690,57 @@ class DBAPIVolumeTestCase(BaseTest):
                 new_cluster_name + vols[i].cluster_name[len(cluster_name):],
                 db_vols[i].cluster_name)
 
+    def test_untyped_volumes_online_data_migration(self):
+        # Bug #1893107: need to make sure we migrate even deleted
+        # volumes so that the DB doesn't need to be purged before
+        # making the volume_type_id column non-nullable in a
+        # subsequent release.
+
+        # need this or volume_create will fail when creating a deleted volume
+        self.ctxt.read_deleted = 'yes'
+
+        db_volumes = [
+            # non-deleted with volume_type
+            db.volume_create(self.ctxt,
+                             {'id': fake.VOLUME_ID,
+                              'volume_type_id': fake.VOLUME_TYPE_ID,
+                              'deleted': False}),
+            # deleted with volume_type
+            db.volume_create(self.ctxt,
+                             {'id': fake.VOLUME2_ID,
+                              'volume_type_id': fake.VOLUME_TYPE_ID,
+                              'deleted': True})]
+        expected_total = 0
+        expected_updated = 0
+
+        # if the volume_type_id column is nullable, add some that will
+        # have to be migrated
+        try:
+            # non-deleted with NO volume_type
+            v3 = db.volume_create(self.ctxt,
+                                  {'id': fake.VOLUME3_ID,
+                                   'volume_type_id': None,
+                                   'deleted': False})
+            # deleted with NO volume_type
+            v4 = db.volume_create(self.ctxt,
+                                  {'id': fake.VOLUME4_ID,
+                                   'volume_type_id': None,
+                                   'deleted': True})
+            db_volumes.extend([v3, v4])
+            expected_total = 2
+            expected_updated = 2
+
+        except oslo_exc.DBError:
+            pass
+
+        # restore context to normal
+        self.ctxt.read_deleted = 'no'
+
+        total, updated = db.untyped_volumes_online_data_migration(
+            self.ctxt, max_count=10)
+        self.assertEqual(expected_total, total)
+        self.assertEqual(expected_updated, updated)
+
+
 @ddt.ddt
 class DBAPISnapshotTestCase(BaseTest):
@@ -2065,6 +2117,63 @@ class DBAPISnapshotTestCase(BaseTest):
 
         self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))
 
+    def test_untyped_snapshots_online_data_migration(self):
+        # Bug #1893107: need to make sure we migrate even deleted
+        # snapshots so that the DB doesn't need to be purged before
+        # making the volume_type_id column non-nullable in a
+        # subsequent release.
+
+        # need this or snapshot_create will fail when creating a deleted
+        # snapshot
+        self.ctxt.read_deleted = 'yes'
+
+        db_snapshots = [
+            # non-deleted with volume_type
+            db.snapshot_create(self.ctxt,
+                               {'id': fake.SNAPSHOT_ID,
+                                'volume_id': fake.VOLUME_ID,
+                                'volume_type_id': fake.VOLUME_TYPE_ID,
+                                'deleted': False}),
+            # deleted with volume_type
+            db.snapshot_create(self.ctxt,
+                               {'id': fake.SNAPSHOT2_ID,
+                                'volume_id': fake.VOLUME2_ID,
+                                'volume_type_id': fake.VOLUME_TYPE_ID,
+                                'deleted': True})]
+        expected_total = 0
+        expected_updated = 0
+
+        # if the volume_type_id column is nullable, add some that will
+        # have to be migrated
+        try:
+            # non-deleted with NO volume_type
+            s3 = db.snapshot_create(self.ctxt,
+                                    {'id': fake.SNAPSHOT3_ID,
+                                     'volume_id': fake.VOLUME3_ID,
+                                     'volume_type_id': None,
+                                     'deleted': False})
+            # deleted with NO volume_type
+            s4 = db.snapshot_create(self.ctxt,
+                                    {'id': fake.UUID4,
+                                     'volume_id': fake.VOLUME4_ID,
+                                     'volume_type_id': None,
+                                     'deleted': True})
+
+            db_snapshots.extend([s3, s4])
+            expected_total = 2
+            expected_updated = 2
+
+        except oslo_exc.DBError:
+            pass
+
+        # restore context to normal
+        self.ctxt.read_deleted = 'no'
+
+        total, updated = db.untyped_snapshots_online_data_migration(
+            self.ctxt, max_count=10)
+        self.assertEqual(expected_total, total)
+        self.assertEqual(expected_updated, updated)
+
+
 @ddt.ddt
 class DBAPIConsistencygroupTestCase(BaseTest):
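
To run just the new tests, something along these lines should work,
assuming cinder's usual tox setup (the Python environment name varies
by branch):

    tox -e py37 -- cinder.tests.unit.test_db_api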


releasenotes/notes/bug-1893107-train-45bb91952c3170e1.yaml (+78, -0)

@@ -0,0 +1,78 @@
---
upgrade:
  - |
    This release modifies the online database migrations to address an
    upgrade issue (`Bug #1893107
    <https://bugs.launchpad.net/cinder/+bug/1893107>`_). The issue does
    not manifest itself in the Train release of cinder, but under specific
    circumstances it can prevent a cinder database upgrade from Train to
    Ussuri.

    This upgrade notice applies to you only if **all** of the following
    conditions are met:

    #. You upgraded to Train from Stein
    #. Before upgrading from Stein, you did **not** purge the cinder
       database
    #. Your original upgrade from Stein was to cinder version 15.3.0 or
       earlier

    .. note::
       If you are upgrading a Stein installation directly to this release
       (cinder 15.3.1) or later, this notice does *not* apply to you.

    If all three of the above conditions apply to you, then as part of
    your upgrade to cinder 15.3.1 you should re-run the online database
    migrations contained in this release. This will prepare your cinder
    database for an eventual upgrade to the Ussuri release.
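
    The migrations are re-run with the standard ``cinder-manage``
    command, for example::

        cinder-manage db online_data_migrations --max_count 50

    The ``--max_count`` option limits how many rows are processed per
    invocation; repeat the command until it reports that no rows are
    left to migrate.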

    .. note::
       The online database migrations in this release require the
       existence of a volume type named ``__DEFAULT__``. A ``__DEFAULT__``
       volume type was created as part of your original installation
       of/upgrade to a Train release of cinder. If you have renamed (or
       renamed and deleted) the ``__DEFAULT__`` volume type, you must
       re-create it before running the online migrations. (If you renamed
       it, you don't have to un-rename it; you can create a new one just
       for the purposes of the online database migration.)

    If necessary, you can create a new ``__DEFAULT__`` volume type as
    follows using the Block Storage API, or by using the
    python-cinderclient or python-openstackclient to do the equivalent:

    API request: ``POST /v3/{project_id}/types``

    Request body::

        {
            "volume_type": {
                "name": "__DEFAULT__",
                "description": "Default Volume Type",
                "os-volume-type-access:is_public": true
            }
        }
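
    For example, one way to do the equivalent with python-openstackclient
    (verify the syntax against your installed client version) is::

        openstack volume type create --public \
            --description "Default Volume Type" __DEFAULT__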

    The ``__DEFAULT__`` volume type may safely be renamed (or renamed
    and deleted) after you have run the online migrations as long as
    the ``default_volume_type`` configuration option is set to a valid
    existing volume type.

fixes:
  - |
    `Bug #1893107 <https://bugs.launchpad.net/cinder/+bug/1893107>`_:
    The Ussuri release changes the cinder database schema to make the
    ``volume_type_id`` column in the ``volumes`` and ``snapshots`` tables
    non-nullable, because all volumes have been required to have a volume
    type since the Train release. The online database migrations in the
    cinder Train series (release 15.3.0 or earlier), however, did not
    process soft-deleted rows, leaving the possibility that a deleted
    volume or snapshot could still have a null ``volume_type_id``, which
    in turn will make the database upgrade fail when the non-nullability
    constraint is applied during an upgrade from Train to Ussuri.

    If you are upgrading to this release from an earlier release in the
    Train series (that is, you are upgrading from cinder>=15.0.0,<=15.3.0),
    under specific circumstances you should re-run the online database
    migrations so that your database will be in the correct state when
    you eventually upgrade to a Ussuri release. See the "Upgrade Notes"
    for more information.
