Fix online data migrations

Online data migrations are part of the rolling upgrades mechanism. They
are run on the currently installed version, before upgrading to the next
one, to ensure that all DB records have been migrated even if they were
never accessed during the life of the current release.

This means that online migrations from one release should be removed in
the next release, as they are no longer needed.
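
For context, every online data migration in Cinder follows the same
calling convention: it receives a context and a max_count batch limit, and
returns a (total, updated) tuple so that "cinder-manage db
online_data_migrations" can keep calling it until nothing is left. A
minimal sketch of that contract (the model, the model_query helper, and
compute_value below are placeholders, not code from this patch):

    def example_online_data_migration(context, max_count):
        # Count every row that still needs migrating...
        query = model_query(context, models.Example).filter_by(new_field=None)
        total = query.count()
        updated = 0
        # ...then migrate at most max_count of them in this pass.
        for row in query.limit(max_count).all():
            row.update({'new_field': compute_value(row)})
            updated += 1
        # The caller loops until updated reaches total, so batches converge.
        return total, updated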

We still have the Queens online data migrations in our code, and we
should remove them in master, Stein, and Rocky.

This patch also adds checks to the status tool to verify that these
online data migrations have completed.
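
For reference, the status tool builds on oslo.upgradecheck: each check
method returns a Result carrying a Code (SUCCESS, WARNING, or FAILURE),
and the command's exit status reflects the worst code returned. A minimal
sketch of a check class using only that library (the class and check
names are illustrative):

    from oslo_upgradecheck import upgradecheck

    class ExampleChecks(upgradecheck.UpgradeCommands):

        def _check_example(self):
            # A real check would inspect the DB, as the ones in this patch
            # do, and return FAILURE with details if unmigrated rows remain.
            return upgradecheck.Result(upgradecheck.Code.SUCCESS)

        # (display name, check method) pairs, mirroring Cinder's tuple.
        _upgrade_checks = (('Example', _check_example),)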

Closes-bug: #1837703
Change-Id: I025789f92eb318b8e5a531bea6cf106d52f268ae
(cherry picked from commit 230bda82c1)
Gorka Eguileor 2019-07-24 10:30:38 +02:00 committed by Ivan Kolodyazhny
parent 7a33e5fa79
commit 83b8325614
8 changed files with 134 additions and 412 deletions

View File

@@ -120,30 +120,6 @@ def _get_non_shared_target_hosts(ctxt):
return hosts, numvols_needing_update
def shared_targets_online_data_migration(ctxt, max_count):
"""Update existing volumes shared_targets flag based on capabilities."""
non_shared_hosts = []
completed = 0
non_shared_hosts, total_vols_to_update = _get_non_shared_target_hosts(ctxt)
for host in non_shared_hosts:
# We use the api call here instead of going directly to the
# db query to take advantage of parsing out the host
# correctly
vrefs = db_api.volume_get_all_by_host(
ctxt, host,
filters={'shared_targets': True})
if len(vrefs) > max_count:
del vrefs[-(len(vrefs) - max_count):]
max_count -= len(vrefs)
for v in vrefs:
db.volume_update(
ctxt, v['id'],
{'shared_targets': 0})
completed += 1
return total_vols_to_update, completed
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
@@ -266,17 +242,13 @@ class DbCommands(object):
# NOTE: Online migrations cannot depend on having Cinder services running.
# Migrations can be called during Fast-Forward Upgrades without having any
# Cinder services up.
online_migrations = (
# Added in Queens
db.service_uuids_online_data_migration,
# Added in Queens
db.backup_service_online_migration,
# Added in Queens
db.volume_service_uuids_online_data_migration,
# Added in Queens
shared_targets_online_data_migration,
# Added in Queens
db.attachment_specs_online_data_migration
# NOTE: Online migrations must be removed at the beginning of the release
# following the one in which they were introduced. A comment stating the
# release a migration was added in and the release it must be removed in
# must precede each element of the "online_migrations" tuple, like this:
#    # Added in Queens, remove in Rocky
# db.service_uuids_online_data_migration,
online_migrations = tuple()
def __init__(self):

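To make the new convention concrete, a hypothetical future migration
would be registered like this (the migration name is invented for
illustration):

    online_migrations = (
        # Added in Train, remove in Ussuri
        example_online_data_migration,
    )
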
View File

@@ -18,6 +18,9 @@
import os
import sys
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import service # noqa
from oslo_config import cfg
@@ -59,6 +62,10 @@ def _get_enabled_drivers():
class Checks(uc.UpgradeCommands):
"""Upgrade checks to run."""
def __init__(self, *args, **kwargs):
super(Checks, self).__init__(*args, **kwargs)
self.context = context.get_admin_context()
def _file_exists(self, path):
"""Helper for mocking check of os.path.exists."""
return os.path.exists(path)
@@ -188,11 +195,37 @@ class Checks(uc.UpgradeCommands):
return uc.Result(SUCCESS)
def _check_service_uuid(self):
try:
db.service_get_by_uuid(self.context, None)
except exception.ServiceNotFound:
volumes = db.volume_get_all(self.context,
limit=1,
filters={'service_uuid': None})
if not volumes:
return uc.Result(SUCCESS)
return uc.Result(
FAILURE,
'Services and volumes must have a service UUID. Please fix this '
'issue by running Queens online data migrations.')
def _check_attachment_specs(self):
if db.attachment_specs_exist(self.context):
return uc.Result(
FAILURE,
'There should be no more AttachmentSpecs in the system. '
'Please fix this issue by running Queens online data '
'migrations.')
return uc.Result(SUCCESS)
_upgrade_checks = (
('Backup Driver Path', _check_backup_module),
('Use of Policy File', _check_policy_file),
('Windows Driver Path', _check_legacy_windows_config),
('Removed Drivers', _check_removed_drivers),
# added in Train
('Service UUIDs', _check_service_uuid),
('Attachment specs', _check_attachment_specs),
)

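The new checks can also be driven directly in Python, which is handy for
debugging (a sketch; it assumes a configured database connection, as the
tests further below set up):

    from cinder.cmd import status

    checks = status.Checks()
    result = checks._check_service_uuid()
    # Code.SUCCESS (0) means the Queens migrations completed; Code.FAILURE
    # (2) means "cinder-manage db online_data_migrations" must still be
    # run on the Queens release.
    print(result.code, result.details)
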
View File

@@ -97,18 +97,6 @@ def dispose_engine():
###################
def service_uuids_online_data_migration(context, max_count):
return IMPL.service_uuids_online_data_migration(context, max_count)
def backup_service_online_migration(context, max_count):
return IMPL.backup_service_online_migration(context, max_count)
def volume_service_uuids_online_data_migration(context, max_count):
return IMPL.volume_service_uuids_online_data_migration(context, max_count)
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
@@ -1827,6 +1815,11 @@ class Condition(object):
###################
def attachment_specs_exist(context):
"""Check if there are attachment specs left."""
return IMPL.attachment_specs_exist(context)
def attachment_specs_get(context, attachment_id):
"""DEPRECATED: Get all specs for an attachment."""
return IMPL.attachment_specs_get(context, attachment_id)
@@ -1850,9 +1843,6 @@ def attachment_specs_update_or_create(context,
specs)
def attachment_specs_online_data_migration(context, max_count):
return IMPL.attachment_specs_online_data_migration(context, max_count)
###################

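These cinder.db.api functions are thin pass-throughs: the module builds
IMPL once from oslo.db's backend mapping and delegates every call to it,
roughly like this (a sketch of the pre-existing pattern, not something
this patch changes):

    from oslo_config import cfg
    from oslo_db import api as oslo_db_api

    _BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}
    IMPL = oslo_db_api.DBAPI.from_config(cfg.CONF,
                                         backend_mapping=_BACKEND_MAPPING,
                                         lazy=True)

    def attachment_specs_exist(context):
        """Check if there are attachment specs left."""
        return IMPL.attachment_specs_exist(context)
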
View File

@@ -52,7 +52,6 @@ from sqlalchemy.sql import func
from sqlalchemy.sql import sqltypes
from cinder.api import common
from cinder.common import constants
from cinder.common import sqlalchemyutils
from cinder import db
from cinder.db.sqlalchemy import models
@@ -528,7 +527,7 @@ def service_get_all(context, backend_match_level=None, **filters):
@require_admin_context
def service_get_by_uuid(context, service_uuid):
query = model_query(context, models.Service).fitler_by(uuid=service_uuid)
query = model_query(context, models.Service).filter_by(uuid=service_uuid)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_uuid)
@@ -563,115 +562,6 @@ def service_update(context, service_id, values):
raise exception.ServiceNotFound(service_id=service_id)
@enginefacade.writer
def service_uuids_online_data_migration(context, max_count):
from cinder.objects import service
updated = 0
total = model_query(context, models.Service).filter_by(uuid=None).count()
db_services = model_query(context, models.Service).filter_by(
uuid=None).limit(max_count).all()
for db_service in db_services:
# The conversion in the Service object code
# will generate a UUID and save it for us.
service_obj = service.Service._from_db_object(
context, service.Service(), db_service)
if 'uuid' in service_obj:
updated += 1
return total, updated
@require_admin_context
def backup_service_online_migration(context, max_count):
name_rules = {'cinder.backup.drivers.swift':
'cinder.backup.drivers.swift.SwiftBackupDriver',
'cinder.backup.drivers.ceph':
'cinder.backup.drivers.ceph.CephBackupDriver',
'cinder.backup.drivers.glusterfs':
'cinder.backup.drivers.glusterfs.GlusterfsBackupDriver',
'cinder.backup.drivers.google':
'cinder.backup.drivers.google.GoogleBackupDriver',
'cinder.backup.drivers.nfs':
'cinder.backup.drivers.nfs.NFSBackupDriver',
'cinder.backup.drivers.tsm':
'cinder.backup.drivers.tsm.TSMBackupDriver',
'cinder.backup.drivers.posix':
'cinder.backup.drivers.posix.PosixBackupDriver'}
total = 0
updated = 0
session = get_session()
with session.begin():
total = model_query(
context, models.Backup, session=session).filter(
models.Backup.service.in_(name_rules.keys())).count()
backups = (model_query(
context, models.Backup, session=session).filter(
models.Backup.service.in_(
name_rules.keys())).limit(max_count)).all()
if len(backups):
for backup in backups:
updated += 1
backup.service = name_rules[backup.service]
return total, updated
@enginefacade.writer
def volume_service_uuids_online_data_migration(context, max_count):
"""Update volume service_uuid columns."""
updated = 0
query = model_query(context,
models.Volume).filter_by(service_uuid=None).\
filter(models.Volume.host.isnot(None))
total = query.count()
vol_refs = query.limit(max_count).all()
service_refs = model_query(context, models.Service).filter_by(
topic=constants.VOLUME_TOPIC).limit(max_count).all()
# build a map to access the service uuid by host
svc_map = {}
for svc in service_refs:
svc_map[svc.host] = svc.uuid
# update our volumes appropriately
for v in vol_refs:
host = v.host.split('#')
v['service_uuid'] = svc_map[host[0]]
# re-use the session we already have associated with the
# volumes here (from the query above)
session = query.session
with session.begin():
v.save(session)
updated += 1
return total, updated
@enginefacade.writer
def attachment_specs_online_data_migration(context, max_count):
from cinder.objects import volume_attachment
# First figure out how many attachments have specs which need to be
# migrated, grouped by the attachment.id from the specs table.
session = get_session()
total = session.query(models.AttachmentSpecs.attachment_id).filter_by(
deleted=False).group_by(models.AttachmentSpecs.attachment_id).count()
# Now get the limited distinct set of attachment_ids to start migrating.
result = session.query(
models.AttachmentSpecs.attachment_id).filter_by(
deleted=False).group_by(models.AttachmentSpecs.attachment_id).limit(
max_count).all()
migrated = 0
# result is a list of tuples where the first item is the attachment_id
for attachment_id in result:
attachment_id = attachment_id[0]
# Loading the volume attachment object will migrate its related
# attachment specs and delete those attachment specs.
volume_attachment.VolumeAttachment.get_by_id(context, attachment_id)
migrated += 1
return total, migrated
###################
@@ -2108,6 +1998,11 @@ def attachment_destroy(context, attachment_id):
return updated_values
def attachment_specs_exist(context):
query = model_query(context, models.AttachmentSpecs, read_deleted='no')
return bool(query.first())
def _attachment_specs_query(context, attachment_id, session=None):
return model_query(context, models.AttachmentSpecs, session=session,
read_deleted="no").\

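Note that attachment_specs_exist asks the database for at most one row
via query.first() rather than counting everything; in plain SQLAlchemy
terms the pattern is roughly (illustrative, assuming a model with a
soft-delete flag):

    def rows_exist(session, model):
        # SELECT ... LIMIT 1 is cheaper than COUNT(*) when all we need
        # is a yes/no answer.
        return bool(session.query(model).filter_by(deleted=False).first())
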
View File

@@ -14,15 +14,22 @@
import ddt
import mock
import uuid
from oslo_config import cfg
from oslo_upgradecheck import upgradecheck as uc
import testtools
import cinder.backup.manager # noqa
from cinder.cmd import status
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqla_api
from cinder import exception
from cinder import test
import cinder.volume.manager as volume_manager
CONF = cfg.CONF
@@ -30,6 +37,18 @@ CONF = cfg.CONF
class TestCinderStatus(testtools.TestCase):
"""Test cases for the cinder-status upgrade check command."""
def _setup_database(self):
CONF.set_default('connection', 'sqlite://', 'database')
CONF.set_default('sqlite_synchronous', False, 'database')
if not test._DB_CACHE:
test._DB_CACHE = test.Database(
sqla_api, test.migration,
sql_connection=CONF.database.connection)
self.useFixture(test._DB_CACHE)
sqla_api._GET_METHODS = {}
self.addCleanup(CONF.reset)
def setUp(self):
super(TestCinderStatus, self).setUp()
self.checks = status.Checks()
@@ -47,6 +66,9 @@ class TestCinderStatus(testtools.TestCase):
self.find_file = patcher.start()
self.find_file.return_value = '/etc/cinder/'
self._setup_database()
self.context = context.get_admin_context()
def _set_config(self, key, value, group=None):
CONF.set_override(key, value, group=group)
self.addCleanup(CONF.clear_override, key, group=group)
@@ -176,3 +198,58 @@ class TestCinderStatus(testtools.TestCase):
self._set_config('enabled_backends', None)
result = self.checks._check_removed_drivers()
self.assertEqual(uc.Code.SUCCESS, result.code)
@staticmethod
def uuid():
return str(uuid.uuid4())
def _create_service(self, **values):
values.setdefault('uuid', self.uuid())
db.service_create(self.context, values)
def _create_volume(self, **values):
values.setdefault('id', self.uuid())
values.setdefault('service_uuid', self.uuid())
try:
db.volume_create(self.context, values)
# Support setting deleted on creation
except exception.VolumeNotFound:
if values.get('deleted') is not True:
raise
def test__check_service_uuid_ok(self):
self._create_service()
self._create_service()
self._create_volume()
# Confirm that we ignored deleted entries
self._create_volume(service_uuid=None, deleted=True)
result = self.checks._check_service_uuid()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test__check_service_uuid_fail_service(self):
self._create_service()
self._create_service(uuid=None)
self._create_volume()
result = self.checks._check_service_uuid()
self.assertEqual(uc.Code.FAILURE, result.code)
def test__check_service_uuid_fail_volume(self):
self._create_service()
self._create_volume(service_uuid=None)
result = self.checks._check_service_uuid()
self.assertEqual(uc.Code.FAILURE, result.code)
def test__check_attachment_specs_ok(self):
attach_uuid = self.uuid()
# Confirm that we ignore deleted attachment specs
db.attachment_specs_update_or_create(self.context, attach_uuid,
{'k': 'v'})
db.attachment_specs_delete(self.context, attach_uuid, 'k')
result = self.checks._check_attachment_specs()
self.assertEqual(uc.Code.SUCCESS, result.code)
def test__check_attachment_specs_fail(self):
db.attachment_specs_update_or_create(self.context, self.uuid(),
{'k': 'v', 'k2': 'v2'})
result = self.checks._check_attachment_specs()
self.assertEqual(uc.Code.FAILURE, result.code)

View File

@@ -2248,50 +2248,3 @@ class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
utils.create_service(ctxt, values)
self.ctxt = ctxt
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.8')
def test_shared_targets_migrations(self, mock_version):
"""Ensure we can update the column."""
# Run the migration and verify that we updated all 3 entries
with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
return_value={'connection_protocol': 'iSCSI',
'shared_targets': False}):
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 10))
self.assertEqual(3, total)
self.assertEqual(3, updated)
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.8')
def test_shared_targets_migrations_non_iscsi(self, mock_version):
"""Ensure we can update the column."""
# Run the migration and verify that we updated all 3 entries
with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
return_value={'connection_protocol': 'RBD'}):
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 10))
self.assertEqual(3, total)
self.assertEqual(3, updated)
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.8')
def test_shared_targets_migrations_with_limit(self, mock_version):
"""Ensure we update in batches."""
# Run the migration in batches of 2 and verify the counts
with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
return_value={'connection_protocol': 'iSCSI',
'shared_targets': False}):
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 2))
self.assertEqual(3, total)
self.assertEqual(2, updated)
total, updated = (
cinder_manage.shared_targets_online_data_migration(
self.ctxt, 2))
self.assertEqual(1, total)
self.assertEqual(1, updated)

View File

@@ -27,7 +27,6 @@ import six
from sqlalchemy.sql import operators
from cinder.api import common
from cinder.common import constants
from cinder import context
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
@@ -163,79 +162,6 @@ class DBAPIServiceTestCase(BaseTest):
"""Unit tests for cinder.db.api.service_*."""
def test_service_uuid_migrations(self):
# Force create one entry with no UUID
sqlalchemy_api.service_create(self.ctxt, {
'host': 'host1',
'binary': constants.VOLUME_BINARY,
'topic': 'volume', })
# Create another one with a valid UUID
sqlalchemy_api.service_create(self.ctxt, {
'host': 'host2',
'binary': constants.VOLUME_BINARY,
'topic': 'volume',
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})
# Run the migration and verify that we updated 1 entry
total, updated = db.service_uuids_online_data_migration(
self.ctxt, 10)
self.assertEqual(1, total)
self.assertEqual(1, updated)
def test_service_uuid_migrations_with_limit(self):
sqlalchemy_api.service_create(self.ctxt, {
'host': 'host1',
'binary': constants.VOLUME_BINARY,
'topic': 'volume', })
sqlalchemy_api.service_create(self.ctxt, {
'host': 'host2',
'binary': constants.VOLUME_BINARY,
'topic': 'volume', })
sqlalchemy_api.service_create(self.ctxt, {
'host': 'host3',
'binary': constants.VOLUME_BINARY,
'topic': 'volume', })
# Run the migration and verify that we updated 2 of the 3 entries
total, updated = db.service_uuids_online_data_migration(
self.ctxt, 2)
self.assertEqual(3, total)
self.assertEqual(2, updated)
# Now get the rest, intentionally setting max > what we should have
total, updated = db.service_uuids_online_data_migration(
self.ctxt, 2)
self.assertEqual(1, total)
self.assertEqual(1, updated)
@ddt.data({'count': 5, 'total': 3, 'updated': 3},
{'count': 2, 'total': 3, 'updated': 2})
@ddt.unpack
def test_backup_service_online_migration(self, count, total, updated):
volume = utils.create_volume(self.ctxt)
sqlalchemy_api.backup_create(self.ctxt, {
'service': 'cinder.backup.drivers.swift',
'volume_id': volume.id
})
sqlalchemy_api.backup_create(self.ctxt, {
'service': 'cinder.backup.drivers.ceph',
'volume_id': volume.id
})
sqlalchemy_api.backup_create(self.ctxt, {
'service': 'cinder.backup.drivers.glusterfs',
'volume_id': volume.id
})
sqlalchemy_api.backup_create(self.ctxt, {
'service': 'cinder.backup.drivers.fake_backup_service',
'volume_id': volume.id
})
t, u = db.backup_service_online_migration(self.ctxt, count)
self.assertEqual(total, t)
self.assertEqual(updated, u)
def test_service_create(self):
# Add a cluster value to the service
values = {'cluster_name': 'cluster'}
@@ -461,65 +387,6 @@ class DBAPIServiceTestCase(BaseTest):
self.assertIsInstance(binary_op, sqlalchemy_api.sql.functions.Function)
self.assertEqual('binary', binary_op.name)
def test_volume_service_uuid_migrations(self):
# Force create one entry with no UUID
sqlalchemy_api.volume_create(self.ctxt,
{'host': 'host1@lvm-driver1#lvm-driver1'})
# Create another one with a valid UUID
sqlalchemy_api.volume_create(
self.ctxt,
{'host': 'host1@lvm-driver1#lvm-driver1',
'service_uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})
# Need a service to query
values = {
'host': 'host1@lvm-driver1',
'binary': constants.VOLUME_BINARY,
'topic': constants.VOLUME_TOPIC}
utils.create_service(self.ctxt, values)
# Run the migration and verify that we updated 1 entry
total, updated = db.volume_service_uuids_online_data_migration(
self.ctxt, 10)
self.assertEqual(1, total)
self.assertEqual(1, updated)
def test_volume_service_uuid_migrations_with_limit(self):
"""Test db migrate of volumes in batches."""
db.volume_create(
self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
db.volume_create(
self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
db.volume_create(
self.ctxt, {'host': 'host1@lvm-driver1#lvm-driver1'})
# Entries with no host should be skipped
db.volume_create(self.ctxt, {'host': None})
values = {
'host': 'host1@lvm-driver1',
'binary': constants.VOLUME_BINARY,
'topic': constants.VOLUME_TOPIC,
'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
utils.create_service(self.ctxt, values)
# Run the migration and verify that we updated 2 entries
total, updated = db.volume_service_uuids_online_data_migration(
self.ctxt, 2)
# total = number of volumes that have hosts and don't have a
# service_uuid
self.assertEqual(3, total)
self.assertEqual(2, updated)
# Now get the last one (intentionally setting max > expected)
total, updated = db.volume_service_uuids_online_data_migration(
self.ctxt, 99)
self.assertEqual(1, total)
self.assertEqual(1, updated)
@ddt.ddt
class DBAPIVolumeTestCase(BaseTest):
@@ -3482,73 +3349,3 @@ class DBAPIGroupTestCase(BaseTest):
self.assertEqual(
new_cluster_name + groups[i].cluster_name[len(cluster_name):],
db_groups[i].cluster_name)
class DBAPIAttachmentSpecsTestCase(BaseTest):
def test_attachment_specs_online_data_migration(self):
"""Tests the online data migration initiated via cinder-manage"""
# Create five attachment records:
# 1. first attachment has specs but is deleted so it's ignored
# 2. second attachment is already migrated (no attachment_specs
# entries) so it's ignored
# 3. the remaining attachments have specs so they are migrated in
# in batches of 2
# Create an attachment record with specs and delete it.
attachment = objects.VolumeAttachment(
self.ctxt, attach_status='attaching', volume_id=fake.VOLUME_ID)
attachment.create()
# Create an attachment_specs entry for attachment.
connector = {'host': '127.0.0.1'}
db.attachment_specs_update_or_create(
self.ctxt, attachment.id, connector)
# Now delete the attachment which should also delete the specs.
attachment.destroy()
# Run the migration routine to see that there is nothing to migrate.
total, migrated = db.attachment_specs_online_data_migration(
self.ctxt, 50)
self.assertEqual(0, total)
self.assertEqual(0, migrated)
# Create a volume attachment with no specs (already migrated).
attachment = objects.VolumeAttachment(
self.ctxt, attach_status='attaching', volume_id=fake.VOLUME_ID,
connector=connector)
attachment.create()
# Run the migration routine to see that there is nothing to migrate.
total, migrated = db.attachment_specs_online_data_migration(
self.ctxt, 50)
self.assertEqual(0, total)
self.assertEqual(0, migrated)
# We have to create a real volume because of the joinedload in the
# DB API query to get the volume attachment.
volume = db.volume_create(self.ctxt, {'host': 'host1'})
# Now create three volume attachments with specs and migrate them
# in batches of 2 to show we are enforcing the limit.
for x in range(3):
attachment = objects.VolumeAttachment(
self.ctxt, attach_status='attaching', volume_id=volume['id'])
attachment.create()
# Create an attachment_specs entry for the attachment.
db.attachment_specs_update_or_create(
self.ctxt, attachment.id, connector)
# Migrate 2 at a time.
total, migrated = db.attachment_specs_online_data_migration(
self.ctxt, 2)
self.assertEqual(3, total)
self.assertEqual(2, migrated)
# This should complete the migration.
total, migrated = db.attachment_specs_online_data_migration(
self.ctxt, 2)
self.assertEqual(1, total)
self.assertEqual(1, migrated)
# Run it one more time to make sure there is nothing left.
total, migrated = db.attachment_specs_online_data_migration(
self.ctxt, 2)
self.assertEqual(0, total)
self.assertEqual(0, migrated)

View File

@@ -0,0 +1,5 @@
---
upgrade:
- |
Two new checks are added to the ``cinder-status upgrade check`` CLI to
ensure that online data migrations from Queens onward have been completed.