Migration step to update objects to latest version

This adds a migration step that will be executed as the last step
in the 'ironic-dbsync online_data_migrations' command. It updates
all objects that are not in their latest version to that latest
version.

Co-Authored-By: Dmitry Tantsur <dtantsur@redhat.com>
Change-Id: Ib39319a75205ce7692f0525f99200a5de3d60a88
Story: #2004174
Task: #27656
Ruby Loo 2018-10-25 02:15:21 +00:00 committed by Dmitry Tantsur
parent 7cf2d18fa5
commit b8d0f2b091
5 changed files with 229 additions and 0 deletions

@@ -48,6 +48,9 @@ dbapi = db_api.get_instance()
# number migrated. If the function determines that no migrations are needed,
# it returns (0, 0).
#
# The last migration step should always remain the last one -- it migrates
# all objects to their latest known versions.
#
# Example of a function docstring:
#
# def sample_data_migration(context, max_count):
@@ -71,6 +74,8 @@ ONLINE_MIGRATIONS = (
# Added in Rocky
# TODO(rloo): remove in Stein
(portgroup, 'migrate_vif_port_id'),
# NOTE(rloo): Don't remove this; it should always be last
(dbapi, 'update_to_latest_versions'),
)
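
For reference, the contract described in the comments above (each entry is
called with (context, max_count) and must return a 2-tuple) can be sketched
as follows. This sample_data_migration and its in-memory _WIDGETS list are
purely hypothetical and are not part of this change:

# Hypothetical, self-contained sketch of a function that follows the
# ONLINE_MIGRATIONS contract described above; _WIDGETS stands in for
# database rows and does not exist in ironic.
_WIDGETS = [{'id': i, 'migrated': False} for i in range(5)]


def sample_data_migration(context, max_count):
    """Migrate widgets, honouring the (total, migrated) return contract."""
    stale = [w for w in _WIDGETS if not w['migrated']]
    total = len(stale)
    if not total:
        # Nothing needs migrating.
        return 0, 0
    # max_count == 0 means "migrate everything".
    batch = stale if max_count == 0 else stale[:max_count]
    for widget in batch:
        widget['migrated'] = True  # a real migration would update the DB row
    return total, len(batch)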

@@ -896,6 +896,21 @@ class Connection(object):
False otherwise.
"""
@abc.abstractmethod
def update_to_latest_versions(self, context, max_count):
"""Updates objects to their latest known versions.
This scans all the tables and for objects that are not in their latest
version, updates them to that version.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
@abc.abstractmethod
def set_node_traits(self, node_id, traits, version):
"""Replace all of the node traits with specified list of traits.

@@ -1234,6 +1234,86 @@ class Connection(api.Connection):
return False
return True
@oslo_db_api.retry_on_deadlock
def update_to_latest_versions(self, context, max_count):
"""Updates objects to their latest known versions.
This scans all the tables and for objects that are not in their latest
version, updates them to that version.
:param context: the admin context
:param max_count: The maximum number of objects to migrate. Must be
>= 0. If zero, all the objects will be migrated.
:returns: A 2-tuple, 1. the total number of objects that need to be
migrated (at the beginning of this call) and 2. the number
of migrated objects.
"""
# NOTE(rloo): 'master' has the most recent (latest) versions.
mapping = release_mappings.RELEASE_MAPPING['master']['objects']
total_to_migrate = 0
total_migrated = 0
sql_models = [model for model in models.Base.__subclasses__()
if model.__name__ in mapping]
for model in sql_models:
version = mapping[model.__name__][0]
query = model_query(model).filter(model.version != version)
total_to_migrate += query.count()
if not total_to_migrate:
return total_to_migrate, 0
# NOTE(xek): Each of these operations happen in different transactions.
# This is to ensure a minimal load on the database, but at the same
# time it can cause an inconsistency in the amount of total and
# migrated objects returned (total could be > migrated). This is
# because some objects may have already migrated or been deleted from
# the database between the time the total was computed (above) to the
# time we do the updating (below).
#
# By the time this script is run, only the new release version is
# running, so the impact of this error will be minimal - e.g. the
# operator will run this script more than once to ensure that all
# data have been migrated.
# If max_count is zero, we want to migrate all the objects.
max_to_migrate = max_count or total_to_migrate
for model in sql_models:
version = mapping[model.__name__][0]
num_migrated = 0
with _session_for_write():
query = model_query(model).filter(model.version != version)
# NOTE(rloo) Caution here; after doing query.count(), it is
# possible that the value is different in the
# next invocation of the query.
if max_to_migrate < query.count():
# Only want to update max_to_migrate objects; cannot use
# sql's limit(), so we generate a new query with
# max_to_migrate objects.
ids = []
for obj in query.slice(0, max_to_migrate):
ids.append(obj['id'])
num_migrated = (
model_query(model).
filter(sql.and_(model.id.in_(ids),
model.version != version)).
update({model.version: version},
synchronize_session=False))
else:
num_migrated = (
model_query(model).
filter(model.version != version).
update({model.version: version},
synchronize_session=False))
total_migrated += num_migrated
max_to_migrate -= num_migrated
if max_to_migrate <= 0:
break
return total_to_migrate, total_migrated
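
As a concrete illustration of the batching semantics implemented above (and
exercised by test_old_version_max_count_2_some_nodes further below), assume
five Node rows are not at their latest version; dbapi and context here are
hypothetical stand-ins for an initialised DB API object and admin context:

# Hypothetical walk-through of repeated calls, assuming five out-of-date
# Node rows and an already initialised dbapi/context:
def walk_through(dbapi, context):
    assert dbapi.update_to_latest_versions(context, 2) == (5, 2)
    # two rows were updated; three still need migrating
    assert dbapi.update_to_latest_versions(context, 10) == (3, 3)
    # all remaining rows are now at their latest version
    assert dbapi.update_to_latest_versions(context, 10) == (0, 0)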
@staticmethod
def _verify_max_traits_per_node(node_id, num_traits):
"""Verify that an operation would not exceed the per-node trait limit.

@@ -17,6 +17,7 @@ from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import uuidutils
from testtools import matchers
from ironic.common import context
from ironic.common import exception
from ironic.common import release_mappings
from ironic.db import api as db_api
@@ -115,3 +116,118 @@ class GetNotVersionsTestCase(base.DbTestCase):
utils.create_test_node(uuid=uuidutils.generate_uuid(), version='1.4')
self.assertRaises(exception.IronicException,
self.dbapi.get_not_versions, 'NotExist', ['1.6'])
class UpdateToLatestVersionsTestCase(base.DbTestCase):
def setUp(self):
super(UpdateToLatestVersionsTestCase, self).setUp()
self.context = context.get_admin_context()
self.dbapi = db_api.get_instance()
obj_versions = release_mappings.get_object_versions(
objects=['Node', 'Chassis'])
master_objs = release_mappings.RELEASE_MAPPING['master']['objects']
self.node_ver = master_objs['Node'][0]
self.chassis_ver = master_objs['Chassis'][0]
self.node_old_ver = self._get_old_object_version(
self.node_ver, obj_versions['Node'])
self.chassis_old_ver = self._get_old_object_version(
self.chassis_ver, obj_versions['Chassis'])
self.node_version_same = self.node_old_ver == self.node_ver
self.chassis_version_same = self.chassis_old_ver == self.chassis_ver
# number of objects with different versions
self.num_diff_objs = 2
if self.node_version_same:
self.num_diff_objs -= 1
if self.chassis_version_same:
self.num_diff_objs -= 1
def _get_old_object_version(self, latest_version, versions):
"""Return a version that is older (not same) as latest version.
If there aren't any older versions, return the latest version.
"""
for v in versions:
if v != latest_version:
return v
return latest_version
def test_empty_db(self):
self.assertEqual(
(0, 0), self.dbapi.update_to_latest_versions(self.context, 10))
def test_version_exists(self):
# Node will be in latest version
utils.create_test_node()
self.assertEqual(
(0, 0), self.dbapi.update_to_latest_versions(self.context, 10))
def test_one_node(self):
node = utils.create_test_node(version=self.node_old_ver)
expected = (0, 0) if self.node_version_same else (1, 1)
self.assertEqual(
expected, self.dbapi.update_to_latest_versions(self.context, 10))
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(self.node_ver, res.version)
def test_max_count_zero(self):
orig_node = utils.create_test_node(version=self.node_old_ver)
orig_chassis = utils.create_test_chassis(version=self.chassis_old_ver)
self.assertEqual((self.num_diff_objs, self.num_diff_objs),
self.dbapi.update_to_latest_versions(self.context, 0))
node = self.dbapi.get_node_by_uuid(orig_node.uuid)
self.assertEqual(self.node_ver, node.version)
chassis = self.dbapi.get_chassis_by_uuid(orig_chassis.uuid)
self.assertEqual(self.chassis_ver, chassis.version)
def test_old_version_max_count_1(self):
orig_node = utils.create_test_node(version=self.node_old_ver)
orig_chassis = utils.create_test_chassis(version=self.chassis_old_ver)
num_modified = 1 if self.num_diff_objs else 0
self.assertEqual((self.num_diff_objs, num_modified),
self.dbapi.update_to_latest_versions(self.context, 1))
node = self.dbapi.get_node_by_uuid(orig_node.uuid)
chassis = self.dbapi.get_chassis_by_uuid(orig_chassis.uuid)
self.assertTrue(node.version == self.node_old_ver or
chassis.version == self.chassis_old_ver)
self.assertTrue(node.version == self.node_ver or
chassis.version == self.chassis_ver)
def _create_nodes(self, num_nodes):
version = self.node_old_ver
nodes = []
for i in range(0, num_nodes):
node = utils.create_test_node(version=version,
uuid=uuidutils.generate_uuid())
nodes.append(node.uuid)
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(version, node.version)
return nodes
def test_old_version_max_count_2_some_nodes(self):
if self.node_version_same:
# can't test if we don't have diff versions of the node
return
nodes = self._create_nodes(5)
self.assertEqual(
(5, 2), self.dbapi.update_to_latest_versions(self.context, 2))
self.assertEqual(
(3, 3), self.dbapi.update_to_latest_versions(self.context, 10))
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(self.node_ver, node.version)
def test_old_version_max_count_same_nodes(self):
if self.node_version_same:
# can't test if we don't have diff versions of the node
return
nodes = self._create_nodes(5)
self.assertEqual(
(5, 5), self.dbapi.update_to_latest_versions(self.context, 5))
for uuid in nodes:
node = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(self.node_ver, node.version)

@@ -0,0 +1,13 @@
---
critical:
- The ``ironic-dbsync online_data_migrations`` command was not updating
the objects to their latest versions, which could prevent upgrades from
working (i.e. when running the next release's ``ironic-dbsync upgrade``).
Objects are updated to their latest versions now when running that
command. See `story 2004174
<https://storyboard.openstack.org/#!/story/2004174>`_ for more information.
upgrade:
- If you are doing a minor version upgrade, please re-run the
``ironic-dbsync online_data_migrations`` command to properly update
the versions of the Objects in the database. Otherwise, the next major
upgrade may fail.