Drop request spec migration code
This removes the request spec online data migration routine added via change I61b9b50436d8bdb8ff06259cc2f876502d688b91 in Newton.

A nova-status upgrade check was added for this in Rocky with change I1fb63765f0b0e8f35d6a66dccf9d12cc20e9c661, so operators should be able to tell, either from the results of the "nova-status upgrade check" command or the "nova-manage db online_data_migrations" command, whether or not they have completed the migration and can upgrade to Stein.

This also allows us to start removing the various compat checks in the API and conductor services for old instances that predated having a request spec, since those should have a request spec now via the online data migration.

Related to blueprint request-spec-use-by-compute

Change-Id: Ib0de49b3c0d6b3d807c4eb321976848773c1a9e8
This commit is contained in:
parent eb0efcf681 · commit ed4fe3ead6
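For context, the kind of compatibility fallback this change makes removable looks roughly like the sketch below. It is illustrative only: the helper name _load_request_spec is hypothetical and this is not the actual API/conductor code, but the RequestSpec.get_by_instance_uuid, RequestSpecNotFound, and from_components calls it uses all appear in the diff that follows.

from nova import exception
from nova import objects


def _load_request_spec(context, instance):
    """Fetch the persisted RequestSpec for an instance.

    Pre-Newton instances were created before request specs were
    persisted, so callers had to tolerate a missing spec; once the
    online data migration has been completed everywhere, the fallback
    branch below is dead code and can be deleted.
    """
    try:
        return objects.RequestSpec.get_by_instance_uuid(
            context, instance.uuid)
    except exception.RequestSpecNotFound:
        # Rebuild a minimal spec from information saved on the instance
        # itself, mirroring _create_minimal_request_spec in the removed
        # migration code below.
        return objects.RequestSpec.from_components(
            context, instance.uuid, instance.image_meta,
            instance.flavor, instance.numa_topology,
            instance.pci_requests,
            {}, None, instance.availability_zone,
            project_id=instance.project_id,
            user_id=instance.user_id)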
@@ -66,7 +66,6 @@ from nova.objects import instance as instance_obj
from nova.objects import instance_mapping as instance_mapping_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import request_spec
from nova import quota
from nova import rpc
from nova.scheduler.client import report
@@ -389,12 +388,6 @@ class DbCommands(object):
    # not migratable (or don't need migrating), but all migrations that can
    # complete have finished.
    online_migrations = (
        # Added in Newton
        # TODO(mriedem): Remove this in Stein along with the compatibility
        # code in the api and conductor services; the nova-status upgrade check
        # added in Rocky is the tool operators can use to make sure they have
        # completed this migration.
        request_spec.migrate_instances_add_request_spec,
        # Added in Newton
        keypair_obj.migrate_keypairs_to_api_db,
        # Added in Ocata
@@ -23,7 +23,6 @@ from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.scheduler import utils as scheduler_utils
from nova.virt import hardware

REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
@@ -668,91 +667,6 @@ class RequestSpec(base.NovaObject):
        self.obj_reset_changes(['force_hosts', 'force_nodes'])


# NOTE(sbauza): Since verifying a huge list of instances can be a performance
# impact, we need to use a marker for only checking a set of them.
# As the current model doesn't expose a way to persist that marker, we propose
# here to use the request_specs table with a fake (and impossible) instance
# UUID where the related spec field (which is Text) would be the marker, ie.
# the last instance UUID we checked.
# TODO(sbauza): Remove the CRUD helpers and the migration script in Ocata.

# NOTE(sbauza): RFC4122 (4.1.7) allows a Nil UUID to be semantically accepted.
FAKE_UUID = '00000000-0000-0000-0000-000000000000'


@db.api_context_manager.reader
def _get_marker_for_migrate_instances(context):
    req_spec = (context.session.query(api_models.RequestSpec).filter_by(
        instance_uuid=FAKE_UUID)).first()
    marker = req_spec['spec'] if req_spec else None
    return marker


@db.api_context_manager.writer
def _set_or_delete_marker_for_migrate_instances(context, marker=None):
    # We need to delete the old marker anyway, which no longer corresponds to
    # the last instance we checked (if there was a marker)...
    # NOTE(sbauza): delete() deletes rows that match the query and if none
    # are found, returns 0 hits.
    context.session.query(api_models.RequestSpec).filter_by(
        instance_uuid=FAKE_UUID).delete()
    if marker is not None:
        # ... but there can be a new marker to set
        db_mapping = api_models.RequestSpec()
        db_mapping.update({'instance_uuid': FAKE_UUID, 'spec': marker})
        db_mapping.save(context.session)


def _create_minimal_request_spec(context, instance):
    image = instance.image_meta
    # This is an old instance. Let's try to populate a RequestSpec
    # object using the existing information we have previously saved.
    request_spec = objects.RequestSpec.from_components(
        context, instance.uuid, image,
        instance.flavor, instance.numa_topology,
        instance.pci_requests,
        {}, None, instance.availability_zone,
        project_id=instance.project_id,
        user_id=instance.user_id
    )
    scheduler_utils.setup_instance_group(context, request_spec)
    request_spec.create()


def migrate_instances_add_request_spec(context, max_count):
    """Creates and persists a RequestSpec per instance not yet having it."""
    marker = _get_marker_for_migrate_instances(context)
    # Prevent lazy-load of those fields for every instance later.
    attrs = ['system_metadata', 'flavor', 'pci_requests', 'numa_topology',
             'availability_zone']
    try:
        instances = objects.InstanceList.get_by_filters(
            context, filters={'deleted': False}, sort_key='created_at',
            sort_dir='asc', limit=max_count, marker=marker,
            expected_attrs=attrs)
    except exception.MarkerNotFound:
        # Instance referenced by marker may have been purged.
        # Try again but get all instances.
        instances = objects.InstanceList.get_by_filters(
            context, filters={'deleted': False}, sort_key='created_at',
            sort_dir='asc', limit=max_count, expected_attrs=attrs)
    count_all = len(instances)
    count_hit = 0
    for instance in instances:
        try:
            RequestSpec.get_by_instance_uuid(context, instance.uuid)
        except exception.RequestSpecNotFound:
            _create_minimal_request_spec(context, instance)
            count_hit += 1
    if count_all > 0:
        # We want to persist which last instance was checked in order to make
        # sure we don't review it again in a next call.
        marker = instances[-1].uuid

    _set_or_delete_marker_for_migrate_instances(context, marker)
    return count_all, count_hit


@base.NovaObjectRegistry.register
class Destination(base.NovaObject):
    # Version 1.0: Initial version
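The migrate_instances_add_request_spec routine removed above is what the deleted online_migrations entry in DbCommands pointed at. As a usage note, "nova-manage db online_data_migrations" drives each such routine in batches until nothing is left to examine; the following is a minimal sketch of that loop, assuming only the (found, done) return contract shown above. run_to_completion itself is hypothetical, not the actual nova-manage implementation.

def run_to_completion(context, migration, batch_size=50):
    # Drive one online data migration to completion, batch_size
    # instances at a time. A batch where found == 0 means the marker
    # row was cleared and every instance has been examined.
    total_found = total_done = 0
    while True:
        found, done = migration(context, batch_size)
        total_found += found
        total_done += done
        if found == 0:
            return total_found, total_done

# For example:
# run_to_completion(ctxt, migrate_instances_add_request_spec)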
@@ -11,15 +11,11 @@
# under the License.

from nova import context
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import request_spec
from nova import test
from nova.tests import fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_request_spec
@@ -82,129 +78,3 @@ class RequestSpecTestCase(test.NoDBTestCase):
        spec = self._create_spec()
        spec.destroy()
        self.assertRaises(exception.RequestSpecNotFound, spec.destroy)


@db.api_context_manager.writer
def _delete_request_spec(context, instance_uuid):
    """Deletes a RequestSpec by the instance_uuid."""
    context.session.query(api_models.RequestSpec).filter_by(
        instance_uuid=instance_uuid).delete()


class RequestSpecInstanceMigrationTestCase(
        integrated_helpers._IntegratedTestBase):
    api_major_version = 'v2.1'
    _image_ref_parameter = 'imageRef'
    _flavor_ref_parameter = 'flavorRef'

    USE_NEUTRON = True

    def setUp(self):
        super(RequestSpecInstanceMigrationTestCase, self).setUp()
        self.context = context.get_admin_context()

    def _create_instances(self, old=2, total=5):
        request = self._build_minimal_create_server_request()
        # Create all instances that would set a RequestSpec object
        request.update({'max_count': total})
        self.api.post_server({'server': request})

        self.instances = objects.InstanceList.get_all(self.context)
        # Make sure that we have all the needed instances
        self.assertEqual(total, len(self.instances))

        # Fake the legacy behaviour by removing the RequestSpec for some old.
        for i in range(0, old):
            _delete_request_spec(self.context, self.instances[i].uuid)

        # Just add a deleted instance to make sure we don't create
        # a RequestSpec object for it.
        del request['max_count']
        server = self.api.post_server({'server': request})
        self.api.delete_server(server['id'])
        # Make sure we have the deleted instance only soft-deleted in the DB
        deleted_instances = objects.InstanceList.get_by_filters(
            self.context, filters={'deleted': True})
        self.assertEqual(1, len(deleted_instances))

    def test_migration(self):
        self._create_instances(old=2, total=5)

        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 2)
        self.assertEqual(2, match)
        self.assertEqual(2, done)

        # Run the migration call again to make sure that we don't check
        # the same instances twice
        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 3)
        self.assertEqual(3, match)
        self.assertEqual(0, done)

        # Make sure we ran over all the instances
        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 50)
        self.assertEqual(0, match)
        self.assertEqual(0, done)

        # Make sure all instances now have a related RequestSpec
        for instance in self.instances:
            uuid = instance.uuid
            try:
                spec = objects.RequestSpec.get_by_instance_uuid(
                    self.context, uuid)
                self.assertEqual(instance.project_id, spec.project_id)
            except exception.RequestSpecNotFound:
                self.fail("RequestSpec not found for instance UUID: %s" % uuid)

    def test_migration_with_none_old(self):
        self._create_instances(old=0, total=5)

        # All instances already have a request spec, so none need migrating
        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 50)
        self.assertEqual(5, match)
        self.assertEqual(0, done)

    def test_migration_with_missing_marker(self):
        self._create_instances(old=2, total=5)

        # Start with 2 old (without request_spec) and 3 new instances:
        # [old, old, new, new, new]
        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 2)
        # Instance list after session 1:
        # [upgraded, upgraded<MARKER>, new, new, new]
        self.assertEqual(2, match)
        self.assertEqual(2, done)

        # Delete and remove the marker instance from the api table while
        # leaving the spec in the request_specs table. This triggers the
        # MarkerNotFound exception in the next session.
        self.api.delete_server(self.instances[1].uuid)
        db.archive_deleted_rows(max_rows=100)
        # Instance list after deletion: [upgraded, new, new, new]

        # This migration session hits the MarkerNotFound exception and then
        # starts from the beginning of the list
        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 50)
        self.assertEqual(4, match)
        self.assertEqual(0, done)

        # Make sure we ran over all the instances
        match, done = request_spec.migrate_instances_add_request_spec(
            self.context, 50)
        self.assertEqual(0, match)
        self.assertEqual(0, done)

        # Make sure all instances now have a related RequestSpec
        for instance in self.instances:
            uuid = instance.uuid
            try:
                spec = objects.RequestSpec.get_by_instance_uuid(
                    self.context, uuid)
                self.assertEqual(instance.project_id, spec.project_id)
            except exception.RequestSpecNotFound:
                self.fail("RequestSpec not found for instance UUID: %s" % uuid)
@@ -37,7 +37,7 @@ from nova import context
# NOTE(mriedem): We only use objects as a convenience to populate the database
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import request_spec as reqspec_obj
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -710,6 +710,19 @@ class TestUpgradeCheckAPIServiceVersion(test.NoDBTestCase):
        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)


def _create_minimal_request_spec(ctxt, instance):
    request_spec = objects.RequestSpec.from_components(
        ctxt, instance.uuid, instance.image_meta,
        instance.flavor, instance.numa_topology,
        instance.pci_requests,
        {}, None, instance.availability_zone,
        project_id=instance.project_id,
        user_id=instance.user_id
    )
    scheduler_utils.setup_instance_group(ctxt, request_spec)
    request_spec.create()


class TestUpgradeCheckRequestSpecMigration(test.NoDBTestCase):
    """Tests for the nova-status upgrade check for request spec migration."""
@@ -748,7 +761,7 @@ class TestUpgradeCheckRequestSpecMigration(test.NoDBTestCase):
        inst.pci_requests = None
        inst.project_id = 'fake-project'
        inst.user_id = 'fake-user'
        reqspec_obj._create_minimal_request_spec(ctxt, inst)
        _create_minimal_request_spec(ctxt, inst)

        return inst
@@ -0,0 +1,10 @@
---
upgrade:
  - |
    The online data migration ``migrate_instances_add_request_spec``, which
    was added in the 14.0.0 Newton release, has now been removed.
    Compatibility code in the controller services for old instances without
    a matching ``request_specs`` entry in the ``nova_api`` database is also
    gone. Ensure that the ``Request Spec Migration`` check in the
    ``nova-status upgrade check`` command is successful before upgrading to
    the 19.0.0 Stein release.
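As a usage note, the pre-upgrade workflow the release note describes boils down to the two commands already named in the commit message; this is a sketch of the operator steps, with command output omitted rather than guessed:

# Finish any outstanding migrations, then verify before upgrading to Stein.
nova-manage db online_data_migrations
nova-status upgrade check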