Add online data migration for populating user_id
This adds an online data migration for the user_id field on InstanceMapping. It works by processing instance mappings that do not have a value set for the field (i.e. are NULL in the database) and querying the instances in each cell to obtain the user_id values needed to update those mappings. Part of blueprint count-quota-usage-from-placement Change-Id: I8cc873ba63db7b806ab1de0a88fe8a87d4baeea9
This commit is contained in:
parent
a7de4917a0
commit
a6bc544aae
|
@ -419,6 +419,8 @@ class DbCommands(object):
|
|||
compute_node_obj.migrate_empty_ratio,
|
||||
# Added in Stein
|
||||
virtual_interface_obj.fill_virtual_interface_list,
|
||||
# Added in Stein
|
||||
instance_mapping_obj.populate_user_id,
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
|
|
|
@ -10,8 +10,11 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import versionutils
|
||||
import six
|
||||
from sqlalchemy.orm import joinedload
|
||||
from sqlalchemy.sql import false
|
||||
from sqlalchemy.sql import or_
|
||||
|
@ -25,6 +28,7 @@ from nova import objects
|
|||
from nova.objects import base
|
||||
from nova.objects import cell_mapping
|
||||
from nova.objects import fields
|
||||
from nova.objects import virtual_interface
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -218,6 +222,73 @@ def populate_queued_for_delete(context, max_count):
|
|||
return processed, processed
|
||||
|
||||
|
||||
@db_api.api_context_manager.writer
|
||||
def populate_user_id(context, max_count):
|
||||
cells = objects.CellMappingList.get_all(context)
|
||||
cms_by_id = {cell.id: cell for cell in cells}
|
||||
done = 0
|
||||
unmigratable_ims = False
|
||||
ims = (
|
||||
# Get a list of instance mappings which do not have user_id populated.
|
||||
# We need to include records with queued_for_delete=True because they
|
||||
# include SOFT_DELETED instances, which could be restored at any time
|
||||
# in the future. If we don't migrate SOFT_DELETED instances now, we
|
||||
# wouldn't be able to retire this migration code later. Also filter
|
||||
# out the marker instance created by the virtual interface migration.
|
||||
context.session.query(api_models.InstanceMapping)
|
||||
.filter_by(user_id=None)
|
||||
.filter(api_models.InstanceMapping.project_id !=
|
||||
virtual_interface.FAKE_UUID)
|
||||
.limit(max_count).all())
|
||||
found = len(ims)
|
||||
ims_by_inst_uuid = {}
|
||||
inst_uuids_by_cell_id = collections.defaultdict(set)
|
||||
for im in ims:
|
||||
ims_by_inst_uuid[im.instance_uuid] = im
|
||||
inst_uuids_by_cell_id[im.cell_id].add(im.instance_uuid)
|
||||
for cell_id, inst_uuids in inst_uuids_by_cell_id.items():
|
||||
# We cannot migrate instance mappings that don't have a cell yet.
|
||||
if cell_id is None:
|
||||
unmigratable_ims = True
|
||||
continue
|
||||
with nova_context.target_cell(context, cms_by_id[cell_id]) as cctxt:
|
||||
# We need to migrate SOFT_DELETED instances because they could be
|
||||
# restored at any time in the future, preventing us from being able
|
||||
# to remove any other interim online data migration code we have,
|
||||
# if we don't migrate them here.
|
||||
# NOTE: it's not possible to query only for SOFT_DELETED instances.
|
||||
# We must query for both deleted and SOFT_DELETED instances.
|
||||
filters = {'uuid': inst_uuids}
|
||||
try:
|
||||
instances = objects.InstanceList.get_by_filters(
|
||||
cctxt, filters, expected_attrs=[])
|
||||
except Exception as exp:
|
||||
LOG.warning('Encountered exception: "%s" while querying '
|
||||
'instances from cell: %s. Continuing to the next '
|
||||
'cell.', six.text_type(exp),
|
||||
cms_by_id[cell_id].identity)
|
||||
continue
|
||||
# Walk through every instance that has a mapping needing to be updated
|
||||
# and update it.
|
||||
for instance in instances:
|
||||
im = ims_by_inst_uuid.pop(instance.uuid)
|
||||
im.user_id = instance.user_id
|
||||
context.session.add(im)
|
||||
done += 1
|
||||
if ims_by_inst_uuid:
|
||||
unmigratable_ims = True
|
||||
if done >= max_count:
|
||||
break
|
||||
|
||||
if unmigratable_ims:
|
||||
LOG.warning('Some instance mappings were not migratable. This may '
|
||||
'be transient due to in-flight instance builds, or could '
|
||||
'be due to stale data that will be cleaned up after '
|
||||
'running "nova-manage db archive_deleted_rows --purge".')
|
||||
|
||||
return found, done
|
||||
|
||||
|
||||
@base.NovaObjectRegistry.register
|
||||
class InstanceMappingList(base.ObjectListBase, base.NovaObject):
|
||||
# Version 1.0: Initial version
|
||||
|
|
|
@ -20,13 +20,15 @@ from nova import exception
|
|||
from nova.objects import cell_mapping
|
||||
from nova.objects import instance
|
||||
from nova.objects import instance_mapping
|
||||
from nova.objects import virtual_interface
|
||||
from nova import test
|
||||
from nova.tests import fixtures
|
||||
|
||||
|
||||
sample_mapping = {'instance_uuid': '',
|
||||
'cell_id': 3,
|
||||
'project_id': 'fake-project'}
|
||||
'project_id': 'fake-project',
|
||||
'user_id': 'fake-user'}
|
||||
|
||||
|
||||
sample_cell_mapping = {'id': 3,
|
||||
|
@ -208,7 +210,7 @@ class InstanceMappingTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_user_id_not_set_if_null_from_db(self):
|
||||
# Create an instance mapping with user_id=None.
|
||||
db_mapping = create_mapping()
|
||||
db_mapping = create_mapping(user_id=None)
|
||||
self.assertIsNone(db_mapping['user_id'])
|
||||
# Get the mapping to run convert from db object to versioned object.
|
||||
im = instance_mapping.InstanceMapping.get_by_instance_uuid(
|
||||
|
@ -216,6 +218,186 @@ class InstanceMappingTestCase(test.NoDBTestCase):
|
|||
# Verify the user_id is not set.
|
||||
self.assertNotIn('user_id', im)
|
||||
|
||||
@mock.patch('nova.objects.instance_mapping.LOG.warning')
|
||||
def test_populate_user_id(self, mock_log_warning):
|
||||
cells = []
|
||||
celldbs = fixtures.CellDatabases()
|
||||
|
||||
# Create two cell databases and map them
|
||||
for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
|
||||
cm = cell_mapping.CellMapping(context=self.context, uuid=uuid,
|
||||
database_connection=uuid,
|
||||
transport_url='fake://')
|
||||
cm.create()
|
||||
cells.append(cm)
|
||||
celldbs.add_cell_database(uuid)
|
||||
self.useFixture(celldbs)
|
||||
|
||||
# Create 5 instances per cell
|
||||
for cell in cells:
|
||||
for i in range(0, 5):
|
||||
with context.target_cell(self.context, cell) as cctxt:
|
||||
inst = instance.Instance(
|
||||
cctxt,
|
||||
project_id=self.context.project_id,
|
||||
user_id=self.context.user_id)
|
||||
inst.create()
|
||||
# Make every other mapping have a NULL user_id
|
||||
# Will be a total of four mappings with NULL user_id
|
||||
user_id = self.context.user_id if i % 2 == 0 else None
|
||||
create_mapping(project_id=self.context.project_id,
|
||||
user_id=user_id, cell_id=cell.id,
|
||||
instance_uuid=inst.uuid)
|
||||
|
||||
# Create a SOFT_DELETED instance with a user_id=None instance mapping.
|
||||
# This should get migrated.
|
||||
with context.target_cell(self.context, cells[0]) as cctxt:
|
||||
inst = instance.Instance(
|
||||
cctxt, project_id=self.context.project_id,
|
||||
user_id=self.context.user_id, vm_state=vm_states.SOFT_DELETED)
|
||||
inst.create()
|
||||
create_mapping(project_id=self.context.project_id, user_id=None,
|
||||
cell_id=cells[0].id, instance_uuid=inst.uuid,
|
||||
queued_for_delete=True)
|
||||
|
||||
# Create a deleted instance with a user_id=None instance mapping.
|
||||
# This should get migrated.
|
||||
with context.target_cell(self.context, cells[1]) as cctxt:
|
||||
inst = instance.Instance(
|
||||
cctxt, project_id=self.context.project_id,
|
||||
user_id=self.context.user_id)
|
||||
inst.create()
|
||||
inst.destroy()
|
||||
create_mapping(project_id=self.context.project_id, user_id=None,
|
||||
cell_id=cells[1].id, instance_uuid=inst.uuid,
|
||||
queued_for_delete=True)
|
||||
|
||||
# Create an instance mapping for an instance not yet scheduled. It
|
||||
# should not get migrated because we won't know what user_id to use.
|
||||
unscheduled = create_mapping(project_id=self.context.project_id,
|
||||
user_id=None, cell_id=None)
|
||||
|
||||
# Create two instance mappings for instances that no longer exist.
|
||||
# Example: residue from a manual cleanup or after a periodic compute
|
||||
# purge and before a database archive. This record should not get
|
||||
# migrated.
|
||||
nonexistent = []
|
||||
for i in range(2):
|
||||
nonexistent.append(
|
||||
create_mapping(project_id=self.context.project_id,
|
||||
user_id=None, cell_id=cells[i].id,
|
||||
instance_uuid=uuidutils.generate_uuid()))
|
||||
|
||||
# Create an instance mapping simulating a virtual interface migration
|
||||
# marker instance which has had map_instances run on it.
|
||||
# This should not be found by the migration.
|
||||
create_mapping(project_id=virtual_interface.FAKE_UUID, user_id=None)
|
||||
|
||||
found, done = instance_mapping.populate_user_id(self.context, 2)
|
||||
# Two needed fixing, and honored the limit.
|
||||
self.assertEqual(2, found)
|
||||
self.assertEqual(2, done)
|
||||
|
||||
found, done = instance_mapping.populate_user_id(self.context, 1000)
|
||||
# Only four left were fixable. The fifth instance found has no
|
||||
# cell and cannot be migrated yet. The 6th and 7th instances found have
|
||||
# no corresponding instance records and cannot be migrated.
|
||||
self.assertEqual(7, found)
|
||||
self.assertEqual(4, done)
|
||||
|
||||
# Verify the orphaned instance mappings warning log message was only
|
||||
# emitted once.
|
||||
mock_log_warning.assert_called_once()
|
||||
|
||||
# Check that we have only the expected number of records with
|
||||
# user_id set. We created 10 instances (5 per cell with 2 per cell
|
||||
# with NULL user_id), 1 SOFT_DELETED instance with NULL user_id,
|
||||
# 1 deleted instance with NULL user_id, and 1 not-yet-scheduled
|
||||
# instance with NULL user_id.
|
||||
# We expect 12 of them to have user_id set after migration (15 total,
|
||||
# with the not-yet-scheduled instance and the orphaned instance
|
||||
# mappings ignored).
|
||||
ims = instance_mapping.InstanceMappingList.get_by_project_id(
|
||||
self.context, self.context.project_id)
|
||||
self.assertEqual(12, len(
|
||||
[im for im in ims if 'user_id' in im]))
|
||||
|
||||
# Check that one instance mapping record (not yet scheduled) has not
|
||||
# been migrated by this script.
|
||||
# Check that two other instance mapping records (no longer existing
|
||||
# instances) have not been migrated by this script.
|
||||
self.assertEqual(15, len(ims))
|
||||
|
||||
# Set the cell and create the instance for the mapping without a cell,
|
||||
# then run the migration again.
|
||||
unscheduled = instance_mapping.InstanceMapping.get_by_instance_uuid(
|
||||
self.context, unscheduled['instance_uuid'])
|
||||
unscheduled.cell_mapping = cells[0]
|
||||
unscheduled.save()
|
||||
with context.target_cell(self.context, cells[0]) as cctxt:
|
||||
inst = instance.Instance(
|
||||
cctxt,
|
||||
uuid=unscheduled.instance_uuid,
|
||||
project_id=self.context.project_id,
|
||||
user_id=self.context.user_id)
|
||||
inst.create()
|
||||
found, done = instance_mapping.populate_user_id(self.context, 1000)
|
||||
# Should have found the not-yet-scheduled instance and the orphaned
|
||||
# instance mappings.
|
||||
self.assertEqual(3, found)
|
||||
# Should have only migrated the not-yet-schedule instance.
|
||||
self.assertEqual(1, done)
|
||||
|
||||
# Delete the orphaned instance mapping (simulate manual cleanup by an
|
||||
# operator).
|
||||
for db_im in nonexistent:
|
||||
nonexist = instance_mapping.InstanceMapping.get_by_instance_uuid(
|
||||
self.context, db_im['instance_uuid'])
|
||||
nonexist.destroy()
|
||||
|
||||
# Run the script one last time to make sure it finds nothing left to
|
||||
# migrate.
|
||||
found, done = instance_mapping.populate_user_id(self.context, 1000)
|
||||
self.assertEqual(0, found)
|
||||
self.assertEqual(0, done)
|
||||
|
||||
@mock.patch('nova.objects.InstanceList.get_by_filters')
|
||||
def test_populate_user_id_instance_get_fail(self, mock_inst_get):
|
||||
cells = []
|
||||
celldbs = fixtures.CellDatabases()
|
||||
|
||||
# Create two cell databases and map them
|
||||
for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
|
||||
cm = cell_mapping.CellMapping(context=self.context, uuid=uuid,
|
||||
database_connection=uuid,
|
||||
transport_url='fake://')
|
||||
cm.create()
|
||||
cells.append(cm)
|
||||
celldbs.add_cell_database(uuid)
|
||||
self.useFixture(celldbs)
|
||||
|
||||
# Create one instance per cell
|
||||
for cell in cells:
|
||||
with context.target_cell(self.context, cell) as cctxt:
|
||||
inst = instance.Instance(
|
||||
cctxt,
|
||||
project_id=self.context.project_id,
|
||||
user_id=self.context.user_id)
|
||||
inst.create()
|
||||
create_mapping(project_id=self.context.project_id,
|
||||
user_id=None, cell_id=cell.id,
|
||||
instance_uuid=inst.uuid)
|
||||
|
||||
# Simulate the first cell is down/has some error
|
||||
mock_inst_get.side_effect = [test.TestingException(),
|
||||
instance.InstanceList(objects=[inst])]
|
||||
|
||||
found, done = instance_mapping.populate_user_id(self.context, 1000)
|
||||
# Verify we continue to the next cell when a down/error cell is
|
||||
# encountered.
|
||||
self.assertEqual(2, found)
|
||||
self.assertEqual(1, done)
|
||||
|
||||
|
||||
class InstanceMappingListTestCase(test.NoDBTestCase):
|
||||
USES_DB_SELF = True
|
||||
|
|
Loading…
Reference in New Issue