Fix resource tracker updates during instance evacuation
The current _update_usage_from_migrations code takes into account only the REBUILDING task state and does not properly handle the rebuild-spawning and rebuild-block-device-mapping (volume attachment) states. This can cause issues with NUMA topologies or PCI devices if several instances are being evacuated: some of them begin evacuation prior to the update_available_resource periodic pass and others immediately after, causing the latter ones to claim e.g. already-pinned CPUs. Closes-Bug: 1791075 Change-Id: I22aa680b00ae839c4acbc06cbfbc56dd27d5ab73
This commit is contained in:
parent
89c9127ded
commit
d8f7638d02
|
@ -65,9 +65,8 @@ def _instance_in_resize_state(instance):
|
|||
return True
|
||||
|
||||
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
|
||||
and task in [task_states.RESIZE_PREP,
|
||||
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
|
||||
task_states.RESIZE_FINISH, task_states.REBUILDING]):
|
||||
and task in (
|
||||
task_states.resizing_states + task_states.rebuild_states)):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
|
|
@ -123,3 +123,10 @@ ALLOW_REBOOT = [None, REBOOTING, REBOOT_PENDING, REBOOT_STARTED, RESUMING,
|
|||
# These states indicate a reboot
soft_reboot_states = (REBOOTING, REBOOT_PENDING, REBOOT_STARTED)
hard_reboot_states = (REBOOTING_HARD, REBOOT_PENDING_HARD, REBOOT_STARTED_HARD)

# These states indicate a resize in progress
resizing_states = (RESIZE_PREP, RESIZE_MIGRATING, RESIZE_MIGRATED,
                   RESIZE_FINISH)

# These states indicate a rebuild. Evacuation reuses the rebuild task
# states, so the resource tracker checks this whole tuple (not just
# REBUILDING) when deciding whether an instance has an in-progress
# migration — see _instance_in_resize_state.
rebuild_states = (REBUILDING, REBUILD_BLOCK_DEVICE_MAPPING, REBUILD_SPAWNING)
|
||||
|
|
|
@ -2758,6 +2758,27 @@ class TestUpdateUsageFromMigrations(BaseTestCase):
|
|||
self.assertFalse(upd_mock.called)
|
||||
self.assertEqual(mig1.status, "error")
|
||||
|
||||
@mock.patch('nova.objects.migration.Migration.save')
@mock.patch.object(resource_tracker.ResourceTracker,
                   '_update_usage_from_migration')
def test_evacuate_and_resizing_states(self, mock_update_usage, mock_save):
    """Every rebuild and resize task state must produce a usage update.

    Regression test for bug 1791075: the tracker previously only
    recognized REBUILDING, so instances mid-evacuation in the later
    rebuild states were skipped by _update_usage_from_migrations.
    """
    self._setup_rt()
    instance = objects.Instance(
        vm_state=vm_states.STOPPED,
        task_state=None,
        migration_context=objects.MigrationContext(migration_id=1))
    migration = objects.Migration(
        source_compute='other-host', source_node='other-node',
        dest_compute=_HOSTNAME, dest_node=_NODENAME,
        instance_uuid=uuids.instance, id=1, instance=instance)
    # Walk every in-progress rebuild/resize state and check each one
    # results in exactly one usage update for this migration.
    all_states = task_states.rebuild_states + task_states.resizing_states
    for task_state in all_states:
        instance.task_state = task_state
        self.rt._update_usage_from_migrations(
            mock.sentinel.ctx, [migration], _NODENAME)
        mock_update_usage.assert_called_once_with(
            mock.sentinel.ctx, instance, migration, _NODENAME)
        mock_update_usage.reset_mock()
|
||||
|
||||
|
||||
class TestUpdateUsageFromInstance(BaseTestCase):
|
||||
|
||||
|
|
Loading…
Reference in New Issue