tests: Add reproducer for bug #1879878

When one resizes a pinned instance, the instance claims host CPUs for
pinning purposes on the destination. However, the host CPUs on the
source are not immediately relinquished. Rather, they are held by the
migration record, to handle the event that the resize is reverted. It is
only when one confirms this resize that the old cores are finally freed
up.

It appears there is a potential race between the resource tracker's
periodic task and the freeing of these resources, resulting in attempts
to unpin host cores that have already been unpinned. This test
highlights that bug pending a fix.

Change-Id: Ie092628ac71eb87c9dfa7220255a2953ada9e04d
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Related-Bug: #1879878
(cherry picked from commit 10f0a42de1)
Stephen Finucane 2 years ago
parent 07a0358c0e
commit 8ffaac4932
  1. 83

@ -20,6 +20,7 @@ import testtools
from oslo_config import cfg
from oslo_log import log as logging
import nova
from nova.conf import neutron as neutron_conf
from nova import context as nova_context
from nova import objects
@ -677,6 +678,88 @@ class NUMAServersTest(NUMAServersTestBase):
self.assertEqual(expected_usage, compute_usage)
def test_resize_bug_1879878(self):
"""Resize a instance with a NUMA topology when confirm takes time.
Bug 1879878 describes a race between the periodic tasks of the resource
tracker and the libvirt virt driver. The virt driver expects to be the
one doing the unpinning of instances, however, the resource tracker is
stepping on the virt driver's toes.
cpu_dedicated_set='0-3', cpu_shared_set='4-7', group='compute')
orig_confirm = nova.virt.libvirt.driver.LibvirtDriver.confirm_migration
def fake_confirm_migration(*args, **kwargs):
# run periodics before finally running the confirm_resize routine,
# simulating a race between the resource tracker and the virt
# driver
# then inspect the ComputeNode objects for our two hosts
src_numa_topology = objects.NUMATopology.obj_from_db_obj(
self.ctxt, src_host,
dst_numa_topology = objects.NUMATopology.obj_from_db_obj(
self.ctxt, dst_host,
# FIXME(stephenfin): There should still be two pinned cores here
self.assertEqual(0, len(src_numa_topology.cells[0].pinned_cpus))
self.assertEqual(2, len(dst_numa_topology.cells[0].pinned_cpus))
# before continuing with the actual confirm process
return orig_confirm(*args, **kwargs)
# start services
# create server
flavor_a_id = self._create_flavor(
vcpu=2, extra_spec={'hw:cpu_policy': 'dedicated'})
server = self._create_server(flavor_id=flavor_a_id)
src_host = server['OS-EXT-SRV-ATTR:host']
# we don't really care what the new flavor is, so long as the old
# flavor is using pinning. We use a similar flavor for simplicity.
flavor_b_id = self._create_flavor(
vcpu=2, extra_spec={'hw:cpu_policy': 'dedicated'})
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
'.migrate_disk_and_power_off', return_value='{}',
# TODO(stephenfin): Replace with a helper
post = {'resize': {'flavorRef': flavor_b_id}}
self.api.post_server_action(server['id'], post)
server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
dst_host = server['OS-EXT-SRV-ATTR:host']
# Now confirm the resize
# FIXME(stephenfin): This should be successful, but it's failing with a
# HTTP 500 due to bug #1879878
post = {'confirmResize': None}
exc = self.assertRaises(
self.api.post_server_action, server['id'], post)
self.assertEqual(500, exc.response.status_code)
self.assertIn('CPUUnpinningInvalid', str(exc))
class NUMAServerTestWithCountingQuotaFromPlacement(NUMAServersTest):