Use a static resource tracker in compute manager

There was one edge case in the compute manager wherein we would
reinitialize the resource tracker. Jay promises that isn't needed
anymore, so this change removes it. That allows us to remove the
_get_resource_tracker() helper and set up the resource tracker just once
during __init__ of the compute manager.

Change-Id: Ibb8c12fb2799bb5ceb9e3d72a2b86dbb4f14451e
Authored by Eric Fried on 2018-11-28 15:58:12 -06:00, committed by Eric Fried
parent 4c34ab574e
commit 35ce77835b
9 changed files with 118 additions and 194 deletions
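
Every hunk below applies the same mechanical change: the lazily constructed tracker behind _get_resource_tracker() becomes a plain self.rt attribute built once in __init__, and the cached _reportclient property collapses into a simple self.reportclient attribute. A minimal before/after sketch of that shape follows; the stub classes are illustrative only and merely stand in for the real Nova objects.

class _ReportClientStub(object):
    """Stands in for the placement SchedulerReportClient."""


class _TrackerStub(object):
    """Stands in for nova.compute.resource_tracker.ResourceTracker."""

    def __init__(self, host, driver):
        self.host = host
        self.driver = driver
        self.reportclient = _ReportClientStub()


class ManagerBefore(object):
    """Old shape: tracker and report client are built lazily and cached."""

    def __init__(self, host, driver):
        self.host, self.driver = host, driver
        self._resource_tracker = None
        self._reportclient = None

    def _get_resource_tracker(self):
        if not self._resource_tracker:
            self._resource_tracker = _TrackerStub(self.host, self.driver)
        return self._resource_tracker

    @property
    def reportclient(self):
        if not self._reportclient:
            self._reportclient = self._get_resource_tracker().reportclient
        return self._reportclient


class ManagerAfter(object):
    """New shape: exactly one tracker, created during __init__."""

    def __init__(self, host, driver):
        self.host, self.driver = host, driver
        self.rt = _TrackerStub(host, driver)
        self.reportclient = self.rt.reportclient

With the tracker created eagerly, reset(), the claim paths, and the update_available_resource periodic task can all reference self.rt directly, which is exactly what the compute manager hunks below do.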

@ -507,8 +507,6 @@ class ComputeManager(manager.Manager):
openstack_driver.is_neutron_security_groups())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.query_client = query.SchedulerQueryClient()
self._reportclient = None
self._resource_tracker = None
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool(
size=CONF.sync_power_state_pool_size)
@ -538,31 +536,20 @@ class ComputeManager(manager.Manager):
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
self.rt = resource_tracker.ResourceTracker(self.host, self.driver)
self.reportclient = self.rt.reportclient
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._get_resource_tracker().reportclient.clear_provider_cache()
def _get_resource_tracker(self):
if not self._resource_tracker:
rt = resource_tracker.ResourceTracker(self.host, self.driver)
self._resource_tracker = rt
return self._resource_tracker
@property
def reportclient(self):
if not self._reportclient:
self._reportclient = self._get_resource_tracker().reportclient
return self._reportclient
self.reportclient.clear_provider_cache()
def _update_resource_tracker(self, context, instance):
"""Let the resource tracker know that an instance has changed state."""
if instance.host == self.host:
rt = self._get_resource_tracker()
rt.update_usage(context, instance, instance.node)
self.rt.update_usage(context, instance, instance.node)
def _instance_update(self, context, instance, **kwargs):
"""Update an instance in the database using kwargs as value."""
@ -1801,14 +1788,12 @@ class ComputeManager(manager.Manager):
def _build_failed(self, node):
if CONF.compute.consecutive_build_service_disable_threshold:
rt = self._get_resource_tracker()
# NOTE(danms): Update our counter, but wait for the next
# update_available_resource() periodic to flush it to the DB
rt.build_failed(node)
self.rt.build_failed(node)
def _build_succeeded(self, node):
rt = self._get_resource_tracker()
rt.build_succeeded(node)
self.rt.build_succeeded(node)
@wrap_exception()
@reverts_task_state
@ -2093,8 +2078,7 @@ class ComputeManager(manager.Manager):
try:
scheduler_hints = self._get_scheduler_hints(filter_properties,
request_spec)
rt = self._get_resource_tracker()
with rt.instance_claim(context, instance, node, limits):
with self.rt.instance_claim(context, instance, node, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
@ -2983,11 +2967,10 @@ class ComputeManager(manager.Manager):
else:
LOG.info("Rebuilding instance", instance=instance)
rt = self._get_resource_tracker()
if evacuate:
# This is an evacuation to a new host, so we need to perform a
# resource claim.
rebuild_claim = rt.rebuild_claim
rebuild_claim = self.rt.rebuild_claim
else:
# This is a rebuild to the same host, so we don't need to make
# a claim since the instance is already on this host.
@ -3053,7 +3036,7 @@ class ComputeManager(manager.Manager):
# get here when evacuating to a destination node. Rebuilding
# on the same host (not evacuate) uses the NopClaim which will
# not raise ComputeResourcesUnavailable.
rt.delete_allocation_for_evacuated_instance(
self.rt.delete_allocation_for_evacuated_instance(
context, instance, scheduled_node, node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise exception.BuildAbortException(
@ -3067,7 +3050,7 @@ class ComputeManager(manager.Manager):
except Exception as e:
self._set_migration_status(migration, 'failed')
if evacuate or scheduled_node is not None:
rt.delete_allocation_for_evacuated_instance(
self.rt.delete_allocation_for_evacuated_instance(
context, instance, scheduled_node,
node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
@ -3898,9 +3881,8 @@ class ComputeManager(manager.Manager):
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, migration.source_node,
old_instance_type, prefix='old_')
self.rt.drop_move_claim(context, instance, migration.source_node,
old_instance_type, prefix='old_')
self._delete_allocation_after_move(context, instance, migration)
instance.drop_migration_context()
@ -4007,8 +3989,7 @@ class ComputeManager(manager.Manager):
instance.revert_migration_context()
instance.save()
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, instance.node)
self.rt.drop_move_claim(context, instance, instance.node)
# RPC cast back to the source host to finish the revert there.
self.compute_rpcapi.finish_revert_resize(context, instance,
@ -4190,10 +4171,9 @@ class ComputeManager(manager.Manager):
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker()
with rt.resize_claim(context, instance, instance_type, node,
migration, image_meta=image,
limits=limits) as claim:
with self.rt.resize_claim(context, instance, instance_type, node,
migration, image_meta=image,
limits=limits) as claim:
LOG.info('Migrating', instance=instance)
# RPC cast to the source host to start the actual resize/migration.
self.compute_rpcapi.resize_instance(
@ -4933,8 +4913,8 @@ class ComputeManager(manager.Manager):
# This should happen *before* the vm_state is changed to
# SHELVED_OFFLOADED in case client-side code is polling the API to
# schedule more instances (or unshelve) once this server is offloaded.
rt = self._get_resource_tracker()
rt.delete_allocation_for_shelve_offloaded_instance(context, instance)
self.rt.delete_allocation_for_shelve_offloaded_instance(context,
instance)
instance.power_state = current_power_state
# NOTE(mriedem): The vm_state has to be set before updating the
@ -5021,7 +5001,6 @@ class ComputeManager(manager.Manager):
if node is None:
node = self._get_nodename(instance)
rt = self._get_resource_tracker()
limits = filter_properties.get('limits', {})
allocations = self.reportclient.get_allocations_for_consumer(
@ -5040,7 +5019,7 @@ class ComputeManager(manager.Manager):
self.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, node, limits):
with self.rt.instance_claim(context, instance, node, limits):
self.driver.spawn(context, instance, image_meta,
injected_files=[],
admin_password=None,
@ -7688,25 +7667,12 @@ class ComputeManager(manager.Manager):
def _update_available_resource_for_node(self, context, nodename,
startup=False):
rt = self._get_resource_tracker()
try:
rt.update_available_resource(context, nodename, startup=startup)
self.rt.update_available_resource(context, nodename,
startup=startup)
except exception.ComputeHostNotFound:
# NOTE(comstud): We can get to this case if a node was
# marked 'deleted' in the DB and then re-added with a
# different auto-increment id. The cached resource
# tracker tried to update a deleted record and failed.
# Don't add this resource tracker to the new dict, so
# that this will resolve itself on the next run.
LOG.info("Compute node '%s' not found in "
"update_available_resource.", nodename)
# TODO(jaypipes): Yes, this is inefficient to throw away all of the
# compute nodes to force a rebuild, but this is only temporary
# until Ironic baremetal node resource providers are tracked
# properly in the report client and this is a tiny edge case
# anyway.
self._resource_tracker = None
return
LOG.warning("Compute node '%s' not found in "
"update_available_resource.", nodename)
except exception.ReshapeFailed:
# We're only supposed to get here on startup, if a reshape was
# needed, was attempted, and failed. We want to kill the service.
@ -7756,7 +7722,6 @@ class ComputeManager(manager.Manager):
LOG.warning("Virt driver is not ready.")
return
rt = self._get_resource_tracker()
# Delete orphan compute node not reported by driver but still in db
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
@ -7766,7 +7731,7 @@ class ComputeManager(manager.Manager):
{'id': cn.id, 'hh': cn.hypervisor_hostname,
'nodes': nodenames})
cn.destroy()
rt.remove_node(cn.hypervisor_hostname)
self.rt.remove_node(cn.hypervisor_hostname)
# Delete the corresponding resource provider in placement,
# along with any associated allocations and inventory.
self.reportclient.delete_resource_provider(context, cn,

@ -49,6 +49,7 @@ from oslotest import moxstubout
import six
import testtools
from nova.compute import resource_tracker
from nova import context
from nova.db import api as db
from nova import exception
@ -415,8 +416,9 @@ class TestCase(testtools.TestCase):
# So this helper method tries to simulate a better compute service
# restart by cleaning up some of the internal state of the compute
# manager.
host, driver = compute.manager.host, compute.manager.driver
compute.stop()
compute.manager._resource_tracker = None
compute.manager.rt = resource_tracker.ResourceTracker(host, driver)
compute.start()
def assertJsonEqual(self, expected, observed, message=''):

@ -413,7 +413,7 @@ class TestUpdateComputeNodeReservedAndAllocationRatio(
rp_uuid = self._get_provider_uuid_by_host('fake-host')
ctxt = context.get_admin_context()
rt = compute_service.manager._get_resource_tracker()
rt = compute_service.manager.rt
inv = self.placement_api.get(
'/resource_providers/%s/inventories' % rp_uuid).body

@ -29,7 +29,7 @@ class TestMetricsNotificationSample(
self.flags(compute_monitors=['cpu.virt_driver'])
super(TestMetricsNotificationSample, self).setUp()
# Reset the cpu stats of the 'cpu.virt_driver' monitor
self.compute.manager._resource_tracker.monitors[0]._cpu_stats = {}
self.compute.manager.rt.monitors[0]._cpu_stats = {}
def test_metrics_update(self):
self.compute.manager.update_available_resource(

@ -60,7 +60,7 @@ class ComputeManagerTestCase(test.TestCase):
@mock.patch.object(self.compute.manager.network_api,
'cleanup_instance_network_on_host')
@mock.patch('nova.compute.utils.notify_about_instance_usage')
@mock.patch.object(self.compute.manager, '_get_resource_tracker')
@mock.patch.object(self.compute.manager, 'rt')
@mock.patch.object(self.compute.manager.driver, 'spawn')
def _test(mock_spawn, mock_grt, mock_notify, mock_cinoh, mock_can):
mock_spawn.side_effect = test.TestingException('Preserve this')

@ -1507,9 +1507,8 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
self.assertEqual([], self._get_provider_traits(self.host_uuid))
def _run_update_available_resource(self, startup):
ctx = context.get_admin_context()
rt = self.compute._get_resource_tracker()
rt.update_available_resource(ctx, self.compute.host, startup=startup)
self.compute.rt.update_available_resource(
context.get_admin_context(), self.compute.host, startup=startup)
def _run_update_available_resource_and_assert_raises(
self, exc=exception.ResourceProviderSyncFailed, startup=False):

@ -13,6 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from nova.compute import resource_tracker
@ -21,3 +24,9 @@ class FakeResourceTracker(resource_tracker.ResourceTracker):
def _update(self, context, compute_node, startup=False):
pass
class RTMockMixin(object):
def _mock_rt(self, **kwargs):
return self.useFixture(fixtures.MockPatchObject(
self.compute, 'rt', **kwargs)).mock
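
As a quick illustration (not part of this change), a hypothetical unit test built on the new mixin might look like the sketch below; the test class, node name, and build_succeeded assertion are invented for illustration, while the _mock_rt() call mirrors the real usages in the unit-test hunks further down.

from nova.compute import manager
from nova import test
from nova.tests.unit.compute import fake_resource_tracker


class ExampleComputeManagerTest(test.NoDBTestCase,
                                fake_resource_tracker.RTMockMixin):
    def setUp(self):
        super(ExampleComputeManagerTest, self).setUp()
        self.compute = manager.ComputeManager()

    def test_build_succeeded_reaches_the_tracker(self):
        # _mock_rt() swaps self.compute.rt for a mock and returns it.
        rt = self._mock_rt(spec_set=['build_succeeded'])
        self.compute._build_succeeded('fake-node')
        rt.build_succeeded.assert_called_once_with('fake-node')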

@ -166,7 +166,7 @@ class BaseTestCase(test.TestCase):
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute._resource_tracker = fake_rt
self.compute.rt = fake_rt
def fake_get_compute_nodes_in_db(self, context, **kwargs):
fake_compute_nodes = [{'local_gb': 259,
@ -262,7 +262,7 @@ class BaseTestCase(test.TestCase):
self.compute_api = compute.API()
# Just to make long lines short
self.rt = self.compute._get_resource_tracker()
self.rt = self.compute.rt
self.mock_get_allocs = self.useFixture(
fixtures.fixtures.MockPatch(
@ -1464,7 +1464,8 @@ class ComputeVolumeTestCase(BaseTestCase):
class ComputeTestCase(BaseTestCase,
test_diagnostics.DiagnosticsComparisonMixin):
test_diagnostics.DiagnosticsComparisonMixin,
fake_resource_tracker.RTMockMixin):
def setUp(self):
super(ComputeTestCase, self).setUp()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
@ -6229,7 +6230,7 @@ class ComputeTestCase(BaseTestCase,
migration = objects.Migration(uuid=uuids.migration)
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute, '_reportclient')
@mock.patch.object(self.compute, 'reportclient')
def do_it(mock_client, mock_setup):
mock_client.get_allocations_for_consumer.return_value = {
mock.sentinel.source: {
@ -6550,12 +6551,11 @@ class ComputeTestCase(BaseTestCase,
'clear_events_for_instance'),
mock.patch.object(self.compute, 'update_available_resource'),
mock.patch.object(migration_obj, 'save'),
mock.patch.object(self.compute, '_get_resource_tracker'),
) as (
post_live_migration, unfilter_instance,
migrate_instance_start, post_live_migration_at_destination,
post_live_migration_at_source, setup_networks_on_host,
clear_events, update_available_resource, mig_save, getrt
clear_events, update_available_resource, mig_save
):
self.compute._post_live_migration(c, instance, dest,
migrate_data=migrate_data)
@ -7466,14 +7466,11 @@ class ComputeTestCase(BaseTestCase,
@mock.patch.object(objects.Instance, 'save')
def test_instance_update_host_check(self, mock_save):
# make sure rt usage doesn't happen if the host or node is different
def fail_get(self):
raise test.TestingException("wrong host")
self.stub_out('nova.compute.manager.ComputeManager.'
'_get_resource_tracker', fail_get)
# make sure rt usage doesn't update if the host or node is different
instance = self._create_fake_instance_obj({'host': 'someotherhost'})
self.compute._instance_update(self.context, instance, vcpus=4)
with mock.patch.object(self.compute.rt, '_update_usage',
new=mock.NonCallableMock()):
self.compute._instance_update(self.context, instance, vcpus=4)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
@ -7698,7 +7695,6 @@ class ComputeTestCase(BaseTestCase,
self.assertNotEqual(0, instance.deleted)
def test_terminate_instance_updates_tracker(self):
rt = self.compute._get_resource_tracker()
admin_context = context.get_admin_context()
cn = self.rt.compute_nodes[NODENAME]
@ -7706,7 +7702,7 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj()
instance.vcpus = 1
rt.instance_claim(admin_context, instance, NODENAME)
self.compute.rt.instance_claim(admin_context, instance, NODENAME)
self.assertEqual(1, cn.vcpus_used)
self.compute.terminate_instance(admin_context, instance, [])
@ -7720,17 +7716,16 @@ class ComputeTestCase(BaseTestCase,
# update properly.
@mock.patch('nova.objects.Instance.destroy')
def test_init_deleted_instance_updates_tracker(self, noop1, noop2, noop3):
rt = self.compute._get_resource_tracker()
admin_context = context.get_admin_context()
cn = rt.compute_nodes[NODENAME]
cn = self.compute.rt.compute_nodes[NODENAME]
self.assertEqual(0, cn.vcpus_used)
instance = self._create_fake_instance_obj()
instance.vcpus = 1
self.assertEqual(0, cn.vcpus_used)
rt.instance_claim(admin_context, instance, NODENAME)
self.compute.rt.instance_claim(admin_context, instance, NODENAME)
self.compute._init_instance(admin_context, instance)
self.assertEqual(1, cn.vcpus_used)
@ -8022,25 +8017,22 @@ class ComputeTestCase(BaseTestCase,
instance.old_flavor = old_type
instance.new_flavor = new_type
fake_rt = mock.MagicMock()
def fake_drop_move_claim(*args, **kwargs):
pass
def fake_setup_networks_on_host(self, *args, **kwargs):
pass
self._mock_rt(
drop_move_claim=mock.Mock(side_effect=fake_drop_move_claim))
with test.nested(
mock.patch.object(fake_rt, 'drop_move_claim',
side_effect=fake_drop_move_claim),
mock.patch.object(self.compute, '_get_resource_tracker',
return_value=fake_rt),
mock.patch.object(self.compute.network_api,
'setup_networks_on_host',
side_effect=fake_setup_networks_on_host),
mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
) as (mock_drop, mock_get, mock_setup, mock_remove_allocs):
) as (mock_setup, mock_remove_allocs):
migration = objects.Migration(context=self.context.elevated())
migration.instance_uuid = instance.uuid
migration.status = 'finished'
@ -13041,11 +13033,11 @@ class EvacuateHostTestCase(BaseTestCase):
patch_spawn = mock.patch.object(self.compute.driver, 'spawn')
patch_on_disk = mock.patch.object(
self.compute.driver, 'instance_on_disk', return_value=True)
self.compute._resource_tracker.rebuild_claim = mock.MagicMock()
self.compute.rt.rebuild_claim = mock.MagicMock()
with patch_spawn, patch_on_disk:
self._rebuild(migration=migration)
self.assertTrue(self.compute._resource_tracker.rebuild_claim.called)
self.assertTrue(self.compute.rt.rebuild_claim.called)
self.assertEqual('done', migration.status)
migration.save.assert_called_once_with()
@ -13070,7 +13062,7 @@ class EvacuateHostTestCase(BaseTestCase):
patch_on_disk = mock.patch.object(
self.compute.driver, 'instance_on_disk', return_value=True)
patch_claim = mock.patch.object(
self.compute._resource_tracker, 'rebuild_claim',
self.compute.rt, 'rebuild_claim',
side_effect=exception.ComputeResourcesUnavailable(reason="boom"))
patch_remove_allocs = mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
@ -13088,8 +13080,7 @@ class EvacuateHostTestCase(BaseTestCase):
patch_spawn = mock.patch.object(self.compute.driver, 'spawn')
patch_on_disk = mock.patch.object(
self.compute.driver, 'instance_on_disk', return_value=True)
patch_claim = mock.patch.object(
self.compute._resource_tracker, 'rebuild_claim')
patch_claim = mock.patch.object(self.compute.rt, 'rebuild_claim')
patch_rebuild = mock.patch.object(
self.compute, '_do_rebuild_instance_with_claim',
side_effect=test.TestingException())

@ -82,7 +82,8 @@ fake_host_list = [mock.sentinel.host1]
@ddt.ddt
class ComputeManagerUnitTestCase(test.NoDBTestCase):
class ComputeManagerUnitTestCase(test.NoDBTestCase,
fake_resource_tracker.RTMockMixin):
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
self.compute = manager.ComputeManager()
@ -257,10 +258,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
cn.hypervisor_hostname = hyp_hostname
return cn
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
def test_update_available_resource_for_node(self, get_rt):
rt = mock.Mock(spec_set=['update_available_resource'])
get_rt.return_value = rt
def test_update_available_resource_for_node(self):
rt = self._mock_rt(spec_set=['update_available_resource'])
self.compute._update_available_resource_for_node(
self.context,
@ -273,36 +272,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
)
@mock.patch('nova.compute.manager.LOG')
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
def test_update_available_resource_for_node_fail_no_host(self, get_rt,
log_mock):
rt = mock.Mock(spec_set=['update_available_resource'])
exc = exception.ComputeHostNotFound(host=mock.sentinel.host)
rt.update_available_resource.side_effect = exc
get_rt.return_value = rt
# Fake out the RT on the compute manager object so we can assert it's
# nulled out after the ComputeHostNotFound exception is raised.
self.compute._resource_tracker = rt
self.compute._update_available_resource_for_node(
self.context,
mock.sentinel.node,
)
rt.update_available_resource.assert_called_once_with(
self.context,
mock.sentinel.node,
startup=False,
)
self.assertTrue(log_mock.info.called)
self.assertIsNone(self.compute._resource_tracker)
@mock.patch('nova.compute.manager.LOG')
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
def test_update_available_resource_for_node_reshape_failed(self, get_rt,
log_mock):
def test_update_available_resource_for_node_reshape_failed(self, log_mock):
"""ReshapeFailed logs and reraises."""
rt = mock.Mock(spec_set=['update_available_resource'])
get_rt.return_value = rt
rt = self._mock_rt(spec_set=['update_available_resource'])
rt.update_available_resource.side_effect = exception.ReshapeFailed(
error='error')
@ -316,12 +288,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
log_mock.critical.assert_called_once()
@mock.patch('nova.compute.manager.LOG')
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
def test_update_available_resource_for_node_reshape_needed(self, get_rt,
log_mock):
def test_update_available_resource_for_node_reshape_needed(self, log_mock):
"""ReshapeNeeded logs and reraises."""
rt = mock.Mock(spec_set=['update_available_resource'])
get_rt.return_value = rt
rt = self._mock_rt(spec_set=['update_available_resource'])
rt.update_available_resource.side_effect = exception.ReshapeNeeded()
self.assertRaises(exception.ReshapeNeeded,
@ -333,13 +302,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.context, mock.sentinel.node, startup=True)
log_mock.exception.assert_called_once()
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
@mock.patch.object(manager.ComputeManager,
'_update_available_resource_for_node')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
@mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db')
def test_update_available_resource(self, get_db_nodes, get_avail_nodes,
update_mock, mock_get_rt):
update_mock):
mock_rt = self._mock_rt()
rc_mock = self.useFixture(fixtures.fixtures.MockPatchObject(
self.compute, 'reportclient')).mock
db_nodes = [self._make_compute_node('node%s' % i, i)
for i in range(1, 5)]
avail_nodes = set(['node2', 'node3', 'node4', 'node5'])
@ -355,14 +326,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
for node in avail_nodes_l]
)
rc_mock = mock_get_rt.return_value.reportclient
# First node in set should have been removed from DB
for db_node in db_nodes:
if db_node.hypervisor_hostname == 'node1':
db_node.destroy.assert_called_once_with()
rc_mock.delete_resource_provider.assert_called_once_with(
self.context, db_node, cascade=True)
mock_get_rt.return_value.remove_node.assert_called_once_with(
mock_rt.remove_node.assert_called_once_with(
'node1')
else:
self.assertFalse(db_node.destroy.called)
@ -4191,12 +4161,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
def test_rebuild_driver_error_same_host(self, mock_error, mock_aiffe):
instance = fake_instance.fake_instance_obj(self.context)
ex = test.TestingException('foo')
with mock.patch.object(self.compute, '_get_resource_tracker') as mrt:
self.assertRaises(test.TestingException,
self._test_rebuild_ex, instance, ex)
rt = mrt.return_value
self.assertFalse(
rt.delete_allocation_for_evacuated_instance.called)
rt = self._mock_rt()
self.assertRaises(test.TestingException,
self._test_rebuild_ex, instance, ex)
self.assertFalse(
rt.delete_allocation_for_evacuated_instance.called)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.compute.utils.add_instance_fault_from_exc')
@ -4208,14 +4177,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(self.context)
instance.system_metadata = {}
ex = test.TestingException('foo')
with mock.patch.object(self.compute, '_get_resource_tracker') as mrt:
self.assertRaises(test.TestingException,
self._test_rebuild_ex, instance, ex,
recreate=True, scheduled_node='foo')
rt = mrt.return_value
delete_alloc = rt.delete_allocation_for_evacuated_instance
delete_alloc.assert_called_once_with(self.context, instance, 'foo',
node_type='destination')
rt = self._mock_rt()
self.assertRaises(test.TestingException,
self._test_rebuild_ex, instance, ex,
recreate=True, scheduled_node='foo')
delete_alloc = rt.delete_allocation_for_evacuated_instance
delete_alloc.assert_called_once_with(self.context, instance, 'foo',
node_type='destination')
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.instance.Instance.drop_migration_context')
@ -4329,13 +4297,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
node=dead_node)
instance.system_metadata = img_sys_meta
instance.migration_context = None
mock_rt = self._mock_rt()
with test.nested(
mock.patch.object(self.compute, '_get_resource_tracker'),
mock.patch.object(self.compute, '_get_compute_info'),
mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
mock.patch.object(objects.Instance, 'save'),
mock.patch.object(self.compute, '_set_migration_status'),
) as (mock_rt, mock_get, mock_rebuild, mock_save, mock_set):
) as (mock_get, mock_rebuild, mock_save, mock_set):
mock_get.return_value.hypervisor_hostname = 'new-node'
self.compute.rebuild_instance(self.context, instance, None, None,
None, None, None, None, True,
@ -4343,13 +4311,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
self.assertEqual('new-node', instance.node)
mock_set.assert_called_once_with(None, 'done')
mock_rt.assert_called_once_with()
# Make sure the rebuild_claim was called with the proper image_meta
# from the instance.
mock_rebuild_claim = mock_rt.return_value.rebuild_claim
mock_rebuild_claim.assert_called_once()
self.assertIn('image_meta', mock_rebuild_claim.call_args[1])
actual_image_meta = mock_rebuild_claim.call_args[1][
mock_rt.rebuild_claim.assert_called_once()
self.assertIn('image_meta', mock_rt.rebuild_claim.call_args[1])
actual_image_meta = mock_rt.rebuild_claim.call_args[1][
'image_meta'].properties
self.assertIn('hw_numa_nodes', actual_image_meta)
self.assertEqual(1, actual_image_meta.hw_numa_nodes)
@ -4748,7 +4714,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
def test_reset_clears_provider_cache(self):
# Seed the cache so we can tell we've cleared it
reportclient = self.compute._get_resource_tracker().reportclient
reportclient = self.compute.reportclient
ptree = reportclient._provider_tree
ptree.new_root('foo', uuids.foo)
self.assertEqual([uuids.foo], ptree.get_provider_uuids())
@ -4997,7 +4963,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute._resource_tracker = fake_rt
self.compute.rt = fake_rt
self.allocations = [{
"resource_provider": {
@ -5010,7 +4976,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
}]
self.mock_get_allocs = self.useFixture(
fixtures.fixtures.MockPatchObject(
fake_rt.reportclient, 'get_allocations_for_consumer')).mock
self.compute.reportclient,
'get_allocations_for_consumer')).mock
self.mock_get_allocs.return_value = self.allocations
def _do_build_instance_update(self, mock_save, reschedule_update=False):
@ -6652,7 +6619,8 @@ class ComputeManagerErrorsOutMigrationTestCase(test.NoDBTestCase):
@ddt.ddt
class ComputeManagerMigrationTestCase(test.NoDBTestCase):
class ComputeManagerMigrationTestCase(test.NoDBTestCase,
fake_resource_tracker.RTMockMixin):
class TestResizeError(Exception):
pass
@ -6798,7 +6766,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch.object(self.compute, '_is_instance_storage_shared')
@mock.patch.object(self.compute, 'finish_revert_resize')
@mock.patch.object(self.compute, '_instance_update')
@mock.patch.object(self.compute, '_get_resource_tracker')
@mock.patch.object(self.compute.driver, 'destroy')
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@ -6812,7 +6779,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
migrate_instance_start,
setup_networks_on_host,
destroy,
_get_resource_tracker,
_instance_update,
finish_revert_resize,
_is_instance_storage_shared,
@ -6820,6 +6786,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
revert_migration_context,
mock_finish_revert):
self._mock_rt()
# NOTE(danms): Before a revert, the instance is "on"
# the destination host/node
self.migration.uuid = uuids.migration
@ -6866,7 +6833,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
reportclient = self.compute.reportclient
@mock.patch.object(self.compute, '_get_resource_tracker')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch.object(self.compute.driver, 'finish_revert_migration')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
@ -6893,11 +6859,10 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
migrate_instance_finish,
get_instance_nw_info,
finish_revert_migration,
mock_get_cn,
get_resource_tracker):
mock_get_cn):
# Restore the report client
get_resource_tracker.return_value.reportclient = reportclient
# Mock the resource tracker, but keep the report client
self._mock_rt().reportclient = reportclient
fault_create.return_value = (
test_instance_fault.fake_faults['fake-uuid'][0])
self.instance.migration_context = objects.MigrationContext()
@ -6913,10 +6878,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
do_test()
def test_finish_revert_resize_migration_context(self):
fake_rt = resource_tracker.ResourceTracker(None, None)
fake_rt.tracked_migrations[self.instance['uuid']] = (
self.migration, None)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'drop_move_claim')
@mock.patch('nova.compute.rpcapi.ComputeAPI.finish_revert_resize')
@ -6924,8 +6885,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@mock.patch.object(self.compute, '_is_instance_storage_shared')
@mock.patch.object(self.compute, '_instance_update')
@mock.patch.object(self.compute, '_get_resource_tracker',
return_value=fake_rt)
@mock.patch.object(self.compute.driver, 'destroy')
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@ -6941,7 +6900,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock_migrate_instance_start,
mock_setup_networks_on_host,
mock_destroy,
mock_get_resource_tracker,
mock_instance_update,
mock_is_instance_storage_shared,
mock_get_instance_nw_info,
@ -6949,6 +6907,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
mock_finish_revert,
mock_drop_move_claim):
self.compute.rt.tracked_migrations[self.instance['uuid']] = (
self.migration, None)
self.instance.migration_context = objects.MigrationContext()
self.migration.source_compute = self.instance['host']
self.migration.source_node = self.instance['node']
@ -7032,13 +6992,13 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@mock.patch.object(self.compute, '_get_resource_tracker')
@mock.patch.object(self.compute, '_delete_allocation_after_move')
@mock.patch.object(self.instance, 'drop_migration_context')
@mock.patch.object(self.instance, 'save')
def do_confirm_resize(mock_save, mock_drop, mock_delete, mock_get_rt,
def do_confirm_resize(mock_save, mock_drop, mock_delete,
mock_confirm, mock_nwapi, mock_notify,
mock_mig_save):
self._mock_rt()
self.instance.migration_context = objects.MigrationContext()
self.migration.source_compute = self.instance['host']
self.migration.source_node = self.instance['node']
@ -7053,7 +7013,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
do_confirm_resize()
def test_delete_allocation_after_move_confirm_by_migration(self):
with mock.patch.object(self.compute, '_reportclient') as mock_report:
with mock.patch.object(self.compute, 'reportclient') as mock_report:
mock_report.delete_allocation_for_instance.return_value = True
self.compute._delete_allocation_after_move(self.context,
self.instance,
@ -7064,7 +7024,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def test_revert_allocation(self):
"""New-style migration-based allocation revert."""
@mock.patch.object(self.compute, '_reportclient')
@mock.patch.object(self.compute, 'reportclient')
def doit(mock_report):
cu = uuids.node
a = {cu: {'resources': {'DISK_GB': 1}}}
@ -7083,7 +7043,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def test_revert_allocation_old_style(self):
"""Test that we don't delete allocs for migration if none found."""
@mock.patch.object(self.compute, '_reportclient')
@mock.patch.object(self.compute, 'reportclient')
def doit(mock_report):
mock_report.get_allocations_for_consumer.return_value = {}
self.migration.uuid = uuids.migration
@ -7104,7 +7064,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
we can do.
"""
@mock.patch.object(self.compute, '_reportclient')
@mock.patch.object(self.compute, 'reportclient')
def doit(mock_report):
a = {
uuids.node: {'resources': {'DISK_GB': 1}},
@ -7899,7 +7859,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
is_shared_instance_path=False,
is_shared_block_storage=False)
with test.nested(
mock.patch.object(self.compute, '_reportclient'),
mock.patch.object(self.compute, 'reportclient'),
mock.patch.object(self.compute,
'_delete_allocation_after_move'),
) as (
@ -7955,7 +7915,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
new=lambda: None)
@mock.patch.object(compute.reportclient,
'get_allocations_for_consumer_by_provider')
@mock.patch.object(compute, '_get_resource_tracker')
@mock.patch.object(vol_bdm, 'save')
@mock.patch.object(compute, 'update_available_resource')
@mock.patch.object(compute.volume_api, 'attachment_delete')
@ -7968,9 +7927,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
'get_by_instance_uuid')
def _test(mock_get_bdms, mock_net_api, mock_notify, mock_driver,
mock_rpc, mock_get_bdm_info, mock_attach_delete,
mock_update_resource, mock_bdm_save, mock_rt, mock_ga,
mock_update_resource, mock_bdm_save, mock_ga,
mock_clean, mock_notify_action):
mock_rt.return_value = mock.Mock()
self._mock_rt()
mock_get_bdms.return_value = [vol_bdm, image_bdm]
compute._post_live_migration(self.context, instance, dest_host,
@ -8035,14 +7994,14 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch.object(self.compute, 'update_available_resource')
@mock.patch.object(self.compute, '_update_scheduler_instance_info')
@mock.patch.object(self.compute, '_clean_instance_console_tokens')
@mock.patch.object(self.compute, '_get_resource_tracker')
def _test(_get_resource_tracker, _clean_instance_console_tokens,
def _test(_clean_instance_console_tokens,
_update_scheduler_instance_info, update_available_resource,
driver_cleanup, _live_migration_cleanup_flags,
post_live_migration_at_destination,
post_live_migration_at_source, migrate_instance_start,
_notify_about_instance_usage, get_instance_nw_info,
_get_instance_block_device_info):
self._mock_rt()
self.compute._post_live_migration(
self.context, self.instance, 'fake-dest',
migrate_data=migrate_data)
@ -8374,10 +8333,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
@mock.patch.object(self.compute, '_reschedule')
@mock.patch.object(self.compute, '_prep_resize')
@mock.patch.object(self.compute, '_get_resource_tracker')
def doit(mock_grt, mock_pr, mock_r):
# Restore the report client
mock_grt.return_value.reportclient = reportclient
def doit(mock_pr, mock_r):
# Mock the resource tracker, but keep the report client
self._mock_rt().reportclient = reportclient
mock_r.return_value = False
mock_pr.side_effect = test.TestingException