virt: Remove 'get_per_instance_usage' API

Another pretty trivial one. This one was intended to provide an overview
of instances that weren't properly tracked but were running on the host.
It was only ever implemented for the XenAPI driver, so remove it now.

Change-Id: Icaba3fc89e3295200e3d165722a5c24ee070002c
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2020-09-01 10:37:03 +01:00
parent 83ae149c72
commit 8aea747c97
5 changed files with 4 additions and 118 deletions
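
For context, the core of what this commit deletes: the resource tracker asked the driver for per-instance usage, diffed the reported UUIDs against its own tracked instances and migrations, and treated the remainder as "orphans" whose memory was added to the usage totals. A condensed, illustrative sketch — the helper below is not part of the commit; see the resource_tracker.py hunks for the real code:

    def find_orphans(driver, tracked_uuids):
        # Anything the driver reports that the tracker doesn't already
        # track is an "orphan"; only its memory_mb was ever accounted for.
        usage = driver.get_per_instance_usage()
        return [usage[uuid]
                for uuid in frozenset(usage) - frozenset(tracked_uuids)]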

nova/compute/resource_tracker.py

@@ -934,18 +934,13 @@ class ResourceTracker(object):
             context, self.compute_nodes[nodename], migrations,
             instance_by_uuid)
 
-        # Detect and account for orphaned instances that may exist on the
-        # hypervisor, but are not in the DB:
-        orphans = self._find_orphaned_instances()
-        self._update_usage_from_orphans(orphans, nodename)
-
         cn = self.compute_nodes[nodename]
 
         # NOTE(yjiang5): Because pci device tracker status is not cleared in
         # this periodic task, and also because the resource tracker is not
         # notified when instances are deleted, we need remove all usages
         # from deleted instances.
-        self.pci_tracker.clean_usage(instances, migrations, orphans)
+        self.pci_tracker.clean_usage(instances, migrations)
 
         dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
         cn.pci_device_pools = dev_pools_obj
@@ -1630,40 +1625,6 @@ class ResourceTracker(object):
                           "instance on the %s node %s",
                           node_type, cn_uuid, instance=instance)
 
-    def _find_orphaned_instances(self):
-        """Given the set of instances and migrations already account for
-        by resource tracker, sanity check the hypervisor to determine
-        if there are any "orphaned" instances left hanging around.
-
-        Orphans could be consuming memory and should be accounted for in
-        usage calculations to guard against potential out of memory
-        errors.
-        """
-        uuids1 = frozenset(self.tracked_instances)
-        uuids2 = frozenset(self.tracked_migrations.keys())
-        uuids = uuids1 | uuids2
-
-        usage = self.driver.get_per_instance_usage()
-        vuuids = frozenset(usage.keys())
-
-        orphan_uuids = vuuids - uuids
-        orphans = [usage[uuid] for uuid in orphan_uuids]
-
-        return orphans
-
-    def _update_usage_from_orphans(self, orphans, nodename):
-        """Include orphaned instances in usage."""
-        for orphan in orphans:
-            memory_mb = orphan['memory_mb']
-
-            LOG.warning("Detected running orphan instance: %(uuid)s "
-                        "(consuming %(memory_mb)s MB memory)",
-                        {'uuid': orphan['uuid'], 'memory_mb': memory_mb})
-
-            # just record memory usage for the orphan
-            usage = {'memory_mb': memory_mb}
-            self._update_usage(usage, nodename)
-
     def delete_allocation_for_shelve_offloaded_instance(self, context,
                                                         instance):
         self.reportclient.delete_allocation_for_instance(context,

nova/pci/manager.py

@@ -363,14 +363,13 @@ class PciDevTracker(object):
         if sign == 1:
             self.allocate_instance(instance)
 
-    def clean_usage(self, instances, migrations, orphans):
+    def clean_usage(self, instances, migrations):
         """Remove all usages for instances not passed in the parameter.
 
         The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
         """
         existed = set(inst['uuid'] for inst in instances)
         existed |= set(mig['instance_uuid'] for mig in migrations)
-        existed |= set(inst['uuid'] for inst in orphans)
         # need to copy keys, because the dict is modified in the loop body
         for uuid in list(self.claims):
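
To make the new contract concrete, a tiny worked example (data made up for illustration): with the orphans parameter gone, a PCI claim survives clean_usage() only if its UUID belongs to a passed instance or an in-progress migration.

    instances = [{'uuid': 'uuid1'}]
    migrations = [{'instance_uuid': 'uuid2'}]
    existed = {i['uuid'] for i in instances}
    existed |= {m['instance_uuid'] for m in migrations}
    # A claim keyed by 'uuid3' is now stale and gets freed by the loop above:
    claims = {'uuid1': ['dev-a'], 'uuid3': ['dev-b']}
    stale = [u for u in claims if u not in existed]   # -> ['uuid3']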

nova/tests/unit/compute/test_resource_tracker.py

@@ -815,70 +815,6 @@ class TestUpdateAvailableResources(BaseTestCase):
                         actual_resources))
         update_mock.assert_called_once()
 
-    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
-                return_value=objects.InstancePCIRequests(requests=[]))
-    @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
-                return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
-    @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
-    @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
-    def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
-                                              get_cn_mock, pci_mock,
-                                              instance_pci_mock):
-        # Setup virt resources to match used resources to number
-        # of defined instances on the hypervisor
-        virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
-        virt_resources.update(memory_mb_used=64)
-        self._setup_rt(virt_resources=virt_resources)
-
-        get_mock.return_value = []
-        migr_mock.return_value = []
-        get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
-
-        # Orphaned instances are those that the virt driver has on
-        # record as consuming resources on the compute node, but the
-        # Nova database has no record of the instance being active
-        # on the host. For some reason, the resource tracker only
-        # considers orphaned instance's memory usage in its calculations
-        # of free resources...
-        orphaned_usages = {
-            '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
-                # Yes, the return result format of get_per_instance_usage
-                # is indeed this stupid and redundant. Also note that the
-                # libvirt driver just returns an empty dict always for this
-                # method and so who the heck knows whether this stuff
-                # actually works.
-                'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
-                'memory_mb': 64
-            }
-        }
-        vd = self.driver_mock
-        vd.get_per_instance_usage.return_value = orphaned_usages
-
-        update_mock = self._update_available_resources()
-
-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
-        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
-        vals = {
-            'free_disk_gb': 6,
-            'local_gb': 6,
-            'free_ram_mb': 448,  # 512 - 64 orphaned usage
-            'memory_mb_used': 64,
-            'vcpus_used': 0,
-            'local_gb_used': 0,
-            'memory_mb': 512,
-            'current_workload': 0,
-            'vcpus': 4,
-            # Yep, for some reason, orphaned instances are not counted
-            # as running VMs...
-            'running_vms': 0
-        }
-        _update_compute_node(expected_resources, **vals)
-        actual_resources = update_mock.call_args[0][1]
-        self.assertTrue(obj_base.obj_equal_prims(expected_resources,
-                                                 actual_resources))
-        update_mock.assert_called_once()
-
     @mock.patch('nova.compute.utils.is_volume_backed_instance',
                 return_value=False)
     @mock.patch('nova.objects.InstancePCIRequests.get_by_instance',

nova/tests/unit/pci/test_manager.py

@@ -473,7 +473,6 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
         inst_2 = copy.copy(self.inst)
         inst_2.uuid = uuidsentinel.instance2
         migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
-        orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
 
         pci_requests_obj = self._create_pci_requests_object(
             [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
@@ -490,7 +489,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
         self.assertEqual(len(free_devs), 1)
         self.assertEqual(free_devs[0].vendor_id, 'v')
 
-        self.tracker.clean_usage([self.inst], [migr], [orph])
+        self.tracker.clean_usage([self.inst], [migr])
         free_devs = self.tracker.pci_stats.get_free_devs()
         self.assertEqual(len(free_devs), 2)
         self.assertEqual(
@@ -504,7 +503,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
         self.tracker.update_pci_for_instance(None, self.inst, sign=1)
         free_devs = self.tracker.pci_stats.get_free_devs()
         self.assertEqual(3, len(free_devs))
-        self.tracker.clean_usage([], [], [])
+        self.tracker.clean_usage([], [])
         free_devs = self.tracker.pci_stats.get_free_devs()
         self.assertEqual(3, len(free_devs))
         self.assertEqual(

nova/virt/driver.py

@@ -1577,15 +1577,6 @@ class ComputeDriver(object):
         # Refresh and check again.
         return nodename in self.get_available_nodes(refresh=True)
 
-    # TODO(stephenfin): This was only implemented (properly) for XenAPI and
-    # should be removed.
-    def get_per_instance_usage(self):
-        """Get information about instance resource usage.
-
-        :returns: dict of nova uuid => dict of usage info
-        """
-        return {}
-
     def instance_on_disk(self, instance):
         """Checks access of instance files on the host.